repo_name
string
path
string
copies
string
size
string
content
string
license
string
h2o64/android_kernel_motorola_msm8226
drivers/net/wireless/ath/ath5k/pcu.c
4920
28524
/* * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> * Copyright (c) 2007-2008 Matthew W. S. Bell <mentor@madwifi.org> * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu> * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org> * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ /*********************************\ * Protocol Control Unit Functions * \*********************************/ #include <asm/unaligned.h> #include "ath5k.h" #include "reg.h" #include "debug.h" /** * DOC: Protocol Control Unit (PCU) functions * * Protocol control unit is responsible to maintain various protocol * properties before a frame is send and after a frame is received to/from * baseband. 
To be more specific, PCU handles: * * - Buffering of RX and TX frames (after QCU/DCUs) * * - Encrypting and decrypting (using the built-in engine) * * - Generating ACKs, RTS/CTS frames * * - Maintaining TSF * * - FCS * * - Updating beacon data (with TSF etc) * * - Generating virtual CCA * * - RX/Multicast filtering * * - BSSID filtering * * - Various statistics * * -Different operating modes: AP, STA, IBSS * * Note: Most of these functions can be tweaked/bypassed so you can do * them on sw above for debugging or research. For more infos check out PCU * registers on reg.h. */ /** * DOC: ACK rates * * AR5212+ can use higher rates for ack transmission * based on current tx rate instead of the base rate. * It does this to better utilize channel usage. * There is a mapping between G rates (that cover both * CCK and OFDM) and ack rates that we use when setting * rate -> duration table. This mapping is hw-based so * don't change anything. * * To enable this functionality we must set * ah->ah_ack_bitrate_high to true else base rate is * used (1Mb for CCK, 6Mb for OFDM). */ static const unsigned int ack_rates_high[] = /* Tx -> ACK */ /* 1Mb -> 1Mb */ { 0, /* 2MB -> 2Mb */ 1, /* 5.5Mb -> 2Mb */ 1, /* 11Mb -> 2Mb */ 1, /* 6Mb -> 6Mb */ 4, /* 9Mb -> 6Mb */ 4, /* 12Mb -> 12Mb */ 6, /* 18Mb -> 12Mb */ 6, /* 24Mb -> 24Mb */ 8, /* 36Mb -> 24Mb */ 8, /* 48Mb -> 24Mb */ 8, /* 54Mb -> 24Mb */ 8 }; /*******************\ * Helper functions * \*******************/ /** * ath5k_hw_get_frame_duration() - Get tx time of a frame * @ah: The &struct ath5k_hw * @len: Frame's length in bytes * @rate: The @struct ieee80211_rate * @shortpre: Indicate short preample * * Calculate tx duration of a frame given it's rate and length * It extends ieee80211_generic_frame_duration for non standard * bwmodes. 
*/ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, int len, struct ieee80211_rate *rate, bool shortpre) { int sifs, preamble, plcp_bits, sym_time; int bitrate, bits, symbols, symbol_bits; int dur; /* Fallback */ if (!ah->ah_bwmode) { __le16 raw_dur = ieee80211_generic_frame_duration(ah->hw, NULL, len, rate); /* subtract difference between long and short preamble */ dur = le16_to_cpu(raw_dur); if (shortpre) dur -= 96; return dur; } bitrate = rate->bitrate; preamble = AR5K_INIT_OFDM_PREAMPLE_TIME; plcp_bits = AR5K_INIT_OFDM_PLCP_BITS; sym_time = AR5K_INIT_OFDM_SYMBOL_TIME; switch (ah->ah_bwmode) { case AR5K_BWMODE_40MHZ: sifs = AR5K_INIT_SIFS_TURBO; preamble = AR5K_INIT_OFDM_PREAMBLE_TIME_MIN; break; case AR5K_BWMODE_10MHZ: sifs = AR5K_INIT_SIFS_HALF_RATE; preamble *= 2; sym_time *= 2; break; case AR5K_BWMODE_5MHZ: sifs = AR5K_INIT_SIFS_QUARTER_RATE; preamble *= 4; sym_time *= 4; break; default: sifs = AR5K_INIT_SIFS_DEFAULT_BG; break; } bits = plcp_bits + (len << 3); /* Bit rate is in 100Kbits */ symbol_bits = bitrate * sym_time; symbols = DIV_ROUND_UP(bits * 10, symbol_bits); dur = sifs + preamble + (sym_time * symbols); return dur; } /** * ath5k_hw_get_default_slottime() - Get the default slot time for current mode * @ah: The &struct ath5k_hw */ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah) { struct ieee80211_channel *channel = ah->ah_current_channel; unsigned int slot_time; switch (ah->ah_bwmode) { case AR5K_BWMODE_40MHZ: slot_time = AR5K_INIT_SLOT_TIME_TURBO; break; case AR5K_BWMODE_10MHZ: slot_time = AR5K_INIT_SLOT_TIME_HALF_RATE; break; case AR5K_BWMODE_5MHZ: slot_time = AR5K_INIT_SLOT_TIME_QUARTER_RATE; break; case AR5K_BWMODE_DEFAULT: default: slot_time = AR5K_INIT_SLOT_TIME_DEFAULT; if ((channel->hw_value == AR5K_MODE_11B) && !ah->ah_short_slot) slot_time = AR5K_INIT_SLOT_TIME_B; break; } return slot_time; } /** * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode * @ah: The &struct ath5k_hw */ unsigned int 
ath5k_hw_get_default_sifs(struct ath5k_hw *ah) { struct ieee80211_channel *channel = ah->ah_current_channel; unsigned int sifs; switch (ah->ah_bwmode) { case AR5K_BWMODE_40MHZ: sifs = AR5K_INIT_SIFS_TURBO; break; case AR5K_BWMODE_10MHZ: sifs = AR5K_INIT_SIFS_HALF_RATE; break; case AR5K_BWMODE_5MHZ: sifs = AR5K_INIT_SIFS_QUARTER_RATE; break; case AR5K_BWMODE_DEFAULT: sifs = AR5K_INIT_SIFS_DEFAULT_BG; default: if (channel->band == IEEE80211_BAND_5GHZ) sifs = AR5K_INIT_SIFS_DEFAULT_A; break; } return sifs; } /** * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics) * @ah: The &struct ath5k_hw * * Reads MIB counters from PCU and updates sw statistics. Is called after a * MIB interrupt, because one of these counters might have reached their maximum * and triggered the MIB interrupt, to let us read and clear the counter. * * NOTE: Is called in interrupt context! */ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah) { struct ath5k_statistics *stats = &ah->stats; /* Read-And-Clear */ stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL); stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL); stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK); stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL); stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT); } /******************\ * ACK/CTS Timeouts * \******************/ /** * ath5k_hw_write_rate_duration() - Fill rate code to duration table * @ah: The &struct ath5k_hw * * Write the rate code to duration table upon hw reset. This is a helper for * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on * the hardware, based on current mode, for each rate. The rates which are * capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have * different rate code so we write their value twice (one for long preamble * and one for short). * * Note: Band doesn't matter here, if we set the values for OFDM it works * on both a and g modes. 
So all we have to do is set values for all g rates * that include all OFDM and CCK rates. * */ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah) { struct ieee80211_rate *rate; unsigned int i; /* 802.11g covers both OFDM and CCK */ u8 band = IEEE80211_BAND_2GHZ; /* Write rate duration table */ for (i = 0; i < ah->sbands[band].n_bitrates; i++) { u32 reg; u16 tx_time; if (ah->ah_ack_bitrate_high) rate = &ah->sbands[band].bitrates[ack_rates_high[i]]; /* CCK -> 1Mb */ else if (i < 4) rate = &ah->sbands[band].bitrates[0]; /* OFDM -> 6Mb */ else rate = &ah->sbands[band].bitrates[4]; /* Set ACK timeout */ reg = AR5K_RATE_DUR(rate->hw_value); /* An ACK frame consists of 10 bytes. If you add the FCS, * which ieee80211_generic_frame_duration() adds, * its 14 bytes. Note we use the control rate and not the * actual rate for this rate. See mac80211 tx.c * ieee80211_duration() for a brief description of * what rate we should choose to TX ACKs. */ tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false); ath5k_hw_reg_write(ah, tx_time, reg); if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) continue; tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, true); ath5k_hw_reg_write(ah, tx_time, reg + (AR5K_SET_SHORT_PREAMBLE << 2)); } } /** * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU * @ah: The &struct ath5k_hw * @timeout: Timeout in usec */ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) { if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK)) <= timeout) return -EINVAL; AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK, ath5k_hw_htoclock(ah, timeout)); return 0; } /** * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU * @ah: The &struct ath5k_hw * @timeout: Timeout in usec */ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) { if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS)) <= timeout) return -EINVAL; AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, 
AR5K_TIME_OUT_CTS, ath5k_hw_htoclock(ah, timeout)); return 0; } /*******************\ * RX filter Control * \*******************/ /** * ath5k_hw_set_lladdr() - Set station id * @ah: The &struct ath5k_hw * @mac: The card's mac address (array of octets) * * Set station id on hw using the provided mac address */ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) { struct ath_common *common = ath5k_hw_common(ah); u32 low_id, high_id; u32 pcu_reg; /* Set new station ID */ memcpy(common->macaddr, mac, ETH_ALEN); pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; low_id = get_unaligned_le32(mac); high_id = get_unaligned_le16(mac + 4); ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); return 0; } /** * ath5k_hw_set_bssid() - Set current BSSID on hw * @ah: The &struct ath5k_hw * * Sets the current BSSID and BSSID mask we have from the * common struct into the hardware */ void ath5k_hw_set_bssid(struct ath5k_hw *ah) { struct ath_common *common = ath5k_hw_common(ah); u16 tim_offset = 0; /* * Set BSSID mask on 5212 */ if (ah->ah_version == AR5K_AR5212) ath_hw_setbssidmask(common); /* * Set BSSID */ ath5k_hw_reg_write(ah, get_unaligned_le32(common->curbssid), AR5K_BSS_ID0); ath5k_hw_reg_write(ah, get_unaligned_le16(common->curbssid + 4) | ((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1); if (common->curaid == 0) { ath5k_hw_disable_pspoll(ah); return; } AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM, tim_offset ? tim_offset + 4 : 0); ath5k_hw_enable_pspoll(ah, NULL, 0); } /** * ath5k_hw_set_bssid_mask() - Filter out bssids we listen * @ah: The &struct ath5k_hw * @mask: The BSSID mask to set (array of octets) * * BSSID masking is a method used by AR5212 and newer hardware to inform PCU * which bits of the interface's MAC address should be looked at when trying * to decide which packets to ACK. In station mode and AP mode with a single * BSS every bit matters since we lock to only one BSS. 
In AP mode with * multiple BSSes (virtual interfaces) not every bit matters because hw must * accept frames for all BSSes and so we tweak some bits of our mac address * in order to have multiple BSSes. * * For more information check out ../hw.c of the common ath module. */ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) { struct ath_common *common = ath5k_hw_common(ah); /* Cache bssid mask so that we can restore it * on reset */ memcpy(common->bssidmask, mask, ETH_ALEN); if (ah->ah_version == AR5K_AR5212) ath_hw_setbssidmask(common); } /** * ath5k_hw_set_mcast_filter() - Set multicast filter * @ah: The &struct ath5k_hw * @filter0: Lower 32bits of muticast filter * @filter1: Higher 16bits of multicast filter */ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) { ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0); ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); } /** * ath5k_hw_get_rx_filter() - Get current rx filter * @ah: The &struct ath5k_hw * * Returns the RX filter by reading rx filter and * phy error filter registers. RX filter is used * to set the allowed frame types that PCU will accept * and pass to the driver. For a list of frame types * check out reg.h. */ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah) { u32 data, filter = 0; filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER); /*Radar detection for 5212*/ if (ah->ah_version == AR5K_AR5212) { data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL); if (data & AR5K_PHY_ERR_FIL_RADAR) filter |= AR5K_RX_FILTER_RADARERR; if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK)) filter |= AR5K_RX_FILTER_PHYERR; } return filter; } /** * ath5k_hw_set_rx_filter() - Set rx filter * @ah: The &struct ath5k_hw * @filter: RX filter mask (see reg.h) * * Sets RX filter register and also handles PHY error filter * register on 5212 and newer chips so that we have proper PHY * error reporting. 
*/ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) { u32 data = 0; /* Set PHY error filter register on 5212*/ if (ah->ah_version == AR5K_AR5212) { if (filter & AR5K_RX_FILTER_RADARERR) data |= AR5K_PHY_ERR_FIL_RADAR; if (filter & AR5K_RX_FILTER_PHYERR) data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK; } /* * The AR5210 uses promiscuous mode to detect radar activity */ if (ah->ah_version == AR5K_AR5210 && (filter & AR5K_RX_FILTER_RADARERR)) { filter &= ~AR5K_RX_FILTER_RADARERR; filter |= AR5K_RX_FILTER_PROM; } /*Zero length DMA (phy error reporting) */ if (data) AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA); else AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA); /*Write RX Filter register*/ ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER); /*Write PHY error filter register on 5212*/ if (ah->ah_version == AR5K_AR5212) ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL); } /****************\ * Beacon control * \****************/ #define ATH5K_MAX_TSF_READ 10 /** * ath5k_hw_get_tsf64() - Get the full 64bit TSF * @ah: The &struct ath5k_hw * * Returns the current TSF */ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) { u32 tsf_lower, tsf_upper1, tsf_upper2; int i; unsigned long flags; /* This code is time critical - we don't want to be interrupted here */ local_irq_save(flags); /* * While reading TSF upper and then lower part, the clock is still * counting (or jumping in case of IBSS merge) so we might get * inconsistent values. To avoid this, we read the upper part again * and check it has not been changed. We make the hypothesis that a * maximum of 3 changes can happens in a row (we use 10 as a safe * value). * * Impact on performance is pretty small, since in most cases, only * 3 register reads are needed. 
*/ tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32); for (i = 0; i < ATH5K_MAX_TSF_READ; i++) { tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32); tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32); if (tsf_upper2 == tsf_upper1) break; tsf_upper1 = tsf_upper2; } local_irq_restore(flags); WARN_ON(i == ATH5K_MAX_TSF_READ); return ((u64)tsf_upper1 << 32) | tsf_lower; } #undef ATH5K_MAX_TSF_READ /** * ath5k_hw_set_tsf64() - Set a new 64bit TSF * @ah: The &struct ath5k_hw * @tsf64: The new 64bit TSF * * Sets the new TSF */ void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) { ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32); ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32); } /** * ath5k_hw_reset_tsf() - Force a TSF reset * @ah: The &struct ath5k_hw * * Forces a TSF reset on PCU */ void ath5k_hw_reset_tsf(struct ath5k_hw *ah) { u32 val; val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF; /* * Each write to the RESET_TSF bit toggles a hardware internal * signal to reset TSF, but if left high it will cause a TSF reset * on the next chip reset as well. Thus we always write the value * twice to clear the signal. */ ath5k_hw_reg_write(ah, val, AR5K_BEACON); ath5k_hw_reg_write(ah, val, AR5K_BEACON); } /** * ath5k_hw_init_beacon_timers() - Initialize beacon timers * @ah: The &struct ath5k_hw * @next_beacon: Next TBTT * @interval: Current beacon interval * * This function is used to initialize beacon timers based on current * operation mode and settings. */ void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval) { u32 timer1, timer2, timer3; /* * Set the additional timers by mode */ switch (ah->opmode) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_STATION: /* In STA mode timer1 is used as next wakeup * timer and timer2 as next CFP duration start * timer. Both in 1/8TUs. 
*/ /* TODO: PCF handling */ if (ah->ah_version == AR5K_AR5210) { timer1 = 0xffffffff; timer2 = 0xffffffff; } else { timer1 = 0x0000ffff; timer2 = 0x0007ffff; } /* Mark associated AP as PCF incapable for now */ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PCF); break; case NL80211_IFTYPE_ADHOC: AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM); default: /* On non-STA modes timer1 is used as next DMA * beacon alert (DBA) timer and timer2 as next * software beacon alert. Both in 1/8TUs. */ timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3; timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3; break; } /* Timer3 marks the end of our ATIM window * a zero length window is not allowed because * we 'll get no beacons */ timer3 = next_beacon + 1; /* * Set the beacon register and enable all timers. */ /* When in AP or Mesh Point mode zero timer0 to start TSF */ if (ah->opmode == NL80211_IFTYPE_AP || ah->opmode == NL80211_IFTYPE_MESH_POINT) ath5k_hw_reg_write(ah, 0, AR5K_TIMER0); ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0); ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1); ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2); ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3); /* Force a TSF reset if requested and enable beacons */ if (interval & AR5K_BEACON_RESET_TSF) ath5k_hw_reset_tsf(ah); ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD | AR5K_BEACON_ENABLE), AR5K_BEACON); /* Flush any pending BMISS interrupts on ISR by * performing a clear-on-write operation on PISR * register for the BMISS bit (writing a bit on * ISR toggles a reset for that bit and leaves * the remaining bits intact) */ if (ah->ah_version == AR5K_AR5210) ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR); else ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR); /* TODO: Set enhanced sleep registers on AR5212 * based on vif->bss_conf params, until then * disable power save reporting.*/ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PWR_SV); } /** * ath5k_check_timer_win() - Check 
if timer B is timer A + window * @a: timer a (before b) * @b: timer b (after a) * @window: difference between a and b * @intval: timers are increased by this interval * * This helper function checks if timer B is timer A + window and covers * cases where timer A or B might have already been updated or wrapped * around (Timers are 16 bit). * * Returns true if O.K. */ static inline bool ath5k_check_timer_win(int a, int b, int window, int intval) { /* * 1.) usually B should be A + window * 2.) A already updated, B not updated yet * 3.) A already updated and has wrapped around * 4.) B has wrapped around */ if ((b - a == window) || /* 1.) */ (a - b == intval - window) || /* 2.) */ ((a | 0x10000) - b == intval - window) || /* 3.) */ ((b | 0x10000) - a == window)) /* 4.) */ return true; /* O.K. */ return false; } /** * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct * @ah: The &struct ath5k_hw * @intval: beacon interval * * This is a workaround for IBSS mode * * The need for this function arises from the fact that we have 4 separate * HW timer registers (TIMER0 - TIMER3), which are closely related to the * next beacon target time (NBTT), and that the HW updates these timers * separately based on the current TSF value. The hardware increments each * timer by the beacon interval, when the local TSF converted to TU is equal * to the value stored in the timer. * * The reception of a beacon with the same BSSID can update the local HW TSF * at any time - this is something we can't avoid. If the TSF jumps to a * time which is later than the time stored in a timer, this timer will not * be updated until the TSF in TU wraps around at 16 bit (the size of the * timers) and reaches the time which is stored in the timer. * * The problem is that these timers are closely related to TIMER0 (NBTT) and * that they define a time "window". When the TSF jumps between two timers * (e.g. 
ATIM and NBTT), the one in the past will be left behind (not * updated), while the one in the future will be updated every beacon * interval. This causes the window to get larger, until the TSF wraps * around as described above and the timer which was left behind gets * updated again. But - because the beacon interval is usually not an exact * divisor of the size of the timers (16 bit), an unwanted "window" between * these timers has developed! * * This is especially important with the ATIM window, because during * the ATIM window only ATIM frames and no data frames are allowed to be * sent, which creates transmission pauses after each beacon. This symptom * has been described as "ramping ping" because ping times increase linearly * for some time and then drop down again. A wrong window on the DMA beacon * timer has the same effect, so we check for these two conditions. * * Returns true if O.K. */ bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval) { unsigned int nbtt, atim, dma; nbtt = ath5k_hw_reg_read(ah, AR5K_TIMER0); atim = ath5k_hw_reg_read(ah, AR5K_TIMER3); dma = ath5k_hw_reg_read(ah, AR5K_TIMER1) >> 3; /* NOTE: SWBA is different. Having a wrong window there does not * stop us from sending data and this condition is caught by * other means (SWBA interrupt) */ if (ath5k_check_timer_win(nbtt, atim, 1, intval) && ath5k_check_timer_win(dma, nbtt, AR5K_TUNE_DMA_BEACON_RESP, intval)) return true; /* O.K. */ return false; } /** * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class * @ah: The &struct ath5k_hw * @coverage_class: IEEE 802.11 coverage class number * * Sets IFS intervals and ACK/CTS timeouts for given coverage class. 
*/ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) { /* As defined by IEEE 802.11-2007 17.3.8.6 */ int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class; int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time; int cts_timeout = ack_timeout; ath5k_hw_set_ifs_intervals(ah, slot_time); ath5k_hw_set_ack_timeout(ah, ack_timeout); ath5k_hw_set_cts_timeout(ah, cts_timeout); ah->ah_coverage_class = coverage_class; } /***************************\ * Init/Start/Stop functions * \***************************/ /** * ath5k_hw_start_rx_pcu() - Start RX engine * @ah: The &struct ath5k_hw * * Starts RX engine on PCU so that hw can process RXed frames * (ACK etc). * * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma */ void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) { AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } /** * at5k_hw_stop_rx_pcu() - Stop RX engine * @ah: The &struct ath5k_hw * * Stops RX engine on PCU */ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) { AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } /** * ath5k_hw_set_opmode() - Set PCU operating mode * @ah: The &struct ath5k_hw * @op_mode: One of enum nl80211_iftype * * Configure PCU for the various operating modes (AP/STA etc) */ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) { struct ath_common *common = ath5k_hw_common(ah); u32 pcu_reg, beacon_reg, low_id, high_id; ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode %d\n", op_mode); /* Preserve rest settings */ pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE | (ah->ah_version == AR5K_AR5210 ? 
(AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0)); beacon_reg = 0; switch (op_mode) { case NL80211_IFTYPE_ADHOC: pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE; beacon_reg |= AR5K_BCR_ADHOC; if (ah->ah_version == AR5K_AR5210) pcu_reg |= AR5K_STA_ID1_NO_PSPOLL; else AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE; beacon_reg |= AR5K_BCR_AP; if (ah->ah_version == AR5K_AR5210) pcu_reg |= AR5K_STA_ID1_NO_PSPOLL; else AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS); break; case NL80211_IFTYPE_STATION: pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE | (ah->ah_version == AR5K_AR5210 ? AR5K_STA_ID1_PWR_SV : 0); case NL80211_IFTYPE_MONITOR: pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE | (ah->ah_version == AR5K_AR5210 ? AR5K_STA_ID1_NO_PSPOLL : 0); break; default: return -EINVAL; } /* * Set PCU registers */ low_id = get_unaligned_le32(common->macaddr); high_id = get_unaligned_le16(common->macaddr + 4); ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); /* * Set Beacon Control Register on 5210 */ if (ah->ah_version == AR5K_AR5210) ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR); return 0; } /** * ath5k_hw_pcu_init() - Initialize PCU * @ah: The &struct ath5k_hw * @op_mode: One of enum nl80211_iftype * @mode: One of enum ath5k_driver_mode * * This function is used to initialize PCU by setting current * operation mode and various other settings. 
*/ void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode) { /* Set bssid and bssid mask */ ath5k_hw_set_bssid(ah); /* Set PCU config */ ath5k_hw_set_opmode(ah, op_mode); /* Write rate duration table only on AR5212 and if * virtual interface has already been brought up * XXX: rethink this after new mode changes to * mac80211 are integrated */ if (ah->ah_version == AR5K_AR5212 && ah->nvifs) ath5k_hw_write_rate_duration(ah); /* Set RSSI/BRSSI thresholds * * Note: If we decide to set this value * dynamically, have in mind that when AR5K_RSSI_THR * register is read it might return 0x40 if we haven't * wrote anything to it plus BMISS RSSI threshold is zeroed. * So doing a save/restore procedure here isn't the right * choice. Instead store it on ath5k_hw */ ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES | AR5K_TUNE_BMISS_THRES << AR5K_RSSI_THR_BMISS_S), AR5K_RSSI_THR); /* MIC QoS support */ if (ah->ah_mac_srev >= AR5K_SREV_AR2413) { ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL); ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL); } /* QoS NOACK Policy */ if (ah->ah_version == AR5K_AR5212) { ath5k_hw_reg_write(ah, AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) | AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) | AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET), AR5K_QOS_NOACK); } /* Restore slot time and ACK timeouts */ if (ah->ah_coverage_class > 0) ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class); /* Set ACK bitrate mode (see ack_rates_high) */ if (ah->ah_version == AR5K_AR5212) { u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB; if (ah->ah_ack_bitrate_high) AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val); else AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val); } return; }
gpl-2.0
faux123/Galaxy_Note_3
fs/9p/xattr.c
5176
4363
/* * Copyright IBM Corporation, 2010 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/sched.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "fid.h" #include "xattr.h" ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name, void *buffer, size_t buffer_size) { ssize_t retval; int msize, read_count; u64 offset = 0, attr_size; struct p9_fid *attr_fid; attr_fid = p9_client_xattrwalk(fid, name, &attr_size); if (IS_ERR(attr_fid)) { retval = PTR_ERR(attr_fid); p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n", retval); attr_fid = NULL; goto error; } if (!buffer_size) { /* request to get the attr_size */ retval = attr_size; goto error; } if (attr_size > buffer_size) { retval = -ERANGE; goto error; } msize = attr_fid->clnt->msize; while (attr_size) { if (attr_size > (msize - P9_IOHDRSZ)) read_count = msize - P9_IOHDRSZ; else read_count = attr_size; read_count = p9_client_read(attr_fid, ((char *)buffer)+offset, NULL, offset, read_count); if (read_count < 0) { /* error in xattr read */ retval = read_count; goto error; } offset += read_count; attr_size -= read_count; } /* Total read xattr bytes */ retval = offset; error: if (attr_fid) p9_client_clunk(attr_fid); return retval; } /* * v9fs_xattr_get() * * Copy an extended attribute into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. 
*/ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name, void *buffer, size_t buffer_size) { struct p9_fid *fid; p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n", name, buffer_size); fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return PTR_ERR(fid); return v9fs_fid_xattr_get(fid, name, buffer, buffer_size); } /* * v9fs_xattr_set() * * Create, replace or remove an extended attribute for this inode. Buffer * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. The flags XATTR_REPLACE and XATTR_CREATE * specify that an extended attribute must exist and must not exist * previous to the call, respectively. * * Returns 0, or a negative error number on failure. */ int v9fs_xattr_set(struct dentry *dentry, const char *name, const void *value, size_t value_len, int flags) { u64 offset = 0; int retval, msize, write_count; struct p9_fid *fid = NULL; p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n", name, value_len, flags); fid = v9fs_fid_clone(dentry); if (IS_ERR(fid)) { retval = PTR_ERR(fid); fid = NULL; goto error; } /* * On success fid points to xattr */ retval = p9_client_xattrcreate(fid, name, value_len, flags); if (retval < 0) { p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n", retval); goto error; } msize = fid->clnt->msize; while (value_len) { if (value_len > (msize - P9_IOHDRSZ)) write_count = msize - P9_IOHDRSZ; else write_count = value_len; write_count = p9_client_write(fid, ((char *)value)+offset, NULL, offset, write_count); if (write_count < 0) { /* error in xattr write */ retval = write_count; goto error; } offset += write_count; value_len -= write_count; } /* Total read xattr bytes */ retval = offset; error: if (fid) retval = p9_client_clunk(fid); return retval; } ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) { return v9fs_xattr_get(dentry, NULL, buffer, buffer_size); } const struct 
xattr_handler *v9fs_xattr_handlers[] = { &v9fs_xattr_user_handler, #ifdef CONFIG_9P_FS_POSIX_ACL &v9fs_xattr_acl_access_handler, &v9fs_xattr_acl_default_handler, #endif NULL };
gpl-2.0
PRJosh/kernel_msm-3.10
arch/ia64/sn/kernel/sn2/timer_interrupt.c
14136
2033
/* * * * Copyright (c) 2005, 2006 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Further, this software is distributed without any warranty that it is * free of the rightful claim of any third person regarding infringement * or the like. Any license provided herein, whether implied or * otherwise, applies only to this software file. Patent licenses, if * any, provided herein do not apply to combinations of this program with * other software, or any other product whatsoever. * * You should have received a copy of the GNU General Public * License along with this program; if not, write the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * For further information regarding this notice, see: * * http://oss.sgi.com/projects/GenInfo/NoticeExplan */ #include <linux/interrupt.h> #include <asm/sn/pda.h> #include <asm/sn/leds.h> extern void sn_lb_int_war_check(void); extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs); #define SN_LB_INT_WAR_INTERVAL 100 void sn_timer_interrupt(int irq, void *dev_id) { /* LED blinking */ if (!pda->hb_count--) { pda->hb_count = HZ / 2; set_led_bits(pda->hb_state ^= LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT); } if (is_shub1()) { if (enable_shub_wars_1_1()) { /* Bugfix code for SHUB 1.1 */ if (pda->pio_shub_war_cam_addr) *pda->pio_shub_war_cam_addr = 0x8000000000000010UL; } if (pda->sn_lb_int_war_ticks == 0) sn_lb_int_war_check(); pda->sn_lb_int_war_ticks++; if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL) pda->sn_lb_int_war_ticks = 0; } }
gpl-2.0
0mark/linux-sunxi
drivers/video/sunxi/lcd/lcd_bak/cpu_320x240_kgm281i0.c
57
5753
/* * Copyright (C) 2007-2012 Allwinner Technology Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include "lcd_panel_cfg.h" #include "../disp/ebios_lcdc_tve.h" /* * comment out this line if you want to use the lcd para define in * sys_config1.fex */ //#define LCD_PARA_USE_CONFIG #ifdef LCD_PARA_USE_CONFIG static void LCD_cfg_panel_info(__panel_para_t *info) { memset(info, 0, sizeof(__panel_para_t)); info->lcd_x = 320; info->lcd_y = 240; info->lcd_dclk_freq = 6; /* MHz */ info->lcd_ht = 320 + 30; /* htotal */ info->lcd_hbp = 20; /* h back porch */ info->lcd_hv_hspw = 10; /* hsync */ info->lcd_vt = (240 + 30) * 2; /* vtotal * 2 */ info->lcd_vbp = 20; /* v back porch */ info->lcd_hv_vspw = 10; /* vsync */ info->lcd_if = 1; /* 0:hv(sync+de); 1:cpu/8080; 2:ttl; 3:lvds */ info->lcd_cpu_if = 0; /* 0:18bit 4:16bit */ info->lcd_frm = 1; /* 0:direct; 1:rgb666 dither; 2:rgb656 dither */ info->lcd_pwm_not_used = 0; info->lcd_pwm_ch = 0; info->lcd_pwm_freq = 12500; /* Hz */ info->lcd_pwm_pol = 0; info->lcd_io_cfg0 = 0x10000000; /* clock phase */ info->lcd_gamma_correction_en = 0; } #endif /* * lcd flow function * CPU Panel:first TCON_open,than lcd_panel_init */ static __s32 LCD_open_flow(__u32 sel) { /* open lcd power, than delay 50ms */ LCD_OPEN_FUNC(sel, LCD_power_on_generic, 50); /* open lcd controller, than delay 500ms 
*/ LCD_OPEN_FUNC(sel, TCON_open, 500); /* lcd panel initial, than delay 50ms */ LCD_OPEN_FUNC(sel, LCD_panel_init, 50); /* open lcd backlight, than delay 0ms */ LCD_OPEN_FUNC(sel, LCD_bl_open_generic, 0); return 0; } static __s32 LCD_close_flow(__u32 sel) { /* close lcd backlight, than delay 0ms */ LCD_CLOSE_FUNC(sel, LCD_bl_close_generic, 0); /* lcd panel exit, than delay 0ms */ LCD_CLOSE_FUNC(sel, LCD_panel_exit, 0); /* close lcd controller, than delay 0ms */ LCD_CLOSE_FUNC(sel, TCON_close, 0); /* close lcd power, than delay 1000ms */ LCD_CLOSE_FUNC(sel, LCD_power_off_generic, 1000); return 0; } /* * lcd panel initial * cpu 8080 bus initial */ #define kgm281i0_rs(sel, data) LCD_GPIO_write(sel, 0, data) static void kgm281i0_write_gram_origin(__u32 sel) { LCD_CPU_WR(sel, 0x0020, 0); /* GRAM horizontal Address */ LCD_CPU_WR(sel, 0x0021, 319); /* GRAM Vertical Address */ LCD_CPU_WR_INDEX(sel, 0x22); /* Write Memery Start */ } static void kgm281i0_init(__u32 sel) { kgm281i0_rs(sel, 1); msleep(50); kgm281i0_rs(sel, 0); msleep(50); kgm281i0_rs(sel, 1); LCD_CPU_WR(sel, 0x0000, 0x0001); LCD_CPU_WR(sel, 0x0001, 0x0100); LCD_CPU_WR(sel, 0x0002, 0x0400); LCD_CPU_WR(sel, 0x0003, 0x1018); LCD_CPU_WR(sel, 0x0004, 0x0000); LCD_CPU_WR(sel, 0x0008, 0x0202); LCD_CPU_WR(sel, 0x0009, 0x0000); LCD_CPU_WR(sel, 0x000A, 0x0000); LCD_CPU_WR(sel, 0x000C, 0x0000); LCD_CPU_WR(sel, 0x000D, 0x0000); LCD_CPU_WR(sel, 0x000F, 0x0000); LCD_CPU_WR(sel, 0x0010, 0x0000); LCD_CPU_WR(sel, 0x0011, 0x0007); LCD_CPU_WR(sel, 0x0012, 0x0000); LCD_CPU_WR(sel, 0x0013, 0x0000); msleep(50); LCD_CPU_WR(sel, 0x0010, 0x17B0); LCD_CPU_WR(sel, 0x0011, 0x0001); msleep(50); LCD_CPU_WR(sel, 0x0012, 0x013C); msleep(50); LCD_CPU_WR(sel, 0x0013, 0x1300); LCD_CPU_WR(sel, 0x0029, 0x0012); msleep(50); LCD_CPU_WR(sel, 0x0020, 0x0000); LCD_CPU_WR(sel, 0x0021, 0x0000); LCD_CPU_WR(sel, 0x002B, 0x0020); LCD_CPU_WR(sel, 0x0030, 0x0000); LCD_CPU_WR(sel, 0x0031, 0x0306); LCD_CPU_WR(sel, 0x0032, 0x0200); LCD_CPU_WR(sel, 0x0035, 
0x0107); LCD_CPU_WR(sel, 0x0036, 0x0404); LCD_CPU_WR(sel, 0x0037, 0x0606); LCD_CPU_WR(sel, 0x0038, 0x0105); LCD_CPU_WR(sel, 0x0039, 0x0707); LCD_CPU_WR(sel, 0x003C, 0x0600); LCD_CPU_WR(sel, 0x003D, 0x0807); LCD_CPU_WR(sel, 0x0050, 0x0000); LCD_CPU_WR(sel, 0x0051, 0x00EF); LCD_CPU_WR(sel, 0x0052, 0x0000); LCD_CPU_WR(sel, 0x0053, 0x013F); LCD_CPU_WR(sel, 0x0060, 0x2700); LCD_CPU_WR(sel, 0x0061, 0x0001); LCD_CPU_WR(sel, 0x006A, 0x0000); LCD_CPU_WR(sel, 0x0080, 0x0000); LCD_CPU_WR(sel, 0x0081, 0x0000); LCD_CPU_WR(sel, 0x0082, 0x0000); LCD_CPU_WR(sel, 0x0083, 0x0000); LCD_CPU_WR(sel, 0x0084, 0x0000); LCD_CPU_WR(sel, 0x0085, 0x0000); LCD_CPU_WR(sel, 0x0090, 0x0013); LCD_CPU_WR(sel, 0x0092, 0x0000); LCD_CPU_WR(sel, 0x0093, 0x0003); LCD_CPU_WR(sel, 0x0095, 0x0110); LCD_CPU_WR(sel, 0x0097, 0x0000); LCD_CPU_WR(sel, 0x0098, 0x0000); LCD_CPU_WR(sel, 0x0007, 0x0001); msleep(50); LCD_CPU_WR(sel, 0x0007, 0x0021); LCD_CPU_WR(sel, 0x0007, 0x0023); msleep(50); LCD_CPU_WR(sel, 0x0007, 0x0173); } /* * irq func */ static void Lcd_cpuisr_proc(void) { kgm281i0_write_gram_origin(0); } static void LCD_panel_init(__u32 sel) { kgm281i0_init(sel); /* initial lcd panel */ kgm281i0_write_gram_origin(sel); /* set gram origin */ LCD_CPU_register_irq(sel, Lcd_cpuisr_proc); /* register cpu irq func */ LCD_CPU_AUTO_FLUSH(sel, 1); /* start sent gram data */ } static void LCD_panel_exit(__u32 sel) { } void LCD_get_panel_funs_0(__lcd_panel_fun_t *fun) { #ifdef LCD_PARA_USE_CONFIG fun->cfg_panel_info = LCD_cfg_panel_info; #endif fun->cfg_open_flow = LCD_open_flow; fun->cfg_close_flow = LCD_close_flow; }
gpl-2.0
zrafa/linuxkernel
linux-2.6.17.new/net/sunrpc/auth_gss/gss_krb5_seqnum.c
57
2635
/* * linux/net/sunrpc/gss_krb5_seqnum.c * * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/util_seqnum.c * * Copyright (c) 2000 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> */ /* * Copyright 1993 by OpenVision Technologies, Inc. * * Permission to use, copy, modify, distribute, and sell this software * and its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appears in all copies and * that both that copyright notice and this permission notice appear in * supporting documentation, and that the name of OpenVision not be used * in advertising or publicity pertaining to distribution of the software * without specific, written prior permission. OpenVision makes no * representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied warranty. * * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. 
*/ #include <linux/types.h> #include <linux/slab.h> #include <linux/sunrpc/gss_krb5.h> #include <linux/crypto.h> #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif s32 krb5_make_seq_num(struct crypto_tfm *key, int direction, s32 seqnum, unsigned char *cksum, unsigned char *buf) { unsigned char plain[8]; plain[0] = (unsigned char) (seqnum & 0xff); plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); plain[3] = (unsigned char) ((seqnum >> 24) & 0xff); plain[4] = direction; plain[5] = direction; plain[6] = direction; plain[7] = direction; return krb5_encrypt(key, cksum, plain, buf, 8); } s32 krb5_get_seq_num(struct crypto_tfm *key, unsigned char *cksum, unsigned char *buf, int *direction, s32 * seqnum) { s32 code; unsigned char plain[8]; dprintk("RPC: krb5_get_seq_num:\n"); if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) return code; if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || (plain[4] != plain[7])) return (s32)KG_BAD_SEQ; *direction = plain[4]; *seqnum = ((plain[0]) | (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); return (0); }
gpl-2.0
KaijunTang/linux-kernel
drivers/net/wireless/ath/ath9k/ar9002_phy.c
57
16964
/* * Copyright (c) 2008-2010 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * DOC: Programming Atheros 802.11n analog front end radios * * AR5416 MAC based PCI devices and AR518 MAC based PCI-Express * devices have either an external AR2133 analog front end radio for single * band 2.4 GHz communication or an AR5133 analog front end radio for dual * band 2.4 GHz / 5 GHz communication. * * All devices after the AR5416 and AR5418 family starting with the AR9280 * have their analog front radios, MAC/BB and host PCIe/USB interface embedded * into a single-chip and require less programming. * * The following single-chips exist with a respective embedded radio: * * AR9280 - 11n dual-band 2x2 MIMO for PCIe * AR9281 - 11n single-band 1x2 MIMO for PCIe * AR9285 - 11n single-band 1x1 for PCIe * AR9287 - 11n single-band 2x2 MIMO for PCIe * * AR9220 - 11n dual-band 2x2 MIMO for PCI * AR9223 - 11n single-band 2x2 MIMO for PCI * * AR9287 - 11n single-band 1x1 MIMO for USB */ #include "hw.h" #include "ar9002_phy.h" /** * ar9002_hw_set_channel - set channel on single-chip device * @ah: atheros hardware structure * @chan: * * This is the function to change channel on single-chip devices, that is * all devices after ar9280. 
* * This function takes the channel value in MHz and sets * hardware channel value. Assumes writes have been enabled to analog bus. * * Actual Expression, * * For 2GHz channel, * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17) * (freq_ref = 40MHz) * * For 5GHz channel, * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10) * (freq_ref = 40MHz/(24>>amodeRefSel)) */ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) { u16 bMode, fracMode, aModeRefSel = 0; u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0; struct chan_centers centers; u32 refDivA = 24; ath9k_hw_get_channel_centers(ah, chan, &centers); freq = centers.synth_center; reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL); reg32 &= 0xc0000000; if (freq < 4800) { /* 2 GHz, fractional mode */ u32 txctl; int regWrites = 0; bMode = 1; fracMode = 1; aModeRefSel = 0; channelSel = CHANSEL_2G(freq); if (AR_SREV_9287_11_OR_LATER(ah)) { if (freq == 2484) { /* Enable channel spreading for channel 14 */ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484, 1, regWrites); } else { REG_WRITE_ARRAY(&ah->iniCckfirNormal, 1, regWrites); } } else { txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); if (freq == 2484) { /* Enable channel spreading for channel 14 */ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl | AR_PHY_CCK_TX_CTRL_JAPAN); } else { REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); } } } else { bMode = 0; fracMode = 0; switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) { case 0: if ((freq % 20) == 0) aModeRefSel = 3; else if ((freq % 10) == 0) aModeRefSel = 2; if (aModeRefSel) break; case 1: default: aModeRefSel = 0; /* * Enable 2G (fractional) mode for channels * which are 5MHz spaced. 
*/ fracMode = 1; refDivA = 1; channelSel = CHANSEL_5G(freq); /* RefDivA setting */ REG_RMW_FIELD(ah, AR_AN_SYNTH9, AR_AN_SYNTH9_REFDIVA, refDivA); } if (!fracMode) { ndiv = (freq * (refDivA >> aModeRefSel)) / 60; channelSel = ndiv & 0x1ff; channelFrac = (ndiv & 0xfffffe00) * 2; channelSel = (channelSel << 17) | channelFrac; } } reg32 = reg32 | (bMode << 29) | (fracMode << 28) | (aModeRefSel << 26) | (channelSel); REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32); ah->curchan = chan; ah->curchan_rad_index = -1; return 0; } /** * ar9002_hw_spur_mitigate - convert baseband spur frequency * @ah: atheros hardware structure * @chan: * * For single-chip solutions. Converts to baseband spur frequency given the * input channel frequency and compute register settings below. */ static void ar9002_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan) { int bb_spur = AR_NO_SPUR; int freq; int bin, cur_bin; int bb_spur_off, spur_subchannel_sd; int spur_freq_sd; int spur_delta_phase; int denominator; int upper, lower, cur_vit_mask; int tmp, newVal; int i; int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 }; int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 }; int inc[4] = { 0, 100, 0, 0 }; struct chan_centers centers; int8_t mask_m[123]; int8_t mask_p[123]; int8_t mask_amt; int tmp_mask; int cur_bb_spur; bool is2GHz = IS_CHAN_2GHZ(chan); memset(&mask_m, 0, sizeof(int8_t) * 123); memset(&mask_p, 0, sizeof(int8_t) * 123); ath9k_hw_get_channel_centers(ah, chan, &centers); freq = centers.synth_center; ah->config.spurmode = SPUR_ENABLE_EEPROM; for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); if (is2GHz) cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ; else cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ; if (AR_NO_SPUR == cur_bb_spur) break; cur_bb_spur = cur_bb_spur - freq; if (IS_CHAN_HT40(chan)) { 
if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) && (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) { bb_spur = cur_bb_spur; break; } } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) && (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) { bb_spur = cur_bb_spur; break; } } if (AR_NO_SPUR == bb_spur) { REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, AR_PHY_FORCE_CLKEN_CCK_MRC_MUX); return; } else { REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, AR_PHY_FORCE_CLKEN_CCK_MRC_MUX); } bin = bb_spur * 320; tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); ENABLE_REGWRITE_BUFFER(ah); newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal); newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | AR_PHY_SPUR_REG_ENABLE_MASK_PPM | AR_PHY_SPUR_REG_MASK_RATE_SELECT | AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); REG_WRITE(ah, AR_PHY_SPUR_REG, newVal); if (IS_CHAN_HT40(chan)) { if (bb_spur < 0) { spur_subchannel_sd = 1; bb_spur_off = bb_spur + 10; } else { spur_subchannel_sd = 0; bb_spur_off = bb_spur - 10; } } else { spur_subchannel_sd = 0; bb_spur_off = bb_spur; } if (IS_CHAN_HT40(chan)) spur_delta_phase = ((bb_spur * 262144) / 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE; else spur_delta_phase = ((bb_spur * 524288) / 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE; denominator = IS_CHAN_2GHZ(chan) ? 
44 : 40; spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff; newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); REG_WRITE(ah, AR_PHY_TIMING11, newVal); newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S; REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal); cur_bin = -6000; upper = bin + 100; lower = bin - 100; for (i = 0; i < 4; i++) { int pilot_mask = 0; int chan_mask = 0; int bp = 0; for (bp = 0; bp < 30; bp++) { if ((cur_bin > lower) && (cur_bin < upper)) { pilot_mask = pilot_mask | 0x1 << bp; chan_mask = chan_mask | 0x1 << bp; } cur_bin += 100; } cur_bin += inc[i]; REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); REG_WRITE(ah, chan_mask_reg[i], chan_mask); } cur_vit_mask = 6100; upper = bin + 120; lower = bin - 120; for (i = 0; i < 123; i++) { if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { /* workaround for gcc bug #37014 */ volatile int tmp_v = abs(cur_vit_mask - bin); if (tmp_v < 75) mask_amt = 1; else mask_amt = 0; if (cur_vit_mask < 0) mask_m[abs(cur_vit_mask / 100)] = mask_amt; else mask_p[cur_vit_mask / 100] = mask_amt; } cur_vit_mask -= 100; } tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) | (mask_m[48] << 26) | (mask_m[49] << 24) | (mask_m[50] << 22) | (mask_m[51] << 20) | (mask_m[52] << 18) | (mask_m[53] << 16) | (mask_m[54] << 14) | (mask_m[55] << 12) | (mask_m[56] << 10) | (mask_m[57] << 8) | (mask_m[58] << 6) | (mask_m[59] << 4) | (mask_m[60] << 2) | (mask_m[61] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); tmp_mask = (mask_m[31] << 28) | (mask_m[32] << 26) | (mask_m[33] << 24) | (mask_m[34] << 22) | (mask_m[35] << 20) | (mask_m[36] << 18) | (mask_m[37] << 16) | (mask_m[48] << 14) | (mask_m[39] << 12) | (mask_m[40] << 10) | (mask_m[41] << 8) | (mask_m[42] << 6) | (mask_m[43] << 4) | (mask_m[44] << 2) | (mask_m[45] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); REG_WRITE(ah, 
AR_PHY_MASK2_M_31_45, tmp_mask); tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) | (mask_m[18] << 26) | (mask_m[18] << 24) | (mask_m[20] << 22) | (mask_m[20] << 20) | (mask_m[22] << 18) | (mask_m[22] << 16) | (mask_m[24] << 14) | (mask_m[24] << 12) | (mask_m[25] << 10) | (mask_m[26] << 8) | (mask_m[27] << 6) | (mask_m[28] << 4) | (mask_m[29] << 2) | (mask_m[30] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) | (mask_m[2] << 26) | (mask_m[3] << 24) | (mask_m[4] << 22) | (mask_m[5] << 20) | (mask_m[6] << 18) | (mask_m[7] << 16) | (mask_m[8] << 14) | (mask_m[9] << 12) | (mask_m[10] << 10) | (mask_m[11] << 8) | (mask_m[12] << 6) | (mask_m[13] << 4) | (mask_m[14] << 2) | (mask_m[15] << 0); REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); tmp_mask = (mask_p[15] << 28) | (mask_p[14] << 26) | (mask_p[13] << 24) | (mask_p[12] << 22) | (mask_p[11] << 20) | (mask_p[10] << 18) | (mask_p[9] << 16) | (mask_p[8] << 14) | (mask_p[7] << 12) | (mask_p[6] << 10) | (mask_p[5] << 8) | (mask_p[4] << 6) | (mask_p[3] << 4) | (mask_p[2] << 2) | (mask_p[1] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); tmp_mask = (mask_p[30] << 28) | (mask_p[29] << 26) | (mask_p[28] << 24) | (mask_p[27] << 22) | (mask_p[26] << 20) | (mask_p[25] << 18) | (mask_p[24] << 16) | (mask_p[23] << 14) | (mask_p[22] << 12) | (mask_p[21] << 10) | (mask_p[20] << 8) | (mask_p[19] << 6) | (mask_p[18] << 4) | (mask_p[17] << 2) | (mask_p[16] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask); tmp_mask = (mask_p[45] << 28) | (mask_p[44] << 26) | (mask_p[43] << 24) | (mask_p[42] << 22) | (mask_p[41] << 20) | (mask_p[40] << 18) | (mask_p[39] << 16) | (mask_p[38] << 14) | (mask_p[37] << 12) | (mask_p[36] << 10) | (mask_p[35] << 8) | (mask_p[34] << 6) | (mask_p[33] << 4) | (mask_p[32] << 2) 
| (mask_p[31] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask); tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28) | (mask_p[59] << 26) | (mask_p[58] << 24) | (mask_p[57] << 22) | (mask_p[56] << 20) | (mask_p[55] << 18) | (mask_p[54] << 16) | (mask_p[53] << 14) | (mask_p[52] << 12) | (mask_p[51] << 10) | (mask_p[50] << 8) | (mask_p[49] << 6) | (mask_p[48] << 4) | (mask_p[47] << 2) | (mask_p[46] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); REGWRITE_BUFFER_FLUSH(ah); } static void ar9002_olc_init(struct ath_hw *ah) { u32 i; if (!OLC_FOR_AR9280_20_LATER) return; if (OLC_FOR_AR9287_10_LATER) { REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9, AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL); ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0, AR9287_AN_TXPC0_TXPCMODE, AR9287_AN_TXPC0_TXPCMODE_S, AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE); udelay(100); } else { for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++) ah->originalGain[i] = MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4), AR_PHY_TX_GAIN); ah->PDADCdelta = 0; } } static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { u32 pll; pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); if (chan && IS_CHAN_HALF_RATE(chan)) pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); else if (chan && IS_CHAN_QUARTER_RATE(chan)) pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); if (chan && IS_CHAN_5GHZ(chan)) { if (IS_CHAN_A_FAST_CLOCK(ah, chan)) pll = 0x142c; else if (AR_SREV_9280_20(ah)) pll = 0x2850; else pll |= SM(0x28, AR_RTC_9160_PLL_DIV); } else { pll |= SM(0x2c, AR_RTC_9160_PLL_DIV); } return pll; } static void ar9002_hw_do_getnf(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]) { int16_t nf; nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR); nfarray[0] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR); if (IS_CHAN_HT40(ah->curchan)) nfarray[3] = sign_extend(nf, 9); if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) return; nf = 
MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR); nfarray[1] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR); if (IS_CHAN_HT40(ah->curchan)) nfarray[4] = sign_extend(nf, 9); } static void ar9002_hw_set_nf_limits(struct ath_hw *ah) { if (AR_SREV_9285(ah)) { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9285_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9285_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9285_2GHZ; } else if (AR_SREV_9287(ah)) { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9287_2GHZ; } else if (AR_SREV_9271(ah)) { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9271_2GHZ; } else { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9280_2GHZ; ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_5GHZ; ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_5GHZ; ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9280_5GHZ; } } void ar9002_hw_attach_phy_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); priv_ops->set_rf_regs = NULL; priv_ops->rf_alloc_ext_banks = NULL; priv_ops->rf_free_ext_banks = NULL; priv_ops->rf_set_freq = ar9002_hw_set_channel; priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate; priv_ops->olc_init = ar9002_olc_init; priv_ops->compute_pll_control = ar9002_hw_compute_pll_control; priv_ops->do_getnf = ar9002_hw_do_getnf; ar9002_hw_set_nf_limits(ah); } void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf) { u32 regval; regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); antconf->main_lna_conf = (regval & AR_PHY_9285_ANT_DIV_MAIN_LNACONF) >> AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S; antconf->alt_lna_conf = (regval & AR_PHY_9285_ANT_DIV_ALT_LNACONF) >> 
AR_PHY_9285_ANT_DIV_ALT_LNACONF_S; antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >> AR_PHY_9285_FAST_DIV_BIAS_S; } EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_get); void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf) { u32 regval; regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); regval &= ~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF | AR_PHY_9285_ANT_DIV_ALT_LNACONF | AR_PHY_9285_FAST_DIV_BIAS); regval |= ((antconf->main_lna_conf << AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S) & AR_PHY_9285_ANT_DIV_MAIN_LNACONF); regval |= ((antconf->alt_lna_conf << AR_PHY_9285_ANT_DIV_ALT_LNACONF_S) & AR_PHY_9285_ANT_DIV_ALT_LNACONF); regval |= ((antconf->fast_div_bias << AR_PHY_9285_FAST_DIV_BIAS_S) & AR_PHY_9285_FAST_DIV_BIAS); REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval); } EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_set);
gpl-2.0
vkrmsngh/Test-repo-to-learning-git
u-boot/board/freescale/common/idt8t49n222a_serdes_clk.c
57
5721
/* * Copyright 2013 Freescale Semiconductor, Inc. * Author: Shaveta Leekha <shaveta@freescale.com> * * SPDX-License-Identifier: GPL-2.0+ */ #include "idt8t49n222a_serdes_clk.h" #define DEVICE_ID_REG 0x00 static int check_pll_status(u8 idt_addr) { u8 val = 0; int ret; ret = i2c_read(idt_addr, 0x17, 1, &val, 1); if (ret < 0) { printf("IDT:0x%x could not read status register from device.\n", idt_addr); return ret; } if (val & 0x04) { debug("idt8t49n222a PLL is LOCKED: %x\n", val); } else { printf("idt8t49n222a PLL is not LOCKED: %x\n", val); return -1; } return 0; } int set_serdes_refclk(u8 idt_addr, u8 serdes_num, enum serdes_refclk refclk1, enum serdes_refclk refclk2, u8 feedback) { u8 dev_id = 0; int i, ret; debug("IDT:Configuring idt8t49n222a device at I2C address: 0x%2x\n", idt_addr); ret = i2c_read(idt_addr, DEVICE_ID_REG, 1, &dev_id, 1); if (ret < 0) { debug("IDT:0x%x could not read DEV_ID from device.\n", idt_addr); return ret; } if ((dev_id != 0x00) && (dev_id != 0x24) && (dev_id != 0x2a)) { debug("IDT: device at address 0x%x is not idt8t49n222a.\n", idt_addr); } if (serdes_num != 1 && serdes_num != 2) { debug("serdes_num should be 1 for SerDes1 and" " 2 for SerDes2.\n"); return -1; } if ((refclk1 == SERDES_REFCLK_122_88 && refclk2 != SERDES_REFCLK_122_88) || (refclk1 != SERDES_REFCLK_122_88 && refclk2 == SERDES_REFCLK_122_88)) { debug("Only one refclk at 122.88MHz is not supported." 
" Please set both refclk1 & refclk2 to 122.88MHz" " or both not to 122.88MHz.\n"); return -1; } if (refclk1 != SERDES_REFCLK_100 && refclk1 != SERDES_REFCLK_122_88 && refclk1 != SERDES_REFCLK_125 && refclk1 != SERDES_REFCLK_156_25) { debug("refclk1 should be 100MHZ, 122.88MHz, 125MHz" " or 156.25MHz.\n"); return -1; } if (refclk2 != SERDES_REFCLK_100 && refclk2 != SERDES_REFCLK_122_88 && refclk2 != SERDES_REFCLK_125 && refclk2 != SERDES_REFCLK_156_25) { debug("refclk2 should be 100MHZ, 122.88MHz, 125MHz" " or 156.25MHz.\n"); return -1; } if (feedback != 0 && feedback != 1) { debug("valid values for feedback are 0(default) or 1.\n"); return -1; } /* Configuring IDT for output refclks as * Refclk1 = 122.88MHz Refclk2 = 122.88MHz */ if (refclk1 == SERDES_REFCLK_122_88 && refclk2 == SERDES_REFCLK_122_88) { printf("Setting refclk1:122.88 and refclk2:122.88\n"); for (i = 0; i < NUM_IDT_REGS; i++) i2c_reg_write(idt_addr, idt_conf_122_88[i][0], idt_conf_122_88[i][1]); if (feedback) { for (i = 0; i < NUM_IDT_REGS_FEEDBACK; i++) i2c_reg_write(idt_addr, idt_conf_122_88_feedback[i][0], idt_conf_122_88_feedback[i][1]); } } if (refclk1 != SERDES_REFCLK_122_88 && refclk2 != SERDES_REFCLK_122_88) { for (i = 0; i < NUM_IDT_REGS; i++) i2c_reg_write(idt_addr, idt_conf_not_122_88[i][0], idt_conf_not_122_88[i][1]); } /* Configuring IDT for output refclks as * Refclk1 = 100MHz Refclk2 = 125MHz */ if (refclk1 == SERDES_REFCLK_100 && refclk2 == SERDES_REFCLK_125) { printf("Setting refclk1:100 and refclk2:125\n"); i2c_reg_write(idt_addr, 0x11, 0x10); } /* Configuring IDT for output refclks as * Refclk1 = 125MHz Refclk2 = 125MHz */ if (refclk1 == SERDES_REFCLK_125 && refclk2 == SERDES_REFCLK_125) { printf("Setting refclk1:125 and refclk2:125\n"); i2c_reg_write(idt_addr, 0x10, 0x10); i2c_reg_write(idt_addr, 0x11, 0x10); } /* Configuring IDT for output refclks as * Refclk1 = 125MHz Refclk2 = 100MHz */ if (refclk1 == SERDES_REFCLK_125 && refclk2 == SERDES_REFCLK_100) { printf("Setting 
refclk1:125 and refclk2:100\n"); i2c_reg_write(idt_addr, 0x10, 0x10); } /* Configuring IDT for output refclks as * Refclk1 = 156.25MHz Refclk2 = 156.25MHz */ if (refclk1 == SERDES_REFCLK_156_25 && refclk2 == SERDES_REFCLK_156_25) { printf("Setting refclk1:156.25 and refclk2:156.25\n"); for (i = 0; i < NUM_IDT_REGS_156_25; i++) i2c_reg_write(idt_addr, idt_conf_156_25[i][0], idt_conf_156_25[i][1]); } /* Configuring IDT for output refclks as * Refclk1 = 100MHz Refclk2 = 156.25MHz */ if (refclk1 == SERDES_REFCLK_100 && refclk2 == SERDES_REFCLK_156_25) { printf("Setting refclk1:100 and refclk2:156.25\n"); for (i = 0; i < NUM_IDT_REGS_156_25; i++) i2c_reg_write(idt_addr, idt_conf_100_156_25[i][0], idt_conf_100_156_25[i][1]); } /* Configuring IDT for output refclks as * Refclk1 = 125MHz Refclk2 = 156.25MHz */ if (refclk1 == SERDES_REFCLK_125 && refclk2 == SERDES_REFCLK_156_25) { printf("Setting refclk1:125 and refclk2:156.25\n"); for (i = 0; i < NUM_IDT_REGS_156_25; i++) i2c_reg_write(idt_addr, idt_conf_125_156_25[i][0], idt_conf_125_156_25[i][1]); } /* Configuring IDT for output refclks as * Refclk1 = 156.25MHz Refclk2 = 100MHz */ if (refclk1 == SERDES_REFCLK_156_25 && refclk2 == SERDES_REFCLK_100) { printf("Setting refclk1:156.25 and refclk2:100\n"); for (i = 0; i < NUM_IDT_REGS_156_25; i++) i2c_reg_write(idt_addr, idt_conf_156_25_100[i][0], idt_conf_156_25_100[i][1]); } /* Configuring IDT for output refclks as * Refclk1 = 156.25MHz Refclk2 = 125MHz */ if (refclk1 == SERDES_REFCLK_156_25 && refclk2 == SERDES_REFCLK_125) { printf("Setting refclk1:156.25 and refclk2:125\n"); for (i = 0; i < NUM_IDT_REGS_156_25; i++) i2c_reg_write(idt_addr, idt_conf_156_25_125[i][0], idt_conf_156_25_125[i][1]); } /* waiting for maximum of 1 second if PLL doesn'r get locked * initially. then check the status again. */ if (check_pll_status(idt_addr)) { mdelay(1000); if (check_pll_status(idt_addr)) return -1; } return 0; }
gpl-2.0
rdesfo/kernel
block/blk-iopoll.c
313
6060
/* * Functions related to interrupt-poll handling in the block layer. This * is similar to NAPI for network devices. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/blk-iopoll.h> #include <linux/delay.h> #include "blk.h" int blk_iopoll_enabled = 1; EXPORT_SYMBOL(blk_iopoll_enabled); static unsigned int blk_iopoll_budget __read_mostly = 256; static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll); /** * blk_iopoll_sched - Schedule a run of the iopoll handler * @iop: The parent iopoll structure * * Description: * Add this blk_iopoll structure to the pending poll list and trigger the * raise of the blk iopoll softirq. The driver must already have gotten a * successful return from blk_iopoll_sched_prep() before calling this. **/ void blk_iopoll_sched(struct blk_iopoll *iop) { unsigned long flags; local_irq_save(flags); list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(blk_iopoll_sched); /** * __blk_iopoll_complete - Mark this @iop as un-polled again * @iop: The parent iopoll structure * * Description: * See blk_iopoll_complete(). This function must be called with interrupts * disabled. **/ void __blk_iopoll_complete(struct blk_iopoll *iop) { list_del(&iop->list); smp_mb__before_clear_bit(); clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); } EXPORT_SYMBOL(__blk_iopoll_complete); /** * blk_iopoll_complete - Mark this @iop as un-polled again * @iop: The parent iopoll structure * * Description: * If a driver consumes less than the assigned budget in its run of the * iopoll handler, it'll end the polled mode by calling this function. The * iopoll handler will not be invoked again before blk_iopoll_sched_prep() * is called. 
**/ void blk_iopoll_complete(struct blk_iopoll *iopoll) { unsigned long flags; local_irq_save(flags); __blk_iopoll_complete(iopoll); local_irq_restore(flags); } EXPORT_SYMBOL(blk_iopoll_complete); static void blk_iopoll_softirq(struct softirq_action *h) { struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll); int rearm = 0, budget = blk_iopoll_budget; unsigned long start_time = jiffies; local_irq_disable(); while (!list_empty(list)) { struct blk_iopoll *iop; int work, weight; /* * If softirq window is exhausted then punt. */ if (budget <= 0 || time_after(jiffies, start_time)) { rearm = 1; break; } local_irq_enable(); /* Even though interrupts have been re-enabled, this * access is safe because interrupts can only add new * entries to the tail of this list, and only ->poll() * calls can remove this head entry from the list. */ iop = list_entry(list->next, struct blk_iopoll, list); weight = iop->weight; work = 0; if (test_bit(IOPOLL_F_SCHED, &iop->state)) work = iop->poll(iop, weight); budget -= work; local_irq_disable(); /* * Drivers must not modify the iopoll state, if they * consume their assigned weight (or more, some drivers can't * easily just stop processing, they have to complete an * entire mask of commands).In such cases this code * still "owns" the iopoll instance and therefore can * move the instance around on the list at-will. */ if (work >= weight) { if (blk_iopoll_disable_pending(iop)) __blk_iopoll_complete(iop); else list_move_tail(&iop->list, list); } } if (rearm) __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); local_irq_enable(); } /** * blk_iopoll_disable - Disable iopoll on this @iop * @iop: The parent iopoll structure * * Description: * Disable io polling and wait for any pending callbacks to have completed. 
 **/
void blk_iopoll_disable(struct blk_iopoll *iop)
{
	set_bit(IOPOLL_F_DISABLE, &iop->state);
	/* Spin (sleeping) until we win ownership of the SCHED bit, which
	 * guarantees no poll run is in flight. */
	while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state))
		msleep(1);
	clear_bit(IOPOLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_disable);

/**
 * blk_iopoll_enable - Enable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that the handler run will not be
 *     scheduled, it will only mark it as active.
 **/
void blk_iopoll_enable(struct blk_iopoll *iop)
{
	/* Caller must currently own the SCHED bit (set by init or disable). */
	BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);

/**
 * blk_iopoll_init - Initialize this @iop
 * @iop: The parent iopoll structure
 * @weight: The default weight (or command completion budget)
 * @poll_fn: The handler to invoke
 *
 * Description:
 *     Initialize this blk_iopoll structure. Before being actively used, the
 *     driver must call blk_iopoll_enable().
 **/
void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
{
	memset(iop, 0, sizeof(*iop));
	INIT_LIST_HEAD(&iop->list);
	iop->weight = weight;
	iop->poll = poll_fn;
	/* Start in the "owned" state; blk_iopoll_enable() releases it. */
	set_bit(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_init);

/* CPU hotplug callback: rescue pending iopoll work from a dead CPU. */
static int blk_iopoll_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
				 this_cpu_ptr(&blk_cpu_iopoll));
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block blk_iopoll_cpu_notifier = {
	.notifier_call	= blk_iopoll_cpu_notify,
};

/* One-time setup: per-CPU lists, softirq handler, hotplug notifier. */
static __init int blk_iopoll_setup(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

	open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);
	register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
	return 0;
}
subsys_initcall(blk_iopoll_setup);
gpl-2.0
b8e5n/KTG-kernel_es209ra
arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c
1593
6965
/* arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c
 *
 * Verification code for aDSP VFE packets from userspace.
 *
 * Copyright (C) 2008 Google, Inc.
 * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <mach/qdsp5/qdsp5vfecmdi.h>
#include "adsp.h"
#include <mach/debug_mm.h>

/* Output buffer sizes captured from the most recent VFE_CMD_AXI_OP_CFG
 * (see verify_vfe_command_table); used to bound pmem fixups of later acks. */
static uint32_t size1_y, size2_y, size1_cbcr, size2_cbcr;
/* Fixed sizes (bytes) of the autofocus and AWB statistics buffers. */
static uint32_t af_size = 4228;
static uint32_t awb_size = 8196;

/* Validate an OP1/OP2 ack: check the packet size and translate/bound-check
 * the Y and CbCr buffer addresses against the client's pmem regions. */
static inline int verify_cmd_op_ack(struct msm_adsp_module *module,
				    void *cmd_data, size_t cmd_size)
{
	vfe_cmd_op1_ack *cmd = (vfe_cmd_op1_ack *)cmd_data;
	void **addr_y = (void **)&cmd->op1_buf_y_addr;
	void **addr_cbcr = (void **)(&cmd->op1_buf_cbcr_addr);

	if (cmd_size != sizeof(vfe_cmd_op1_ack))
		return -1;
	/* NULL addresses are allowed and simply skipped. */
	if ((*addr_y && adsp_pmem_fixup(module, addr_y, size1_y)) ||
	    (*addr_cbcr && adsp_pmem_fixup(module, addr_cbcr, size1_cbcr)))
		return -1;
	return 0;
}

/* Validate an autofocus-statistics config: fix up all three output
 * buffer pointers against af_size. */
static inline int verify_cmd_stats_autofocus_cfg(struct msm_adsp_module *module,
						 void *cmd_data,
						 size_t cmd_size)
{
	int i;
	vfe_cmd_stats_autofocus_cfg *cmd =
		(vfe_cmd_stats_autofocus_cfg *)cmd_data;

	if (cmd_size != sizeof(vfe_cmd_stats_autofocus_cfg))
		return -1;
	for (i = 0; i < 3; i++) {
		void **addr = (void **)(&cmd->af_stats_op_buf[i]);
		if (*addr && adsp_pmem_fixup(module, addr, af_size))
			return -1;
	}
	return 0;
}

/* Validate a white-balance/exposure-statistics config: fix up all three
 * output buffer pointers against awb_size. */
static inline int verify_cmd_stats_wb_exp_cfg(struct msm_adsp_module *module,
					      void *cmd_data, size_t cmd_size)
{
	vfe_cmd_stats_wb_exp_cfg *cmd =
		(vfe_cmd_stats_wb_exp_cfg *)cmd_data;
	int i;

	if (cmd_size != sizeof(vfe_cmd_stats_wb_exp_cfg))
		return -1;
	for
 (i = 0; i < 3; i++) {
		void **addr = (void **)(&cmd->wb_exp_stats_op_buf[i]);
		if (*addr && adsp_pmem_fixup(module, addr, awb_size))
			return -1;
	}
	return 0;
}

/* Validate an autofocus-statistics ack: fix up the single output buffer. */
static inline int verify_cmd_stats_af_ack(struct msm_adsp_module *module,
					  void *cmd_data, size_t cmd_size)
{
	vfe_cmd_stats_af_ack *cmd = (vfe_cmd_stats_af_ack *)cmd_data;
	void **addr = (void **)&cmd->af_stats_op_buf;

	if (cmd_size != sizeof(vfe_cmd_stats_af_ack))
		return -1;
	if (*addr && adsp_pmem_fixup(module, addr, af_size))
		return -1;
	return 0;
}

/* Validate a WB/exposure-statistics ack: fix up the single output buffer. */
static inline int verify_cmd_stats_wb_exp_ack(struct msm_adsp_module *module,
					      void *cmd_data, size_t cmd_size)
{
	vfe_cmd_stats_wb_exp_ack *cmd = (vfe_cmd_stats_wb_exp_ack *)cmd_data;
	void **addr = (void **)&cmd->wb_exp_stats_op_buf;

	if (cmd_size != sizeof(vfe_cmd_stats_wb_exp_ack))
		return -1;
	if (*addr && adsp_pmem_fixup(module, addr, awb_size))
		return -1;
	return 0;
}

/* Dispatch a packet from the main VFE command queue to its verifier.
 * Commands with IDs <= 29 that carry no buffer pointers pass through. */
static int verify_vfe_command(struct msm_adsp_module *module,
			      void *cmd_data, size_t cmd_size)
{
	uint32_t cmd_id = ((uint32_t *)cmd_data)[0];
	switch (cmd_id) {
	case VFE_CMD_OP1_ACK:
		return verify_cmd_op_ack(module, cmd_data, cmd_size);
	case VFE_CMD_OP2_ACK:
		return verify_cmd_op_ack(module, cmd_data, cmd_size);
	case VFE_CMD_STATS_AUTOFOCUS_CFG:
		return verify_cmd_stats_autofocus_cfg(module, cmd_data,
						      cmd_size);
	case VFE_CMD_STATS_WB_EXP_CFG:
		return verify_cmd_stats_wb_exp_cfg(module, cmd_data, cmd_size);
	case VFE_CMD_STATS_AF_ACK:
		return verify_cmd_stats_af_ack(module, cmd_data, cmd_size);
	case VFE_CMD_STATS_WB_EXP_ACK:
		return verify_cmd_stats_wb_exp_ack(module, cmd_data, cmd_size);
	default:
		if (cmd_id > 29) {
			MM_ERR("module %s: invalid VFE command id %d\n",
				module->name, cmd_id);
			return -1;
		}
	}
	return 0;
}

/* Verify a packet on the scale queue: only IDs 0 and 1 are valid. */
static int verify_vfe_command_scale(struct msm_adsp_module *module,
				    void *cmd_data, size_t cmd_size)
{
	uint32_t cmd_id = ((uint32_t *)cmd_data)[0];
	// FIXME: check the size
	if (cmd_id > 1) {
		MM_ERR("module %s: invalid VFE SCALE command id %d\n",
			module->name, cmd_id);
		return -1;
	}
	return 0;
}

static
uint32_t get_size(uint32_t hw) { uint32_t height, width; uint32_t height_mask = 0x3ffc; uint32_t width_mask = 0x3ffc000; height = (hw & height_mask) >> 2; width = (hw & width_mask) >> 14 ; return height * width; } static int verify_vfe_command_table(struct msm_adsp_module *module, void *cmd_data, size_t cmd_size) { uint32_t cmd_id = ((uint32_t *)cmd_data)[0]; int i; switch (cmd_id) { case VFE_CMD_AXI_IP_CFG: { vfe_cmd_axi_ip_cfg *cmd = (vfe_cmd_axi_ip_cfg *)cmd_data; uint32_t size; if (cmd_size != sizeof(vfe_cmd_axi_ip_cfg)) { MM_ERR("module %s: invalid VFE TABLE \ (VFE_CMD_AXI_IP_CFG) command size %d\n", module->name, cmd_size); return -1; } size = get_size(cmd->ip_cfg_part2); for (i = 0; i < 8; i++) { void **addr = (void **) &cmd->ip_buf_addr[i]; if (*addr && adsp_pmem_fixup(module, addr, size)) return -1; } } case VFE_CMD_AXI_OP_CFG: { vfe_cmd_axi_op_cfg *cmd = (vfe_cmd_axi_op_cfg *)cmd_data; void **addr1_y, **addr2_y, **addr1_cbcr, **addr2_cbcr; if (cmd_size != sizeof(vfe_cmd_axi_op_cfg)) { MM_ERR("module %s: invalid VFE TABLE \ (VFE_CMD_AXI_OP_CFG) command size %d\n", module->name, cmd_size); return -1; } size1_y = get_size(cmd->op1_y_cfg_part2); size1_cbcr = get_size(cmd->op1_cbcr_cfg_part2); size2_y = get_size(cmd->op2_y_cfg_part2); size2_cbcr = get_size(cmd->op2_cbcr_cfg_part2); for (i = 0; i < 8; i++) { addr1_y = (void **)(&cmd->op1_buf1_addr[2*i]); addr1_cbcr = (void **)(&cmd->op1_buf1_addr[2*i+1]); addr2_y = (void **)(&cmd->op2_buf1_addr[2*i]); addr2_cbcr = (void **)(&cmd->op2_buf1_addr[2*i+1]); /* printk("module %s: [%d] %p %p %p %p\n", module->name, i, *addr1_y, *addr1_cbcr, *addr2_y, *addr2_cbcr); */ if ((*addr1_y && adsp_pmem_fixup(module, addr1_y, size1_y)) || (*addr1_cbcr && adsp_pmem_fixup(module, addr1_cbcr, size1_cbcr)) || (*addr2_y && adsp_pmem_fixup(module, addr2_y, size2_y)) || (*addr2_cbcr && adsp_pmem_fixup(module, addr2_cbcr, size2_cbcr))) return -1; } } default: if (cmd_id > 4) { MM_ERR("module %s: invalid VFE TABLE command \ id %d\n", 
module->name, cmd_id); return -1; } } return 0; } int adsp_vfe_verify_cmd(struct msm_adsp_module *module, unsigned int queue_id, void *cmd_data, size_t cmd_size) { switch (queue_id) { case QDSP_vfeCommandQueue: return verify_vfe_command(module, cmd_data, cmd_size); case QDSP_vfeCommandScaleQueue: return verify_vfe_command_scale(module, cmd_data, cmd_size); case QDSP_vfeCommandTableQueue: return verify_vfe_command_table(module, cmd_data, cmd_size); default: MM_ERR("module %s: unknown queue id %d\n", module->name, queue_id); return -1; } }
gpl-2.0
XXMrHyde/android_kernel_moto_shamu
drivers/net/wireless/rt2x00/rt2500usb.c
2105
62869
/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 59 Temple Place - Suite 330, Boston,
	MA 02111-1307, USA.
 */

/*
	Module: rt2500usb
	Abstract: rt2500usb device specific routines.
	Supported chipsets: RT2570.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "rt2x00.h"
#include "rt2x00usb.h"
#include "rt2500usb.h"

/*
 * Allow hardware encryption to be disabled.
 */
static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

/*
 * Register access.
 * All access to the CSR registers will go through the methods
 * rt2500usb_register_read and rt2500usb_register_write.
 * BBP and RF register require indirect register access,
 * and use the CSR registers BBPCSR and RFCSR to achieve this.
 * These indirect registers work with busy bits,
 * and we will try maximal REGISTER_BUSY_COUNT times to access
 * the register while taking a REGISTER_BUSY_DELAY us delay
 * between each attempt. When the busy bit is still set at that time,
 * the access attempt is considered to have failed,
 * and we will print an error.
 * If the csr_mutex is already held then the _lock variants must
 * be used instead.
 */
/* Read one 16-bit CSR register via a USB vendor request. */
static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
					   const unsigned int offset,
					   u16 *value)
{
	__le16 reg;
	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
				      USB_VENDOR_REQUEST_IN, offset,
				      &reg, sizeof(reg), REGISTER_TIMEOUT);
	*value = le16_to_cpu(reg);
}

/* As rt2500usb_register_read(), but for callers already holding csr_mutex. */
static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
						const unsigned int offset,
						u16 *value)
{
	__le16 reg;
	rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
				       USB_VENDOR_REQUEST_IN, offset,
				       &reg, sizeof(reg), REGISTER_TIMEOUT);
	*value = le16_to_cpu(reg);
}

/* Read @length bytes of consecutive CSR registers into @value (raw LE). */
static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev,
						const unsigned int offset,
						void *value, const u16 length)
{
	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
				      USB_VENDOR_REQUEST_IN, offset,
				      value, length,
				      REGISTER_TIMEOUT16(length));
}

/* Write one 16-bit CSR register via a USB vendor request. */
static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
					    const unsigned int offset,
					    u16 value)
{
	__le16 reg = cpu_to_le16(value);
	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
				      USB_VENDOR_REQUEST_OUT, offset,
				      &reg, sizeof(reg), REGISTER_TIMEOUT);
}

/* As rt2500usb_register_write(), but for callers already holding csr_mutex. */
static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
						 const unsigned int offset,
						 u16 value)
{
	__le16 reg = cpu_to_le16(value);
	rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE,
				       USB_VENDOR_REQUEST_OUT, offset,
				       &reg, sizeof(reg), REGISTER_TIMEOUT);
}

/* Write @length bytes (raw LE) to consecutive CSR registers. */
static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
						 const unsigned int offset,
						 void *value, const u16 length)
{
	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
				      USB_VENDOR_REQUEST_OUT, offset,
				      value, length,
				      REGISTER_TIMEOUT16(length));
}

/*
 * Poll @field of register @offset until it clears (returns 1), or give up
 * after REGISTER_BUSY_COUNT attempts (returns 0 with *reg = ~0).
 * Must be called with csr_mutex held.
 */
static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
				  const unsigned int offset,
				  struct rt2x00_field16 field,
				  u16 *reg)
{
	unsigned int i;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2500usb_register_read_lock(rt2x00dev, offset, reg);
		if (!rt2x00_get_field16(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev,
		   "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
		   offset, *reg);
	*reg = ~0;

	return 0;
}

/* Busy-wait helpers for the indirect BBP (PHY_CSR8) and RF (PHY_CSR10)
 * access registers; both require csr_mutex to be held. */
#define WAIT_FOR_BBP(__dev, __reg) \
	rt2500usb_regbusy_read((__dev), PHY_CSR8, PHY_CSR8_BUSY, (__reg))
#define WAIT_FOR_RF(__dev, __reg) \
	rt2500usb_regbusy_read((__dev), PHY_CSR10, PHY_CSR10_RF_BUSY, (__reg))

/* Write one BBP register indirectly through PHY_CSR7. */
static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev,
				const unsigned int word, const u8 value)
{
	u16 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the BBP becomes available, afterwards we
	 * can safely write the new data into the register.
	 */
	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field16(&reg, PHY_CSR7_DATA, value);
		rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
		rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 0);

		rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);
	}

	mutex_unlock(&rt2x00dev->csr_mutex);
}

/* Read one BBP register indirectly through PHY_CSR7. */
static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
			       const unsigned int word, u8 *value)
{
	u16 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the BBP becomes available, afterwards we
	 * can safely write the read request into the register.
	 * After the data has been written, we wait until hardware
	 * returns the correct value, if at any time the register
	 * doesn't become available in time, reg will be 0xffffffff
	 * which means we return 0xff to the caller.
*/ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word); rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); if (WAIT_FOR_BBP(rt2x00dev, &reg)) rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg); } *value = rt2x00_get_field16(reg, PHY_CSR7_DATA); mutex_unlock(&rt2x00dev->csr_mutex); } static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u16 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR9_RF_VALUE, value); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR9, reg); reg = 0; rt2x00_set_field16(&reg, PHY_CSR10_RF_VALUE, value >> 16); rt2x00_set_field16(&reg, PHY_CSR10_RF_NUMBER_OF_BITS, 20); rt2x00_set_field16(&reg, PHY_CSR10_RF_IF_SELECT, 0); rt2x00_set_field16(&reg, PHY_CSR10_RF_BUSY, 1); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR10, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } #ifdef CONFIG_RT2X00_LIB_DEBUGFS static void _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 *value) { rt2500usb_register_read(rt2x00dev, offset, (u16 *)value); } static void _rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 value) { rt2500usb_register_write(rt2x00dev, offset, value); } static const struct rt2x00debug rt2500usb_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = _rt2500usb_register_read, .write = _rt2500usb_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u16), .word_count = CSR_REG_SIZE / sizeof(u16), }, .eeprom = { .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { 
.read = rt2500usb_bbp_read, .write = rt2500usb_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt2500usb_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, }; #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u16 reg; rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg); return rt2x00_get_field16(reg, MAC_CSR19_VAL7); } #ifdef CONFIG_RT2X00_LIB_LEDS static void rt2500usb_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; u16 reg; rt2500usb_register_read(led->rt2x00dev, MAC_CSR20, &reg); if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) rt2x00_set_field16(&reg, MAC_CSR20_LINK, enabled); else if (led->type == LED_TYPE_ACTIVITY) rt2x00_set_field16(&reg, MAC_CSR20_ACTIVITY, enabled); rt2500usb_register_write(led->rt2x00dev, MAC_CSR20, reg); } static int rt2500usb_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); u16 reg; rt2500usb_register_read(led->rt2x00dev, MAC_CSR21, &reg); rt2x00_set_field16(&reg, MAC_CSR21_ON_PERIOD, *delay_on); rt2x00_set_field16(&reg, MAC_CSR21_OFF_PERIOD, *delay_off); rt2500usb_register_write(led->rt2x00dev, MAC_CSR21, reg); return 0; } static void rt2500usb_init_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = rt2500usb_brightness_set; led->led_dev.blink_set = rt2500usb_blink_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. 
*/ /* * rt2500usb does not differentiate between shared and pairwise * keys, so we should use the same function for both key types. */ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { u32 mask; u16 reg; enum cipher curr_cipher; if (crypto->cmd == SET_KEY) { /* * Disallow to set WEP key other than with index 0, * it is known that not work at least on some hardware. * SW crypto will be used in that case. */ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104) && key->keyidx != 0) return -EOPNOTSUPP; /* * Pairwise key will always be entry 0, but this * could collide with a shared key on the same * position... */ mask = TXRX_CSR0_KEY_ID.bit_mask; rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg); curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM); reg &= mask; if (reg && reg == mask) return -ENOSPC; reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); key->hw_key_idx += reg ? ffz(reg) : 0; /* * Hardware requires that all keys use the same cipher * (e.g. TKIP-only, AES-only, but not TKIP+AES). * If this is not the first key, compare the cipher with the * first one and fall back to SW crypto if not the same. */ if (key->hw_key_idx > 0 && crypto->cipher != curr_cipher) return -EOPNOTSUPP; rt2500usb_register_multiwrite(rt2x00dev, KEY_ENTRY(key->hw_key_idx), crypto->key, sizeof(crypto->key)); /* * The driver does not support the IV/EIV generation * in hardware. However it demands the data to be provided * both separately as well as inside the frame. * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib * to ensure rt2x00lib will not strip the data from the * frame after the copy, now we must tell mac80211 * to generate the IV/EIV data. */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; } /* * TXRX_CSR0_KEY_ID contains only single-bit fields to indicate * a particular key is valid. 
*/ rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg); rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, crypto->cipher); rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); mask = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); if (crypto->cmd == SET_KEY) mask |= 1 << key->hw_key_idx; else if (crypto->cmd == DISABLE_KEY) mask &= ~(1 << key->hw_key_idx); rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, mask); rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); return 0; } static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u16 reg; /* * Start configuration steps. * Note that the version error will always be dropped * and broadcast frames will always be accepted since * there is no filter for it at this time. */ rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CRC, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_PHYSICAL, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, !(filter_flags & FIF_PROMISC_IN_BSS)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS, !(filter_flags & FIF_PROMISC_IN_BSS) && !rt2x00dev->intf_ap_count); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST, !(filter_flags & FIF_ALLMULTI)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_BROADCAST, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); } static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { unsigned int bcn_preload; u16 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable beacon config */ bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg); rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6); rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 
2 * (conf->type != NL80211_IFTYPE_STATION)); rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg); /* * Enable synchronisation. */ rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg); rt2x00_set_field16(&reg, TXRX_CSR18_OFFSET, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); } if (flags & CONFIG_UPDATE_MAC) rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac, (3 * sizeof(__le16))); if (flags & CONFIG_UPDATE_BSSID) rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, conf->bssid, (3 * sizeof(__le16))); } static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { u16 reg; if (changed & BSS_CHANGED_ERP_PREAMBLE) { rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg); rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE, !!erp->short_preamble); rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); } if (changed & BSS_CHANGED_BASIC_RATES) rt2500usb_register_write(rt2x00dev, TXRX_CSR11, erp->basic_rates); if (changed & BSS_CHANGED_BEACON_INT) { rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg); rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, erp->beacon_int * 4); rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); } if (changed & BSS_CHANGED_ERP_SLOT) { rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time); rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs); rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs); } } static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r2; u8 r14; u16 csr5; u16 csr6; /* * We should never come here because rt2x00lib is supposed * to catch this and send us the correct antenna explicitely. 
*/ BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); rt2500usb_bbp_read(rt2x00dev, 2, &r2); rt2500usb_bbp_read(rt2x00dev, 14, &r14); rt2500usb_register_read(rt2x00dev, PHY_CSR5, &csr5); rt2500usb_register_read(rt2x00dev, PHY_CSR6, &csr6); /* * Configure the TX antenna. */ switch (ant->tx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 1); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 1); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 1); break; case ANTENNA_A: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2); break; } /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 1); break; case ANTENNA_A: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); break; } /* * RT2525E and RT5222 need to flip TX I/Q */ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) { rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); /* * RT2525E does not need RX I/Q Flip. */ if (rt2x00_rf(rt2x00dev, RF2525E)) rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); } else { rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 0); } rt2500usb_bbp_write(rt2x00dev, 2, r2); rt2500usb_bbp_write(rt2x00dev, 14, r14); rt2500usb_register_write(rt2x00dev, PHY_CSR5, csr5); rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6); } static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf, const int txpower) { /* * Set TXpower. 
*/ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); /* * For RT2525E we should first set the channel to half band higher. */ if (rt2x00_rf(rt2x00dev, RF2525E)) { static const u32 vals[] = { 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, 0x000008ba, 0x000008be, 0x000008b7, 0x00000902, 0x00000902, 0x00000906 }; rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); if (rf->rf4) rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); } rt2500usb_rf_write(rt2x00dev, 1, rf->rf1); rt2500usb_rf_write(rt2x00dev, 2, rf->rf2); rt2500usb_rf_write(rt2x00dev, 3, rf->rf3); if (rf->rf4) rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); } static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev, const int txpower) { u32 rf3; rt2x00_rf_read(rt2x00dev, 3, &rf3); rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); rt2500usb_rf_write(rt2x00dev, 3, rf3); } static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ? 
STATE_SLEEP : STATE_AWAKE; u16 reg; if (state == STATE_SLEEP) { rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg); rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, rt2x00dev->beacon_int - 20); rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP, libconf->conf->listen_interval - 1); /* We must first disable autowake before it can be enabled */ rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); } else { rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg); rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); } rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); } static void rt2500usb_config(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf, const unsigned int flags) { if (flags & IEEE80211_CONF_CHANGE_CHANNEL) rt2500usb_config_channel(rt2x00dev, &libconf->rf, libconf->conf->power_level); if ((flags & IEEE80211_CONF_CHANGE_POWER) && !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) rt2500usb_config_txpower(rt2x00dev, libconf->conf->power_level); if (flags & IEEE80211_CONF_CHANGE_PS) rt2500usb_config_ps(rt2x00dev, libconf); } /* * Link tuning */ static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u16 reg; /* * Update FCS error count from register. */ rt2500usb_register_read(rt2x00dev, STA_CSR0, &reg); qual->rx_failed = rt2x00_get_field16(reg, STA_CSR0_FCS_ERROR); /* * Update False CCA count from register. 
 */
	rt2500usb_register_read(rt2x00dev, STA_CSR3, &reg);
	qual->false_cca = rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR);
}

/*
 * Restore the BBP tuning registers (R24/R25/R61 and the VGC register
 * R17) to the "low" values stored in the EEPROM, and remember the
 * resulting VGC level so the link tuner starts from a known state.
 */
static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
				  struct link_qual *qual)
{
	u16 eeprom;
	u16 value;

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &eeprom);
	value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R24_LOW);
	rt2500usb_bbp_write(rt2x00dev, 24, value);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &eeprom);
	value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R25_LOW);
	rt2500usb_bbp_write(rt2x00dev, 25, value);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &eeprom);
	value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R61_LOW);
	rt2500usb_bbp_write(rt2x00dev, 61, value);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &eeprom);
	value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER);
	rt2500usb_bbp_write(rt2x00dev, 17, value);

	qual->vgc_level = value;
}

/*
 * Queue handlers.
 */
static void rt2500usb_start_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u16 reg;

	switch (queue->qid) {
	case QID_RX:
		/* Clear the RX-disable bit to let frames in. */
		rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
		rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 0);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
		break;
	case QID_BEACON:
		/* Enable TSF counting, TBTT and beacon generation. */
		rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
		rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
		rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
		rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
		break;
	default:
		break;
	}
}

static void rt2500usb_stop_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u16 reg;

	switch (queue->qid) {
	case QID_RX:
		rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
		rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
		break;
	case QID_BEACON:
		rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
		rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
		rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
		rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
		break;
	default:
		break;
	}
}

/*
 * Initialization functions.
 */
static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u16 reg;

	rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0x0001,
				    USB_MODE_TEST, REGISTER_TIMEOUT);
	rt2x00usb_vendor_request_sw(rt2x00dev, USB_SINGLE_WRITE, 0x0308,
				    0x00f0, REGISTER_TIMEOUT);

	/* Keep RX disabled during initialization. */
	rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
	rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);

	rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x1111);
	rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x1e11);

	/* Pulse the soft reset: assert... */
	rt2500usb_register_read(rt2x00dev, MAC_CSR1, &reg);
	rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 1);
	rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 1);
	rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0);
	rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);

	/* ...then deassert, host still not marked ready. */
	rt2500usb_register_read(rt2x00dev, MAC_CSR1, &reg);
	rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0);
	rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0);
	rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0);
	rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);

	rt2500usb_register_read(rt2x00dev, TXRX_CSR5, &reg);
	rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0, 13);
	rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0_VALID, 1);
	rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1, 12);
	rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1_VALID, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR5, reg);

	rt2500usb_register_read(rt2x00dev, TXRX_CSR6, &reg);
	rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0, 10);
	rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0_VALID, 1);
	rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1, 11);
	rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1_VALID, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR6, reg);

	rt2500usb_register_read(rt2x00dev, TXRX_CSR7, &reg);
	rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0, 7);
	rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0_VALID, 1);
	rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1, 6);
	rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1_VALID, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR7, reg);

	rt2500usb_register_read(rt2x00dev, TXRX_CSR8, &reg);
	rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0, 5);
	rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0_VALID, 1);
	rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1, 0);
	rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1_VALID, 0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg);

	/* Start with TSF/beacon generation fully disabled. */
	rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
	rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
	rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, 0);
	rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
	rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);

	rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f);
	rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d);

	if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
		return -EBUSY;

	/* Now signal the hardware that the host is ready. */
	rt2500usb_register_read(rt2x00dev, MAC_CSR1, &reg);
	rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0);
	rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0);
	rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
	rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);

	/* LNA setup differs between pre-C and C+ chip revisions. */
	if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
		rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
		rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
	} else {
		reg = 0;
		rt2x00_set_field16(&reg, PHY_CSR2_LNA, 1);
		rt2x00_set_field16(&reg, PHY_CSR2_LNA_MODE, 3);
	}
	rt2500usb_register_write(rt2x00dev, PHY_CSR2, reg);

	rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0002);
	rt2500usb_register_write(rt2x00dev, MAC_CSR22, 0x0053);
	rt2500usb_register_write(rt2x00dev, MAC_CSR15, 0x01ee);
	rt2500usb_register_write(rt2x00dev, MAC_CSR16, 0x0000);

	rt2500usb_register_read(rt2x00dev, MAC_CSR8, &reg);
	rt2x00_set_field16(&reg, MAC_CSR8_MAX_FRAME_UNIT,
			   rt2x00dev->rx->data_size);
	rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg);

	rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
	rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, CIPHER_NONE);
	rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER);
	rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg);

	rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
	rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, 90);
	rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);

	rt2500usb_register_read(rt2x00dev, PHY_CSR4, &reg);
	rt2x00_set_field16(&reg, PHY_CSR4_LOW_RF_LE, 1);
	rt2500usb_register_write(rt2x00dev, PHY_CSR4, reg);

	rt2500usb_register_read(rt2x00dev, TXRX_CSR1, &reg);
	rt2x00_set_field16(&reg, TXRX_CSR1_AUTO_SEQUENCE, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg);

	return 0;
}

/*
 * Poll BBP register 0 until it returns something other than the
 * all-ones/all-zeroes bus values, proving the BBP answers accesses.
 */
static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u8 value;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2500usb_bbp_read(rt2x00dev, 0, &value);
		if ((value != 0xff) && (value != 0x00))
			return 0;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
	return -EACCES;
}

/*
 * Program the BBP default register values, then apply any per-board
 * overrides stored in the EEPROM BBP area.
 */
static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u16 eeprom;
	u8 value;
	u8 reg_id;

	if (unlikely(rt2500usb_wait_bbp_ready(rt2x00dev)))
		return -EACCES;

	rt2500usb_bbp_write(rt2x00dev, 3, 0x02);
	rt2500usb_bbp_write(rt2x00dev, 4, 0x19);
	rt2500usb_bbp_write(rt2x00dev, 14, 0x1c);
	rt2500usb_bbp_write(rt2x00dev, 15, 0x30);
	rt2500usb_bbp_write(rt2x00dev, 16, 0xac);
	rt2500usb_bbp_write(rt2x00dev, 18, 0x18);
	rt2500usb_bbp_write(rt2x00dev, 19, 0xff);
	rt2500usb_bbp_write(rt2x00dev, 20, 0x1e);
	rt2500usb_bbp_write(rt2x00dev, 21, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 22, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 23, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 24, 0x80);
	rt2500usb_bbp_write(rt2x00dev, 25, 0x50);
	rt2500usb_bbp_write(rt2x00dev, 26, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 27, 0x23);
	rt2500usb_bbp_write(rt2x00dev, 30, 0x10);
	rt2500usb_bbp_write(rt2x00dev, 31, 0x2b);
	rt2500usb_bbp_write(rt2x00dev, 32, 0xb9);
	rt2500usb_bbp_write(rt2x00dev, 34, 0x12);
	rt2500usb_bbp_write(rt2x00dev, 35, 0x50);
	rt2500usb_bbp_write(rt2x00dev, 39, 0xc4);
	rt2500usb_bbp_write(rt2x00dev, 40, 0x02);
	rt2500usb_bbp_write(rt2x00dev, 41, 0x60);
	rt2500usb_bbp_write(rt2x00dev, 53, 0x10);
	rt2500usb_bbp_write(rt2x00dev, 54, 0x18);
	rt2500usb_bbp_write(rt2x00dev, 56, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 57, 0x10);
	rt2500usb_bbp_write(rt2x00dev, 58, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 61, 0x60);
	rt2500usb_bbp_write(rt2x00dev, 62, 0x10);
	rt2500usb_bbp_write(rt2x00dev, 75, 0xff);

	/* EEPROM words of 0xffff/0x0000 mean "no override". */
	for (i = 0; i < EEPROM_BBP_SIZE; i++) {
		rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);

		if (eeprom != 0xffff && eeprom != 0x0000) {
			reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
			value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
			rt2500usb_bbp_write(rt2x00dev, reg_id, value);
		}
	}

	return 0;
}

/*
 * Device state switch handlers.
 */
static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	/*
	 * Initialize all registers.
	 */
	if (unlikely(rt2500usb_init_registers(rt2x00dev) ||
		     rt2500usb_init_bbp(rt2x00dev)))
		return -EIO;

	return 0;
}

static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121);
	rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121);

	/*
	 * Disable synchronisation.
	 */
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0);

	rt2x00usb_disable_radio(rt2x00dev);
}

/*
 * Request a BBP/RF power state transition through MAC_CSR17 and poll
 * until both the BBP and the RF report the requested state.
 */
static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev,
			       enum dev_state state)
{
	u16 reg;
	u16 reg2;
	unsigned int i;
	char put_to_sleep;
	char bbp_state;
	char rf_state;

	put_to_sleep = (state != STATE_AWAKE);

	reg = 0;
	rt2x00_set_field16(&reg, MAC_CSR17_BBP_DESIRE_STATE, state);
	rt2x00_set_field16(&reg, MAC_CSR17_RF_DESIRE_STATE, state);
	rt2x00_set_field16(&reg, MAC_CSR17_PUT_TO_SLEEP, put_to_sleep);
	rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg);
	rt2x00_set_field16(&reg, MAC_CSR17_SET_STATE, 1);
	rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg);

	/*
	 * Device is not guaranteed to be in the requested state yet.
	 * We must wait until the register indicates that the
	 * device has entered the correct state.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2500usb_register_read(rt2x00dev, MAC_CSR17, &reg2);
		bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE);
		rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE);
		if (bbp_state == state && rf_state == state)
			return 0;
		/* Re-issue the request before the next poll. */
		rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg);
		msleep(30);
	}

	return -EBUSY;
}

static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
				      enum dev_state state)
{
	int retval = 0;

	switch (state) {
	case STATE_RADIO_ON:
		retval = rt2500usb_enable_radio(rt2x00dev);
		break;
	case STATE_RADIO_OFF:
		rt2500usb_disable_radio(rt2x00dev);
		break;
	case STATE_RADIO_IRQ_ON:
	case STATE_RADIO_IRQ_OFF:
		/* No support, but no error either */
		break;
	case STATE_DEEP_SLEEP:
	case STATE_SLEEP:
	case STATE_STANDBY:
	case STATE_AWAKE:
		retval = rt2500usb_set_state(rt2x00dev, state);
		break;
	default:
		retval = -ENOTSUPP;
		break;
	}

	if (unlikely(retval))
		rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
			   state, retval);

	return retval;
}

/*
 * TX descriptor initialization
 */
static void rt2500usb_write_tx_desc(struct queue_entry *entry,
				    struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc =
	    get_skb_frame_desc(entry->skb);
	__le32 *txd = (__le32 *) entry->skb->data;
	u32 word;

	/*
	 * Start writing the descriptor words.
	 */
	rt2x00_desc_read(txd, 0, &word);
	rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
	rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
			   test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_ACK,
			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
			   test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_OFDM,
			   (txdesc->rate_mode == RATE_MODE_OFDM));
	rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
			   test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
	rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
	rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
	rt2x00_desc_write(txd, 0, word);

	rt2x00_desc_read(txd, 1, &word);
	rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
	rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs);
	rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
	rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
	rt2x00_desc_write(txd, 1, word);

	rt2x00_desc_read(txd, 2, &word);
	rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
	rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
	rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
			   txdesc->u.plcp.length_low);
	rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
			   txdesc->u.plcp.length_high);
	rt2x00_desc_write(txd, 2, word);

	/* Words 3/4 carry the IV/EIV when hardware encryption is used. */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
		_rt2x00_desc_write(txd, 3, skbdesc->iv[0]);
		_rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
	}

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->flags |= SKBDESC_DESC_IN_SKB;
	skbdesc->desc = txd;
	skbdesc->desc_len = TXD_DESC_SIZE;
}

/*
 * TX data initialization
 */
static void rt2500usb_beacondone(struct urb *urb);

static void rt2500usb_write_beacon(struct queue_entry *entry,
				   struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
	int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint);
	int length;
	u16 reg, reg0;

	/*
	 * Disable beaconing while we are reloading the beacon data,
	 * otherwise we might be sending out invalid data.
	 */
	rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
	rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);

	/*
	 * Add space for the descriptor in front of the skb.
	 */
	skb_push(entry->skb, TXD_DESC_SIZE);
	memset(entry->skb->data, 0, TXD_DESC_SIZE);

	/*
	 * Write the TX descriptor for the beacon.
	 */
	rt2500usb_write_tx_desc(entry, txdesc);

	/*
	 * Dump beacon to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);

	/*
	 * USB devices cannot blindly pass the skb->len as the
	 * length of the data to usb_fill_bulk_urb. Pass the skb
	 * to the driver to determine what the length should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(entry);

	usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe,
			  entry->skb->data, length, rt2500usb_beacondone,
			  entry);

	/*
	 * Second we need to create the guardian byte.
	 * We only need a single byte, so lets recycle
	 * the 'flags' field we are not using for beacons.
	 */
	bcn_priv->guardian_data = 0;
	usb_fill_bulk_urb(bcn_priv->guardian_urb, usb_dev, pipe,
			  &bcn_priv->guardian_data, 1, rt2500usb_beacondone,
			  entry);

	/*
	 * Send out the guardian byte.
	 */
	usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC);

	/*
	 * Enable beaconing again.
	 */
	rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
	rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
	reg0 = reg;
	rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
	/*
	 * Beacon generation will fail initially.
	 * To prevent this we need to change the TXRX_CSR19
	 * register several times (reg0 is the same as reg
	 * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
	 * and 1 in reg).
	 */
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
}

static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
{
	int length;

	/*
	 * The length _must_ be a multiple of 2,
	 * but it must _not_ be a multiple of the USB packet size.
	 */
	length = roundup(entry->skb->len, 2);
	length += (2 * !(length % entry->queue->usb_maxpacket));

	return length;
}

/*
 * RX control handlers
 */
static void rt2500usb_fill_rxdone(struct queue_entry *entry,
				  struct rxdone_entry_desc *rxdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	/* The RX descriptor sits at the tail of the transferred data. */
	__le32 *rxd =
	    (__le32 *)(entry->skb->data +
		       (entry_priv->urb->actual_length -
			entry->queue->desc_size));
	u32 word0;
	u32 word1;

	/*
	 * Copy descriptor to the skbdesc->desc buffer, making it safe from moving of
	 * frame data in rt2x00usb.
	 */
	memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
	rxd = (__le32 *)skbdesc->desc;

	/*
	 * It is now safe to read the descriptor on all architectures.
	 */
	rt2x00_desc_read(rxd, 0, &word0);
	rt2x00_desc_read(rxd, 1, &word1);

	if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
	if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;

	rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
	if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
		rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;

	if (rxdesc->cipher != CIPHER_NONE) {
		_rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
		_rt2x00_desc_read(rxd, 3, &rxdesc->iv[1]);
		rxdesc->dev_flags |= RXDONE_CRYPTO_IV;

		/* ICV is located at the end of frame */

		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
	}

	/*
	 * Obtain the status about this packet.
	 * When frame was received with an OFDM bitrate,
	 * the signal is the PLCP value. If it was received with
	 * a CCK bitrate the signal is the rate in 100kbit/s.
	 */
	rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
	rxdesc->rssi =
	    rt2x00_get_field32(word1, RXD_W1_RSSI) - rt2x00dev->rssi_offset;
	rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);

	if (rt2x00_get_field32(word0, RXD_W0_OFDM))
		rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
	else
		rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
	if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	/*
	 * Adjust the skb memory window to the frame boundaries.
	 */
	skb_trim(entry->skb, rxdesc->size);
}

/*
 * Interrupt functions.
 */
static void rt2500usb_beacondone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
		return;

	/*
	 * Check if this was the guardian beacon,
	 * if that was the case we need to send the real beacon now.
	 * Otherwise we should free the sk_buff, the device
	 * should be doing the rest of the work now.
	 */
	if (bcn_priv->guardian_urb == urb) {
		usb_submit_urb(bcn_priv->urb, GFP_ATOMIC);
	} else if (bcn_priv->urb == urb) {
		dev_kfree_skb(entry->skb);
		entry->skb = NULL;
	}
}

/*
 * Device probe functions.
 */

/*
 * Read the EEPROM into the device cache and substitute sane defaults
 * for any word that reads back as blank (0xffff).
 */
static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
	u16 word;
	u8 *mac;
	u8 bbp;

	rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE);

	/*
	 * Start validation of the data that has been read.
	 */
	mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
	if (!is_valid_ether_addr(mac)) {
		/* Invalid MAC in EEPROM: fall back to a random address. */
		eth_random_addr(mac);
		rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT,
				   ANTENNA_SW_DIVERSITY);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT,
				   ANTENNA_SW_DIVERSITY);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE,
				   LED_MODE_DEFAULT);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
		rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
		rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI,
				   DEFAULT_RSSI_OFFSET);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word);
		rt2x00_eeprom_dbg(rt2x00dev, "Calibrate offset: 0x%04x\n",
				  word);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune: 0x%04x\n", word);
	}

	/*
	 * Switch lower vgc bound to current BBP R17 value,
	 * lower the value a bit for better quality.
	 */
	rt2500usb_bbp_read(rt2x00dev, 17, &bbp);
	bbp -= 6;

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune vgc: 0x%04x\n", word);
	} else {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r17: 0x%04x\n", word);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r24: 0x%04x\n", word);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r25: 0x%04x\n", word);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r61: 0x%04x\n", word);
	}

	return 0;
}

/*
 * Interpret the validated EEPROM contents: identify the RT/RF chipset,
 * default antenna setup, LED mode and RSSI offset.
 */
static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
{
	u16 reg;
	u16 value;
	u16 eeprom;

	/*
	 * Read EEPROM word for configuration.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);

	/*
	 * Identify RF chipset.
	 */
	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
	rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
	rt2x00_set_chip(rt2x00dev, RT2570, value, reg);

	if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) {
		rt2x00_err(rt2x00dev, "Invalid RT chipset detected\n");
		return -ENODEV;
	}

	if (!rt2x00_rf(rt2x00dev, RF2522) &&
	    !rt2x00_rf(rt2x00dev, RF2523) &&
	    !rt2x00_rf(rt2x00dev, RF2524) &&
	    !rt2x00_rf(rt2x00dev, RF2525) &&
	    !rt2x00_rf(rt2x00dev, RF2525E) &&
	    !rt2x00_rf(rt2x00dev, RF5222)) {
		rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n");
		return -ENODEV;
	}

	/*
	 * Identify default antenna configuration.
	 */
	rt2x00dev->default_ant.tx =
	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT);
	rt2x00dev->default_ant.rx =
	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT);

	/*
	 * When the eeprom indicates SW_DIVERSITY use HW_DIVERSITY instead.
	 * I am not 100% sure about this, but the legacy drivers do not
	 * indicate antenna swapping in software is required when
	 * diversity is enabled.
	 */
	if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
		rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY;
	if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
		rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY;

	/*
	 * Store led mode, for correct led behaviour.
	 */
#ifdef CONFIG_RT2X00_LIB_LEDS
	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);

	rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
	if (value == LED_MODE_TXRX_ACTIVITY ||
	    value == LED_MODE_DEFAULT ||
	    value == LED_MODE_ASUS)
		rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual,
				   LED_TYPE_ACTIVITY);
#endif /* CONFIG_RT2X00_LIB_LEDS */

	/*
	 * Detect if this device has a hardware controlled radio.
	 */
	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
		__set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);

	/*
	 * Read the RSSI <-> dBm offset information.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom);
	rt2x00dev->rssi_offset =
	    rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI);

	return 0;
}

/*
 * RF value list for RF2522
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2522[] = {
	{ 1,  0x00002050, 0x000c1fda, 0x00000101, 0 },
	{ 2,  0x00002050, 0x000c1fee, 0x00000101, 0 },
	{ 3,  0x00002050, 0x000c2002, 0x00000101, 0 },
	{ 4,  0x00002050, 0x000c2016, 0x00000101, 0 },
	{ 5,  0x00002050, 0x000c202a, 0x00000101, 0 },
	{ 6,  0x00002050, 0x000c203e, 0x00000101, 0 },
	{ 7,  0x00002050, 0x000c2052, 0x00000101, 0 },
	{ 8,  0x00002050, 0x000c2066, 0x00000101, 0 },
	{ 9,  0x00002050, 0x000c207a, 0x00000101, 0 },
	{ 10, 0x00002050, 0x000c208e, 0x00000101, 0 },
	{ 11, 0x00002050, 0x000c20a2, 0x00000101, 0 },
	{ 12, 0x00002050, 0x000c20b6, 0x00000101, 0 },
	{ 13, 0x00002050, 0x000c20ca, 0x00000101, 0 },
	{ 14, 0x00002050, 0x000c20fa, 0x00000101, 0 },
};

/*
 * RF value list for RF2523
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2523[] = {
	{ 1,  0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b },
	{ 2,  0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b },
	{ 3,  0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b },
	{ 4,  0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b },
	{ 5,  0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b },
	{ 6,  0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b },
	{ 7,  0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b },
	{ 8,  0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b },
	{ 9,  0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b },
	{ 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b },
	{ 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b },
	{ 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b },
	{ 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b },
	{ 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 },
};

/*
 * RF value list for RF2524
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2524[] = {
	{ 1,  0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b },
	{ 2,  0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b },
	{ 3,  0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b },
	{ 4,  0x00032020, 0x00000caa, 0x00000101, 0x00000a1b },
	{ 5,  0x00032020, 0x00000cae, 0x00000101, 0x00000a1b },
	{ 6,  0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b },
	{ 7,  0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b },
	{ 8,  0x00032020, 0x00000cba, 0x00000101, 0x00000a1b },
	{ 9,  0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b },
	{ 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b },
	{ 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b },
	{ 12, 0x00032020, 0x00000d0a, 0x00000101, 0x00000a1b },
	{ 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b },
	{ 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 },
};

/*
 * RF value list for RF2525
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2525[] = {
	{ 1,  0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b },
	{ 2,  0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b },
	{ 3,  0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b },
	{ 4,  0x00022020, 0x00080caa, 0x00060111, 0x00000a1b },
	{ 5,  0x00022020, 0x00080cae, 0x00060111, 0x00000a1b },
	{ 6,  0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b },
	{ 7,  0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b },
	{ 8,  0x00022020, 0x00080cba, 0x00060111, 0x00000a1b },
	{ 9,  0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b },
	{ 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b },
	{ 11, 0x00022020, 0x00080d06, 0x00060111, 0x00000a1b },
	{ 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b },
	{ 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b },
	{ 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 },
};

/*
 * RF value list for RF2525e
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2525e[] = {
	{ 1,  0x00022010, 0x0000089a, 0x00060111, 0x00000e1b },
	{ 2,  0x00022010, 0x0000089e, 0x00060111, 0x00000e07 },
	{ 3,  0x00022010, 0x0000089e,
0x00060111, 0x00000e1b }, { 4, 0x00022010, 0x000008a2, 0x00060111, 0x00000e07 }, { 5, 0x00022010, 0x000008a2, 0x00060111, 0x00000e1b }, { 6, 0x00022010, 0x000008a6, 0x00060111, 0x00000e07 }, { 7, 0x00022010, 0x000008a6, 0x00060111, 0x00000e1b }, { 8, 0x00022010, 0x000008aa, 0x00060111, 0x00000e07 }, { 9, 0x00022010, 0x000008aa, 0x00060111, 0x00000e1b }, { 10, 0x00022010, 0x000008ae, 0x00060111, 0x00000e07 }, { 11, 0x00022010, 0x000008ae, 0x00060111, 0x00000e1b }, { 12, 0x00022010, 0x000008b2, 0x00060111, 0x00000e07 }, { 13, 0x00022010, 0x000008b2, 0x00060111, 0x00000e1b }, { 14, 0x00022010, 0x000008b6, 0x00060111, 0x00000e23 }, }; /* * RF value list for RF5222 * Supports: 2.4 GHz & 5.2 GHz */ static const struct rf_channel rf_vals_5222[] = { { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, { 64, 0x00022010, 0x000188b2, 0x00000101, 
0x00000a1f }, /* 802.11 HyperLan 2 */ { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, { 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f }, { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, /* 802.11 UNII */ { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, { 149, 0x00022020, 0x000090a6, 0x00000101, 0x00000a07 }, { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, }; static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; char *tx_power; unsigned int i; /* * Initialize all hw fields. * * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are * capable of sending the buffered frames out after the DTIM * transmission using rt2x00lib_beacondone. This will send out * multicast and broadcast traffic immediately instead of buffering it * infinitly and thus dropping it after some time. */ rt2x00dev->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK; SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * Initialize hw_mode information. 
	 */
	spec->supported_bands = SUPPORT_BAND_2GHZ;
	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;

	if (rt2x00_rf(rt2x00dev, RF2522)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
		spec->channels = rf_vals_bg_2522;
	} else if (rt2x00_rf(rt2x00dev, RF2523)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
		spec->channels = rf_vals_bg_2523;
	} else if (rt2x00_rf(rt2x00dev, RF2524)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
		spec->channels = rf_vals_bg_2524;
	} else if (rt2x00_rf(rt2x00dev, RF2525)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
		spec->channels = rf_vals_bg_2525;
	} else if (rt2x00_rf(rt2x00dev, RF2525E)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
		spec->channels = rf_vals_bg_2525e;
	} else if (rt2x00_rf(rt2x00dev, RF5222)) {
		spec->supported_bands |= SUPPORT_BAND_5GHZ;
		spec->num_channels = ARRAY_SIZE(rf_vals_5222);
		spec->channels = rf_vals_5222;
	}

	/*
	 * Create channel information array
	 */
	info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	spec->channels_info = info;

	/* 2.4 GHz channels take their power from the EEPROM... */
	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
	for (i = 0; i < 14; i++) {
		info[i].max_power = MAX_TXPOWER;
		info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
	}

	/* ...5 GHz channels (RF5222 only) use the driver default. */
	if (spec->num_channels > 14) {
		for (i = 14; i < spec->num_channels; i++) {
			info[i].max_power = MAX_TXPOWER;
			info[i].default_power1 = DEFAULT_TXPOWER;
		}
	}

	return 0;
}

static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;
	u16 reg;

	/*
	 * Allocate eeprom data.
	 */
	retval = rt2500usb_validate_eeprom(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2500usb_init_eeprom(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Enable rfkill polling by setting GPIO direction of the
	 * rfkill switch GPIO pin correctly.
	 */
	rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
	rt2x00_set_field16(&reg, MAC_CSR19_DIR0, 0);
	rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);

	/*
	 * Initialize hw specifications.
	 */
	retval = rt2500usb_probe_hw_mode(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * This device requires the atim queue
	 */
	__set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags);
	if (!modparam_nohwcrypt) {
		__set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
		__set_bit(REQUIRE_COPY_IV, &rt2x00dev->cap_flags);
	}
	__set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);

	/*
	 * Set the rssi offset.
	 * NOTE(review): this overwrites the EEPROM-derived rssi_offset
	 * that rt2500usb_init_eeprom() read earlier — confirm intended.
	 */
	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;

	return 0;
}

static const struct ieee80211_ops rt2500usb_mac80211_ops = {
	.tx			= rt2x00mac_tx,
	.start			= rt2x00mac_start,
	.stop			= rt2x00mac_stop,
	.add_interface		= rt2x00mac_add_interface,
	.remove_interface	= rt2x00mac_remove_interface,
	.config			= rt2x00mac_config,
	.configure_filter	= rt2x00mac_configure_filter,
	.set_tim		= rt2x00mac_set_tim,
	.set_key		= rt2x00mac_set_key,
	.sw_scan_start		= rt2x00mac_sw_scan_start,
	.sw_scan_complete	= rt2x00mac_sw_scan_complete,
	.get_stats		= rt2x00mac_get_stats,
	.bss_info_changed	= rt2x00mac_bss_info_changed,
	.conf_tx		= rt2x00mac_conf_tx,
	.rfkill_poll		= rt2x00mac_rfkill_poll,
	.flush			= rt2x00mac_flush,
	.set_antenna		= rt2x00mac_set_antenna,
	.get_antenna		= rt2x00mac_get_antenna,
	.get_ringparam		= rt2x00mac_get_ringparam,
	.tx_frames_pending	= rt2x00mac_tx_frames_pending,
};

static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
	.probe_hw		= rt2500usb_probe_hw,
	.initialize		= rt2x00usb_initialize,
	.uninitialize		= rt2x00usb_uninitialize,
	.clear_entry		= rt2x00usb_clear_entry,
	.set_device_state	= rt2500usb_set_device_state,
	.rfkill_poll		= rt2500usb_rfkill_poll,
	.link_stats		= rt2500usb_link_stats,
	.reset_tuner		= rt2500usb_reset_tuner,
	.watchdog		= rt2x00usb_watchdog,
	.start_queue		= rt2500usb_start_queue,
	.kick_queue		= rt2x00usb_kick_queue,
	.stop_queue		= rt2500usb_stop_queue,
	.flush_queue		= rt2x00usb_flush_queue,
	.write_tx_desc		= rt2500usb_write_tx_desc,
	.write_beacon		= rt2500usb_write_beacon,
	.get_tx_data_len	= rt2500usb_get_tx_data_len,
	.fill_rxdone		= rt2500usb_fill_rxdone,
	.config_shared_key	= rt2500usb_config_key,
	.config_pairwise_key	= rt2500usb_config_key,
	.config_filter		= rt2500usb_config_filter,
	.config_intf		= rt2500usb_config_intf,
	.config_erp		= rt2500usb_config_erp,
	.config_ant		= rt2500usb_config_ant,
	.config			= rt2500usb_config,
};

static const struct data_queue_desc rt2500usb_queue_rx = {
	.entry_num		= 32,
	.data_size		= DATA_FRAME_SIZE,
	.desc_size		= RXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_usb),
};

static const struct data_queue_desc rt2500usb_queue_tx = {
	.entry_num		= 32,
	.data_size		= DATA_FRAME_SIZE,
	.desc_size		= TXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_usb),
};

static const struct data_queue_desc rt2500usb_queue_bcn = {
	.entry_num		= 1,
	.data_size		= MGMT_FRAME_SIZE,
	.desc_size		= TXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_usb_bcn),
};

static const struct data_queue_desc rt2500usb_queue_atim = {
	.entry_num		= 8,
	.data_size		= DATA_FRAME_SIZE,
	.desc_size		= TXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_usb),
};

static const struct rt2x00_ops rt2500usb_ops = {
	.name			= KBUILD_MODNAME,
	.max_ap_intf		= 1,
	.eeprom_size		= EEPROM_SIZE,
	.rf_size		= RF_SIZE,
	.tx_queues		= NUM_TX_QUEUES,
	.extra_tx_headroom	= TXD_DESC_SIZE,
	.rx			= &rt2500usb_queue_rx,
	.tx			= &rt2500usb_queue_tx,
	.bcn			= &rt2500usb_queue_bcn,
	.atim			= &rt2500usb_queue_atim,
	.lib			= &rt2500usb_rt2x00_ops,
	.hw			= &rt2500usb_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
	.debugfs		= &rt2500usb_rt2x00debug,
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
};

/*
 * rt2500usb module information.
 */
static struct usb_device_id rt2500usb_device_table[] = {
	/* ASUS */
	{ USB_DEVICE(0x0b05, 0x1706) },
	{ USB_DEVICE(0x0b05, 0x1707) },
	/* Belkin */
	{ USB_DEVICE(0x050d, 0x7050) },	/* FCC ID: K7SF5D7050A ver. 2.x */
	{ USB_DEVICE(0x050d, 0x7051) },
	/* Cisco Systems */
	{ USB_DEVICE(0x13b1, 0x000d) },
	{ USB_DEVICE(0x13b1, 0x0011) },
	{ USB_DEVICE(0x13b1, 0x001a) },
	/* Conceptronic */
	{ USB_DEVICE(0x14b2, 0x3c02) },
	/* D-LINK */
	{ USB_DEVICE(0x2001, 0x3c00) },
	/* Gigabyte */
	{ USB_DEVICE(0x1044, 0x8001) },
	{ USB_DEVICE(0x1044, 0x8007) },
	/* Hercules */
	{ USB_DEVICE(0x06f8, 0xe000) },
	/* Melco */
	{ USB_DEVICE(0x0411, 0x005e) },
	{ USB_DEVICE(0x0411, 0x0066) },
	{ USB_DEVICE(0x0411, 0x0067) },
	{ USB_DEVICE(0x0411, 0x008b) },
	{ USB_DEVICE(0x0411, 0x0097) },
	/* MSI */
	{ USB_DEVICE(0x0db0, 0x6861) },
	{ USB_DEVICE(0x0db0, 0x6865) },
	{ USB_DEVICE(0x0db0, 0x6869) },
	/* Ralink */
	{ USB_DEVICE(0x148f, 0x1706) },
	{ USB_DEVICE(0x148f, 0x2570) },
	{ USB_DEVICE(0x148f, 0x9020) },
	/* Sagem */
	{ USB_DEVICE(0x079b, 0x004b) },
	/* Siemens */
	{ USB_DEVICE(0x0681, 0x3c06) },
	/* SMC */
	{ USB_DEVICE(0x0707, 0xee13) },
	/* Spairon */
	{ USB_DEVICE(0x114b, 0x0110) },
	/* SURECOM */
	{ USB_DEVICE(0x0769, 0x11f3) },
	/* Trust */
	{ USB_DEVICE(0x0eb0, 0x9020) },
	/* VTech */
	{ USB_DEVICE(0x0f88, 0x3012) },
	/* Zinwell */
	{ USB_DEVICE(0x5a57, 0x0260) },
	{ 0, }
};

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards");
MODULE_DEVICE_TABLE(usb, rt2500usb_device_table);
MODULE_LICENSE("GPL");

static int rt2500usb_probe(struct usb_interface *usb_intf,
			   const struct usb_device_id *id)
{
	return rt2x00usb_probe(usb_intf, &rt2500usb_ops);
}

static struct usb_driver rt2500usb_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= rt2500usb_device_table,
	.probe		= rt2500usb_probe,
	.disconnect	= rt2x00usb_disconnect,
	.suspend	= rt2x00usb_suspend,
	.resume		= rt2x00usb_resume,
	.reset_resume	= rt2x00usb_resume,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(rt2500usb_driver);
gpl-2.0
ElysiumRom/android_kernel_samsung_msm8660-common
arch/alpha/kernel/perf_event.c
2361
22994
/* * Hardware performance events for the Alpha. * * We implement HW counts on the EV67 and subsequent CPUs only. * * (C) 2010 Michael J. Cree * * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and * ARM code, which are copyright by their respective authors. */ #include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/kernel.h> #include <linux/kdebug.h> #include <linux/mutex.h> #include <linux/init.h> #include <asm/hwrpb.h> #include <asm/atomic.h> #include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/pal.h> #include <asm/wrperfmon.h> #include <asm/hw_irq.h> /* The maximum number of PMCs on any Alpha CPU whatsoever. */ #define MAX_HWEVENTS 3 #define PMC_NO_INDEX -1 /* For tracking PMCs and the hw events they monitor on each CPU. */ struct cpu_hw_events { int enabled; /* Number of events scheduled; also number entries valid in arrays below. */ int n_events; /* Number events added since last hw_perf_disable(). */ int n_added; /* Events currently scheduled. */ struct perf_event *event[MAX_HWEVENTS]; /* Event type of each scheduled event. */ unsigned long evtype[MAX_HWEVENTS]; /* Current index of each scheduled event; if not yet determined * contains PMC_NO_INDEX. */ int current_idx[MAX_HWEVENTS]; /* The active PMCs' config for easy use with wrperfmon(). */ unsigned long config; /* The active counters' indices for easy use with wrperfmon(). */ unsigned long idx_mask; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); /* * A structure to hold the description of the PMCs available on a particular * type of Alpha CPU. */ struct alpha_pmu_t { /* Mapping of the perf system hw event types to indigenous event types */ const int *event_map; /* The number of entries in the event_map */ int max_events; /* The number of PMCs on this Alpha */ int num_pmcs; /* * All PMC counters reside in the IBOX register PCTR. This is the * LSB of the counter. 
*/ int pmc_count_shift[MAX_HWEVENTS]; /* * The mask that isolates the PMC bits when the LSB of the counter * is shifted to bit 0. */ unsigned long pmc_count_mask[MAX_HWEVENTS]; /* The maximum period the PMC can count. */ unsigned long pmc_max_period[MAX_HWEVENTS]; /* * The maximum value that may be written to the counter due to * hardware restrictions is pmc_max_period - pmc_left. */ long pmc_left[3]; /* Subroutine for allocation of PMCs. Enforces constraints. */ int (*check_constraints)(struct perf_event **, unsigned long *, int); }; /* * The Alpha CPU PMU description currently in operation. This is set during * the boot process to the specific CPU of the machine. */ static const struct alpha_pmu_t *alpha_pmu; #define HW_OP_UNSUPPORTED -1 /* * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs * follow. Since they are identical we refer to them collectively as the * EV67 henceforth. */ /* * EV67 PMC event types * * There is no one-to-one mapping of the possible hw event types to the * actual codes that are used to program the PMCs hence we introduce our * own hw event type identifiers. */ enum ev67_pmc_event_type { EV67_CYCLES = 1, EV67_INSTRUCTIONS, EV67_BCACHEMISS, EV67_MBOXREPLAY, EV67_LAST_ET }; #define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES) /* Mapping of the hw event types to the perf tool interface */ static const int ev67_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS, [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS, }; struct ev67_mapping_t { int config; int idx; }; /* * The mapping used for one event only - these must be in same order as enum * ev67_pmc_event_type definition. 
*/ static const struct ev67_mapping_t ev67_mapping[] = { {EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */ {EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */ {EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */ {EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */ }; /* * Check that a group of events can be simultaneously scheduled on to the * EV67 PMU. Also allocate counter indices and config. */ static int ev67_check_constraints(struct perf_event **event, unsigned long *evtype, int n_ev) { int idx0; unsigned long config; idx0 = ev67_mapping[evtype[0]-1].idx; config = ev67_mapping[evtype[0]-1].config; if (n_ev == 1) goto success; BUG_ON(n_ev != 2); if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) { /* MBOX replay traps must be on PMC 1 */ idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0; /* Only cycles can accompany MBOX replay traps */ if (evtype[idx0] == EV67_CYCLES) { config = EV67_PCTR_CYCLES_MBOX; goto success; } } if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) { /* Bcache misses must be on PMC 1 */ idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0; /* Only instructions can accompany Bcache misses */ if (evtype[idx0] == EV67_INSTRUCTIONS) { config = EV67_PCTR_INSTR_BCACHEMISS; goto success; } } if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) { /* Instructions must be on PMC 0 */ idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1; /* By this point only cycles can accompany instructions */ if (evtype[idx0^1] == EV67_CYCLES) { config = EV67_PCTR_INSTR_CYCLES; goto success; } } /* Otherwise, darn it, there is a conflict. 
*/ return -1; success: event[0]->hw.idx = idx0; event[0]->hw.config_base = config; if (n_ev == 2) { event[1]->hw.idx = idx0 ^ 1; event[1]->hw.config_base = config; } return 0; } static const struct alpha_pmu_t ev67_pmu = { .event_map = ev67_perfmon_event_map, .max_events = ARRAY_SIZE(ev67_perfmon_event_map), .num_pmcs = 2, .pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0}, .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0}, .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0}, .pmc_left = {16, 4, 0}, .check_constraints = ev67_check_constraints }; /* * Helper routines to ensure that we read/write only the correct PMC bits * when calling the wrperfmon PALcall. */ static inline void alpha_write_pmc(int idx, unsigned long val) { val &= alpha_pmu->pmc_count_mask[idx]; val <<= alpha_pmu->pmc_count_shift[idx]; val |= (1<<idx); wrperfmon(PERFMON_CMD_WRITE, val); } static inline unsigned long alpha_read_pmc(int idx) { unsigned long val; val = wrperfmon(PERFMON_CMD_READ, 0); val >>= alpha_pmu->pmc_count_shift[idx]; val &= alpha_pmu->pmc_count_mask[idx]; return val; } /* Set a new period to sample over */ static int alpha_perf_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { long left = local64_read(&hwc->period_left); long period = hwc->sample_period; int ret = 0; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } /* * Hardware restrictions require that the counters must not be * written with values that are too close to the maximum period. 
*/ if (unlikely(left < alpha_pmu->pmc_left[idx])) left = alpha_pmu->pmc_left[idx]; if (left > (long)alpha_pmu->pmc_max_period[idx]) left = alpha_pmu->pmc_max_period[idx]; local64_set(&hwc->prev_count, (unsigned long)(-left)); alpha_write_pmc(idx, (unsigned long)(-left)); perf_event_update_userpage(event); return ret; } /* * Calculates the count (the 'delta') since the last time the PMC was read. * * As the PMCs' full period can easily be exceeded within the perf system * sampling period we cannot use any high order bits as a guard bit in the * PMCs to detect overflow as is done by other architectures. The code here * calculates the delta on the basis that there is no overflow when ovf is * zero. The value passed via ovf by the interrupt handler corrects for * overflow. * * This can be racey on rare occasions -- a call to this routine can occur * with an overflowed counter just before the PMI service routine is called. * The check for delta negative hopefully always rectifies this situation. */ static unsigned long alpha_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx, long ovf) { long prev_raw_count, new_raw_count; long delta; again: prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = alpha_read_pmc(idx); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf; /* It is possible on very rare occasions that the PMC has overflowed * but the interrupt is yet to come. Detect and fix this situation. */ if (unlikely(delta < 0)) { delta += alpha_pmu->pmc_max_period[idx] + 1; } local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); return new_raw_count; } /* * Collect all HW events into the array event[]. 
*/ static int collect_events(struct perf_event *group, int max_count, struct perf_event *event[], unsigned long *evtype, int *current_idx) { struct perf_event *pe; int n = 0; if (!is_software_event(group)) { if (n >= max_count) return -1; event[n] = group; evtype[n] = group->hw.event_base; current_idx[n++] = PMC_NO_INDEX; } list_for_each_entry(pe, &group->sibling_list, group_entry) { if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; event[n] = pe; evtype[n] = pe->hw.event_base; current_idx[n++] = PMC_NO_INDEX; } } return n; } /* * Check that a group of events can be simultaneously scheduled on to the PMU. */ static int alpha_check_constraints(struct perf_event **events, unsigned long *evtypes, int n_ev) { /* No HW events is possible from hw_perf_group_sched_in(). */ if (n_ev == 0) return 0; if (n_ev > alpha_pmu->num_pmcs) return -1; return alpha_pmu->check_constraints(events, evtypes, n_ev); } /* * If new events have been scheduled then update cpuc with the new * configuration. This may involve shifting cycle counts from one PMC to * another. */ static void maybe_change_configuration(struct cpu_hw_events *cpuc) { int j; if (cpuc->n_added == 0) return; /* Find counters that are moving to another PMC and update */ for (j = 0; j < cpuc->n_events; j++) { struct perf_event *pe = cpuc->event[j]; if (cpuc->current_idx[j] != PMC_NO_INDEX && cpuc->current_idx[j] != pe->hw.idx) { alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0); cpuc->current_idx[j] = PMC_NO_INDEX; } } /* Assign to counters all unassigned events. 
*/ cpuc->idx_mask = 0; for (j = 0; j < cpuc->n_events; j++) { struct perf_event *pe = cpuc->event[j]; struct hw_perf_event *hwc = &pe->hw; int idx = hwc->idx; if (cpuc->current_idx[j] == PMC_NO_INDEX) { alpha_perf_event_set_period(pe, hwc, idx); cpuc->current_idx[j] = idx; } if (!(hwc->state & PERF_HES_STOPPED)) cpuc->idx_mask |= (1<<cpuc->current_idx[j]); } cpuc->config = cpuc->event[0]->hw.config_base; } /* Schedule perf HW event on to PMU. * - this function is called from outside this module via the pmu struct * returned from perf event initialisation. */ static int alpha_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int n0; int ret; unsigned long irq_flags; /* * The Sparc code has the IRQ disable first followed by the perf * disable, however this can lead to an overflowed counter with the * PMI disabled on rare occasions. The alpha_perf_event_update() * routine should detect this situation by noting a negative delta, * nevertheless we disable the PMCs first to enable a potential * final PMI to occur before we disable interrupts. */ perf_pmu_disable(event->pmu); local_irq_save(irq_flags); /* Default to error to be returned */ ret = -EAGAIN; /* Insert event on to PMU and if successful modify ret to valid return */ n0 = cpuc->n_events; if (n0 < alpha_pmu->num_pmcs) { cpuc->event[n0] = event; cpuc->evtype[n0] = event->hw.event_base; cpuc->current_idx[n0] = PMC_NO_INDEX; if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) { cpuc->n_events++; cpuc->n_added++; ret = 0; } } hwc->state = PERF_HES_UPTODATE; if (!(flags & PERF_EF_START)) hwc->state |= PERF_HES_STOPPED; local_irq_restore(irq_flags); perf_pmu_enable(event->pmu); return ret; } /* Disable performance monitoring unit * - this function is called from outside this module via the pmu struct * returned from perf event initialisation. 
*/ static void alpha_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; unsigned long irq_flags; int j; perf_pmu_disable(event->pmu); local_irq_save(irq_flags); for (j = 0; j < cpuc->n_events; j++) { if (event == cpuc->event[j]) { int idx = cpuc->current_idx[j]; /* Shift remaining entries down into the existing * slot. */ while (++j < cpuc->n_events) { cpuc->event[j - 1] = cpuc->event[j]; cpuc->evtype[j - 1] = cpuc->evtype[j]; cpuc->current_idx[j - 1] = cpuc->current_idx[j]; } /* Absorb the final count and turn off the event. */ alpha_perf_event_update(event, hwc, idx, 0); perf_event_update_userpage(event); cpuc->idx_mask &= ~(1UL<<idx); cpuc->n_events--; break; } } local_irq_restore(irq_flags); perf_pmu_enable(event->pmu); } static void alpha_pmu_read(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; alpha_perf_event_update(event, hwc, hwc->idx, 0); } static void alpha_pmu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (!(hwc->state & PERF_HES_STOPPED)) { cpuc->idx_mask &= ~(1UL<<hwc->idx); hwc->state |= PERF_HES_STOPPED; } if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { alpha_perf_event_update(event, hwc, hwc->idx, 0); hwc->state |= PERF_HES_UPTODATE; } if (cpuc->enabled) wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx)); } static void alpha_pmu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; if (flags & PERF_EF_RELOAD) { WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); alpha_perf_event_set_period(event, hwc, hwc->idx); } hwc->state = 0; cpuc->idx_mask |= 1UL<<hwc->idx; if (cpuc->enabled) wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx)); } /* * Check that CPU performance counters 
are supported. * - currently support EV67 and later CPUs. * - actually some later revisions of the EV6 have the same PMC model as the * EV67 but we don't do suffiently deep CPU detection to detect them. * Bad luck to the very few people who might have one, I guess. */ static int supported_cpu(void) { struct percpu_struct *cpu; unsigned long cputype; /* Get cpu type from HW */ cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset); cputype = cpu->type & 0xffffffff; /* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */ return (cputype >= EV67_CPU) && (cputype <= EV69_CPU); } static void hw_perf_event_destroy(struct perf_event *event) { /* Nothing to be done! */ return; } static int __hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; struct perf_event *evts[MAX_HWEVENTS]; unsigned long evtypes[MAX_HWEVENTS]; int idx_rubbish_bin[MAX_HWEVENTS]; int ev; int n; /* We only support a limited range of HARDWARE event types with one * only programmable via a RAW event type. */ if (attr->type == PERF_TYPE_HARDWARE) { if (attr->config >= alpha_pmu->max_events) return -EINVAL; ev = alpha_pmu->event_map[attr->config]; } else if (attr->type == PERF_TYPE_HW_CACHE) { return -EOPNOTSUPP; } else if (attr->type == PERF_TYPE_RAW) { ev = attr->config & 0xff; } else { return -EOPNOTSUPP; } if (ev < 0) { return ev; } /* The EV67 does not support mode exclusion */ if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv || attr->exclude_idle) { return -EPERM; } /* * We place the event type in event_base here and leave calculation * of the codes to programme the PMU for alpha_pmu_enable() because * it is only then we will know what HW events are actually * scheduled on to the PMU. At that point the code to programme the * PMU is put into config_base and the PMC to use is placed into * idx. 
We initialise idx (below) to PMC_NO_INDEX to indicate that * it is yet to be determined. */ hwc->event_base = ev; /* Collect events in a group together suitable for calling * alpha_check_constraints() to verify that the group as a whole can * be scheduled on to the PMU. */ n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, alpha_pmu->num_pmcs - 1, evts, evtypes, idx_rubbish_bin); if (n < 0) return -EINVAL; } evtypes[n] = hwc->event_base; evts[n] = event; if (alpha_check_constraints(evts, evtypes, n + 1)) return -EINVAL; /* Indicate that PMU config and idx are yet to be determined. */ hwc->config_base = 0; hwc->idx = PMC_NO_INDEX; event->destroy = hw_perf_event_destroy; /* * Most architectures reserve the PMU for their use at this point. * As there is no existing mechanism to arbitrate usage and there * appears to be no other user of the Alpha PMU we just assume * that we can just use it, hence a NO-OP here. * * Maybe an alpha_reserve_pmu() routine should be implemented but is * anything else ever going to use it? */ if (!hwc->sample_period) { hwc->sample_period = alpha_pmu->pmc_max_period[0]; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } return 0; } /* * Main entry point to initialise a HW performance event. */ static int alpha_pmu_event_init(struct perf_event *event) { int err; switch (event->attr.type) { case PERF_TYPE_RAW: case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: break; default: return -ENOENT; } if (!alpha_pmu) return -ENODEV; /* Do the real initialisation work. */ err = __hw_perf_event_init(event); return err; } /* * Main entry point - enable HW performance counters. */ static void alpha_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (cpuc->enabled) return; cpuc->enabled = 1; barrier(); if (cpuc->n_events > 0) { /* Update cpuc with information from any new scheduled events. 
*/ maybe_change_configuration(cpuc); /* Start counting the desired events. */ wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE); wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config); wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); } } /* * Main entry point - disable HW performance counters. */ static void alpha_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (!cpuc->enabled) return; cpuc->enabled = 0; cpuc->n_added = 0; wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); } static struct pmu pmu = { .pmu_enable = alpha_pmu_enable, .pmu_disable = alpha_pmu_disable, .event_init = alpha_pmu_event_init, .add = alpha_pmu_add, .del = alpha_pmu_del, .start = alpha_pmu_start, .stop = alpha_pmu_stop, .read = alpha_pmu_read, }; /* * Main entry point - don't know when this is called but it * obviously dumps debug info. */ void perf_event_print_debug(void) { unsigned long flags; unsigned long pcr; int pcr0, pcr1; int cpu; if (!supported_cpu()) return; local_irq_save(flags); cpu = smp_processor_id(); pcr = wrperfmon(PERFMON_CMD_READ, 0); pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0]; pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1]; pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1); local_irq_restore(flags); } /* * Performance Monitoring Interrupt Service Routine called when a PMC * overflows. The PMC that overflowed is passed in la_ptr. */ static void alpha_perf_event_irq_handler(unsigned long la_ptr, struct pt_regs *regs) { struct cpu_hw_events *cpuc; struct perf_sample_data data; struct perf_event *event; struct hw_perf_event *hwc; int idx, j; __get_cpu_var(irq_pmi_count)++; cpuc = &__get_cpu_var(cpu_hw_events); /* Completely counting through the PMC's period to trigger a new PMC * overflow interrupt while in this interrupt routine is utterly * disastrous! 
The EV6 and EV67 counters are sufficiently large to * prevent this but to be really sure disable the PMCs. */ wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); /* la_ptr is the counter that overflowed. */ if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) { /* This should never occur! */ irq_err_count++; pr_warning("PMI: silly index %ld\n", la_ptr); wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } idx = la_ptr; perf_sample_data_init(&data, 0); for (j = 0; j < cpuc->n_events; j++) { if (cpuc->current_idx[j] == idx) break; } if (unlikely(j == cpuc->n_events)) { /* This can occur if the event is disabled right on a PMC overflow. */ wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } event = cpuc->event[j]; if (unlikely(!event)) { /* This should never occur! */ irq_err_count++; pr_warning("PMI: No event at index %d!\n", idx); wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } hwc = &event->hw; alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1); data.period = event->hw.last_period; if (alpha_perf_event_set_period(event, hwc, idx)) { if (perf_event_overflow(event, 1, &data, regs)) { /* Interrupts coming too quickly; "throttle" the * counter, i.e., disable it for a little while. */ alpha_pmu_stop(event, 0); } } wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } /* * Init call to initialise performance events at kernel startup. */ int __init init_hw_perf_events(void) { pr_info("Performance events: "); if (!supported_cpu()) { pr_cont("No support for your CPU.\n"); return 0; } pr_cont("Supported CPU type!\n"); /* Override performance counter IRQ vector */ perf_irq = alpha_perf_event_irq_handler; /* And set up PMU specification */ alpha_pmu = &ev67_pmu; perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); return 0; } early_initcall(init_hw_perf_events);
gpl-2.0
lilbowza1985/s6eng2
drivers/staging/vt6656/tether.c
2617
1605
/* * Copyright (c) 2003 VIA Networking, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: tether.c * * Purpose: * * Author: Tevin Chen * * Date: May 21, 1996 * * Functions: * ETHbIsBufferCrc32Ok - Check CRC value of the buffer if Ok or not * * Revision History: * */ #include "device.h" #include "tmacro.h" #include "tcrc.h" #include "tether.h" /* * Description: Check CRC value of the buffer if Ok or not * * Parameters: * In: * pbyBuffer - pointer of buffer (normally is rx buffer) * cbFrameLength - length of buffer, including CRC portion * Out: * none * * Return Value: true if ok; false if error. * */ bool ETHbIsBufferCrc32Ok(u8 * pbyBuffer, unsigned int cbFrameLength) { u32 dwCRC; dwCRC = CRCdwGetCrc32(pbyBuffer, cbFrameLength - 4); if (cpu_to_le32(*((u32 *)(pbyBuffer + cbFrameLength - 4))) != dwCRC) return false; return true; }
gpl-2.0
cubieboard/linux-sdk-kernel-source
arch/x86/kernel/apm_32.c
4665
70983
/* -*- linux-c -*- * APM BIOS driver for Linux * Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au) * * Initial development of this driver was funded by NEC Australia P/L * and NEC Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * October 1995, Rik Faith (faith@cs.unc.edu): * Minor enhancements and updates (to the patch set) for 1.3.x * Documentation * January 1996, Rik Faith (faith@cs.unc.edu): * Make /proc/apm easy to format (bump driver version) * March 1996, Rik Faith (faith@cs.unc.edu): * Prohibit APM BIOS calls unless apm_enabled. * (Thanks to Ulrich Windl <Ulrich.Windl@rz.uni-regensburg.de>) * April 1996, Stephen Rothwell (sfr@canb.auug.org.au) * Version 1.0 and 1.1 * May 1996, Version 1.2 * Feb 1998, Version 1.3 * Feb 1998, Version 1.4 * Aug 1998, Version 1.5 * Sep 1998, Version 1.6 * Nov 1998, Version 1.7 * Jan 1999, Version 1.8 * Jan 1999, Version 1.9 * Oct 1999, Version 1.10 * Nov 1999, Version 1.11 * Jan 2000, Version 1.12 * Feb 2000, Version 1.13 * Nov 2000, Version 1.14 * Oct 2001, Version 1.15 * Jan 2002, Version 1.16 * Oct 2002, Version 1.16ac * * History: * 0.6b: first version in official kernel, Linux 1.3.46 * 0.7: changed /proc/apm format, Linux 1.3.58 * 0.8: fixed gcc 2.7.[12] compilation problems, Linux 1.3.59 * 0.9: only call bios if bios is present, Linux 1.3.72 * 1.0: use fixed device number, consolidate /proc/apm into this file, * Linux 1.3.85 * 1.1: support user-space standby and suspend, power off after system * halted, Linux 1.3.98 * 1.2: When resetting RTC after resume, take care so that the 
time * is only incorrect by 30-60mS (vs. 1S previously) (Gabor J. Toth * <jtoth@princeton.edu>); improve interaction between * screen-blanking and gpm (Stephen Rothwell); Linux 1.99.4 * 1.2a:Simple change to stop mysterious bug reports with SMP also added * levels to the printk calls. APM is not defined for SMP machines. * The new replacement for it is, but Linux doesn't yet support this. * Alan Cox Linux 2.1.55 * 1.3: Set up a valid data descriptor 0x40 for buggy BIOS's * 1.4: Upgraded to support APM 1.2. Integrated ThinkPad suspend patch by * Dean Gaudet <dgaudet@arctic.org>. * C. Scott Ananian <cananian@alumni.princeton.edu> Linux 2.1.87 * 1.5: Fix segment register reloading (in case of bad segments saved * across BIOS call). * Stephen Rothwell * 1.6: Cope with compiler/assembler differences. * Only try to turn off the first display device. * Fix OOPS at power off with no APM BIOS by Jan Echternach * <echter@informatik.uni-rostock.de> * Stephen Rothwell * 1.7: Modify driver's cached copy of the disabled/disengaged flags * to reflect current state of APM BIOS. * Chris Rankin <rankinc@bellsouth.net> * Reset interrupt 0 timer to 100Hz after suspend * Chad Miller <cmiller@surfsouth.com> * Add CONFIG_APM_IGNORE_SUSPEND_BOUNCE * Richard Gooch <rgooch@atnf.csiro.au> * Allow boot time disabling of APM * Make boot messages far less verbose by default * Make asm safer * Stephen Rothwell * 1.8: Add CONFIG_APM_RTC_IS_GMT * Richard Gooch <rgooch@atnf.csiro.au> * change APM_NOINTS to CONFIG_APM_ALLOW_INTS * remove dependency on CONFIG_PROC_FS * Stephen Rothwell * 1.9: Fix small typo. <laslo@wodip.opole.pl> * Try to cope with BIOS's that need to have all display * devices blanked and not just the first one. * Ross Paterson <ross@soi.city.ac.uk> * Fix segment limit setting it has always been wrong as * the segments needed to have byte granularity. * Mark a few things __init. * Add hack to allow power off of SMP systems by popular request. 
* Use CONFIG_SMP instead of __SMP__ * Ignore BOUNCES for three seconds. * Stephen Rothwell * 1.10: Fix for Thinkpad return code. * Merge 2.2 and 2.3 drivers. * Remove APM dependencies in arch/i386/kernel/process.c * Remove APM dependencies in drivers/char/sysrq.c * Reset time across standby. * Allow more inititialisation on SMP. * Remove CONFIG_APM_POWER_OFF and make it boot time * configurable (default on). * Make debug only a boot time parameter (remove APM_DEBUG). * Try to blank all devices on any error. * 1.11: Remove APM dependencies in drivers/char/console.c * Check nr_running to detect if we are idle (from * Borislav Deianov <borislav@lix.polytechnique.fr>) * Fix for bioses that don't zero the top part of the * entrypoint offset (Mario Sitta <sitta@al.unipmn.it>) * (reported by Panos Katsaloulis <teras@writeme.com>). * Real mode power off patch (Walter Hofmann * <Walter.Hofmann@physik.stud.uni-erlangen.de>). * 1.12: Remove CONFIG_SMP as the compiler will optimize * the code away anyway (smp_num_cpus == 1 in UP) * noted by Artur Skawina <skawina@geocities.com>. * Make power off under SMP work again. * Fix thinko with initial engaging of BIOS. * Make sure power off only happens on CPU 0 * (Paul "Rusty" Russell <rusty@rustcorp.com.au>). * Do error notification to user mode if BIOS calls fail. * Move entrypoint offset fix to ...boot/setup.S * where it belongs (Cosmos <gis88564@cis.nctu.edu.tw>). * Remove smp-power-off. SMP users must now specify * "apm=power-off" on the kernel command line. Suggested * by Jim Avera <jima@hal.com>, modified by Alan Cox * <alan@lxorguk.ukuu.org.uk>. * Register the /proc/apm entry even on SMP so that * scripts that check for it before doing power off * work (Jim Avera <jima@hal.com>). * 1.13: Changes for new pm_ interfaces (Andy Henroid * <andy_henroid@yahoo.com>). * Modularize the code. * Fix the Thinkpad (again) :-( (CONFIG_APM_IGNORE_MULTIPLE_SUSPENDS * is now the way life works). * Fix thinko in suspend() (wrong return). 
* Notify drivers on critical suspend. * Make kapmd absorb more idle time (Pavel Machek <pavel@ucw.cz> * modified by sfr). * Disable interrupts while we are suspended (Andy Henroid * <andy_henroid@yahoo.com> fixed by sfr). * Make power off work on SMP again (Tony Hoyle * <tmh@magenta-logic.com> and <zlatko@iskon.hr>) modified by sfr. * Remove CONFIG_APM_SUSPEND_BOUNCE. The bounce ignore * interval is now configurable. * 1.14: Make connection version persist across module unload/load. * Enable and engage power management earlier. * Disengage power management on module unload. * Changed to use the sysrq-register hack for registering the * power off function called by magic sysrq based upon discussions * in irc://irc.openprojects.net/#kernelnewbies * (Crutcher Dunnavant <crutcher+kernel@datastacks.com>). * Make CONFIG_APM_REAL_MODE_POWER_OFF run time configurable. * (Arjan van de Ven <arjanv@redhat.com>) modified by sfr. * Work around byte swap bug in one of the Vaio's BIOS's * (Marc Boucher <marc@mbsi.ca>). * Exposed the disable flag to dmi so that we can handle known * broken APM (Alan Cox <alan@lxorguk.ukuu.org.uk>). * 1.14ac: If the BIOS says "I slowed the CPU down" then don't spin * calling it - instead idle. (Alan Cox <alan@lxorguk.ukuu.org.uk>) * If an APM idle fails log it and idle sensibly * 1.15: Don't queue events to clients who open the device O_WRONLY. * Don't expect replies from clients who open the device O_RDONLY. * (Idea from Thomas Hood) * Minor waitqueue cleanups. (John Fremlin <chief@bandits.org>) * 1.16: Fix idle calling. (Andreas Steinmetz <ast@domdv.de> et al.) * Notify listeners of standby or suspend events before notifying * drivers. Return EBUSY to ioctl() if suspend is rejected. * (Russell King <rmk@arm.linux.org.uk> and Thomas Hood) * Ignore first resume after we generate our own resume event * after a suspend (Thomas Hood) * Daemonize now gets rid of our controlling terminal (sfr). 
* CONFIG_APM_CPU_IDLE now just affects the default value of * idle_threshold (sfr). * Change name of kernel apm daemon (as it no longer idles) (sfr). * 1.16ac: Fix up SMP support somewhat. You can now force SMP on and we * make _all_ APM calls on the CPU#0. Fix unsafe sign bug. * TODO: determine if its "boot CPU" or "CPU0" we want to lock to. * * APM 1.1 Reference: * * Intel Corporation, Microsoft Corporation. Advanced Power Management * (APM) BIOS Interface Specification, Revision 1.1, September 1993. * Intel Order Number 241704-001. Microsoft Part Number 781-110-X01. * * [This document is available free from Intel by calling 800.628.8686 (fax * 916.356.6100) or 800.548.4725; or from * http://www.microsoft.com/whdc/archive/amp_12.mspx It is also * available from Microsoft by calling 206.882.8080.] * * APM 1.2 Reference: * Intel Corporation, Microsoft Corporation. Advanced Power Management * (APM) BIOS Interface Specification, Revision 1.2, February 1996. * * [This document is available from Microsoft at: * http://www.microsoft.com/whdc/archive/amp_12.mspx] */ #include <linux/module.h> #include <linux/poll.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/timer.h> #include <linux/fcntl.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <linux/apm_bios.h> #include <linux/init.h> #include <linux/time.h> #include <linux/sched.h> #include <linux/pm.h> #include <linux/capability.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/freezer.h> #include <linux/smp.h> #include <linux/dmi.h> #include <linux/suspend.h> #include <linux/kthread.h> #include <linux/jiffies.h> #include <linux/acpi.h> #include <linux/syscore_ops.h> #include <linux/i8253.h> #include <asm/uaccess.h> #include <asm/desc.h> #include <asm/olpc.h> #include <asm/paravirt.h> #include <asm/reboot.h> #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) extern int 
(*console_blank_hook)(int); #endif /* * The apm_bios device is one of the misc char devices. * This is its minor number. */ #define APM_MINOR_DEV 134 /* * Various options can be changed at boot time as follows: * (We allow underscores for compatibility with the modules code) * apm=on/off enable/disable APM * [no-]allow[-_]ints allow interrupts during BIOS calls * [no-]broken[-_]psr BIOS has a broken GetPowerStatus call * [no-]realmode[-_]power[-_]off switch to real mode before * powering off * [no-]debug log some debugging messages * [no-]power[-_]off power off on shutdown * [no-]smp Use apm even on an SMP box * bounce[-_]interval=<n> number of ticks to ignore suspend * bounces * idle[-_]threshold=<n> System idle percentage above which to * make APM BIOS idle calls. Set it to * 100 to disable. * idle[-_]period=<n> Period (in 1/100s of a second) over * which the idle percentage is * calculated. */ /* KNOWN PROBLEM MACHINES: * * U: TI 4000M TravelMate: BIOS is *NOT* APM compliant * [Confirmed by TI representative] * ?: ACER 486DX4/75: uses dseg 0040, in violation of APM specification * [Confirmed by BIOS disassembly] * [This may work now ...] * P: Toshiba 1950S: battery life information only gets updated after resume * P: Midwest Micro Soundbook Elite DX2/66 monochrome: screen blanking * broken in BIOS [Reported by Garst R. Reese <reese@isn.net>] * ?: AcerNote-950: oops on reading /proc/apm - workaround is a WIP * Neale Banks <neale@lowendale.com.au> December 2000 * * Legend: U = unusable with APM patches * P = partially usable with APM patches */ /* * Define as 1 to make the driver always call the APM BIOS busy * routine even if the clock was not reported as slowed by the * idle routine. Otherwise, define as 0. */ #define ALWAYS_CALL_BUSY 1 /* * Define to make the APM BIOS calls zero all data segment registers (so * that an incorrect BIOS implementation will cause a kernel panic if it * tries to write to arbitrary memory). 
*/ #define APM_ZERO_SEGS #include <asm/apm.h> /* * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. * This patched by Chad Miller <cmiller@surfsouth.com>, original code by * David Chen <chen@ctpa04.mit.edu> */ #undef INIT_TIMER_AFTER_SUSPEND #ifdef INIT_TIMER_AFTER_SUSPEND #include <linux/timex.h> #include <asm/io.h> #include <linux/delay.h> #endif /* * Need to poll the APM BIOS every second */ #define APM_CHECK_TIMEOUT (HZ) /* * Ignore suspend events for this amount of time after a resume */ #define DEFAULT_BOUNCE_INTERVAL (3 * HZ) /* * Maximum number of events stored */ #define APM_MAX_EVENTS 20 /* * The per-file APM data */ struct apm_user { int magic; struct apm_user *next; unsigned int suser: 1; unsigned int writer: 1; unsigned int reader: 1; unsigned int suspend_wait: 1; int suspend_result; int suspends_pending; int standbys_pending; int suspends_read; int standbys_read; int event_head; int event_tail; apm_event_t events[APM_MAX_EVENTS]; }; /* * The magic number in apm_user */ #define APM_BIOS_MAGIC 0x4101 /* * idle percentage above which bios idle calls are done */ #ifdef CONFIG_APM_CPU_IDLE #warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012 #define DEFAULT_IDLE_THRESHOLD 95 #else #define DEFAULT_IDLE_THRESHOLD 100 #endif #define DEFAULT_IDLE_PERIOD (100 / 3) /* * Local variables */ static struct { unsigned long offset; unsigned short segment; } apm_bios_entry; static int clock_slowed; static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD; static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD; static int set_pm_idle; static int suspends_pending; static int standbys_pending; static int ignore_sys_suspend; static int ignore_normal_resume; static int bounce_interval __read_mostly = DEFAULT_BOUNCE_INTERVAL; static bool debug __read_mostly; static bool smp __read_mostly; static int apm_disabled = -1; #ifdef CONFIG_SMP static bool power_off; #else static bool power_off = 1; #endif static bool realmode_power_off; 
#ifdef CONFIG_APM_ALLOW_INTS static bool allow_ints = 1; #else static bool allow_ints; #endif static bool broken_psr; static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue); static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); static struct apm_user *user_list; static DEFINE_SPINLOCK(user_list_lock); static DEFINE_MUTEX(apm_mutex); /* * Set up a segment that references the real mode segment 0x40 * that extends up to the end of page zero (that we have reserved). * This is for buggy BIOS's that refer to (real mode) segment 0x40 * even though they are called in protected mode. */ static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); static const char driver_version[] = "1.16ac"; /* no spaces */ static struct task_struct *kapmd_task; /* * APM event names taken from the APM 1.2 specification. These are * the message codes that the BIOS uses to tell us about events */ static const char * const apm_event_name[] = { "system standby", "system suspend", "normal resume", "critical resume", "low battery", "power status change", "update time", "critical suspend", "user standby", "user suspend", "system standby resume", "capabilities change" }; #define NR_APM_EVENT_NAME ARRAY_SIZE(apm_event_name) typedef struct lookup_t { int key; char *msg; } lookup_t; /* * The BIOS returns a set of standard error codes in AX when the * carry flag is set. 
 */
static const lookup_t error_table[] = {
/* N/A	{ APM_SUCCESS,		"Operation succeeded" }, */
	{ APM_DISABLED,		"Power management disabled" },
	{ APM_CONNECTED,	"Real mode interface already connected" },
	{ APM_NOT_CONNECTED,	"Interface not connected" },
	{ APM_16_CONNECTED,	"16 bit interface already connected" },
/* N/A	{ APM_16_UNSUPPORTED,	"16 bit interface not supported" }, */
	{ APM_32_CONNECTED,	"32 bit interface already connected" },
	{ APM_32_UNSUPPORTED,	"32 bit interface not supported" },
	{ APM_BAD_DEVICE,	"Unrecognized device ID" },
	{ APM_BAD_PARAM,	"Parameter out of range" },
	{ APM_NOT_ENGAGED,	"Interface not engaged" },
	{ APM_BAD_FUNCTION,	"Function not supported" },
	{ APM_RESUME_DISABLED,	"Resume timer disabled" },
	{ APM_BAD_STATE,	"Unable to enter requested state" },
/* N/A	{ APM_NO_EVENTS,	"No events pending" }, */
	{ APM_NO_ERROR,		"BIOS did not set a return code" },
	{ APM_NOT_PRESENT,	"No APM present" }
};
#define ERROR_COUNT	ARRAY_SIZE(error_table)

/**
 * apm_error - display an APM error
 * @str: information string
 * @err: APM BIOS return code
 *
 * Write a meaningful log entry to the kernel log in the event of
 * an APM error.  Note that this also handles (negative) kernel errors.
 */
static void apm_error(char *str, int err)
{
	int i;

	/* Look the BIOS code up in the table above; fall through if absent. */
	for (i = 0; i < ERROR_COUNT; i++)
		if (error_table[i].key == err)
			break;
	if (i < ERROR_COUNT)
		printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg);
	else if (err < 0)
		/* Negative values are Linux errno codes, not BIOS codes. */
		printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err);
	else
		printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n",
		       str, err);
}

/*
 * These are the actual BIOS calls.  Depending on APM_ZERO_SEGS and
 * apm_info.allow_ints, we are being really paranoid here!
 * Not only
 * are interrupts disabled, but all the segment registers (except SS)
 * are saved and zeroed this means that if the BIOS tries to reference
 * any data without explicitly loading the segment registers, the kernel
 * will fault immediately rather than have some unforeseen circumstances
 * for the rest of the kernel.  And it will be very obvious!  :-) Doing
 * this depends on CS referring to the same physical memory as DS so that
 * DS can be zeroed before the call.  Unfortunately, we can't do anything
 * about the stack segment/pointer.  Also, we tell the compiler that
 * everything could change.
 *
 * Also, we KNOW that for the non error case of apm_bios_call, there
 * is no useful data returned in the low order 8 bits of eax.
 */

/*
 * Capture the current IRQ flags and then set the IRQ state the BIOS
 * call wants: interrupts enabled if the user asked for allow_ints,
 * disabled otherwise.  The returned flags are later handed to
 * apm_irq_restore() to undo whatever was changed here.
 */
static inline unsigned long __apm_irq_save(void)
{
	unsigned long flags;
	local_save_flags(flags);
	if (apm_info.allow_ints) {
		if (irqs_disabled_flags(flags))
			local_irq_enable();
	} else
		local_irq_disable();

	return flags;
}

#define apm_irq_save(flags) \
	do { flags = __apm_irq_save(); } while (0)

/* Restore the IRQ state captured by __apm_irq_save(). */
static inline void apm_irq_restore(unsigned long flags)
{
	if (irqs_disabled_flags(flags))
		local_irq_disable();
	else if (irqs_disabled())
		local_irq_enable();
}

/*
 * With APM_ZERO_SEGS the %fs/%gs selectors are saved and reloaded
 * around each BIOS call so that a buggy BIOS dereferencing stale
 * segments faults immediately (see the big comment above).
 */
#ifdef APM_ZERO_SEGS
#	define APM_DECL_SEGS \
		unsigned int saved_fs; unsigned int saved_gs;
#	define APM_DO_SAVE_SEGS \
		savesegment(fs, saved_fs); savesegment(gs, saved_gs)
#	define APM_DO_RESTORE_SEGS \
		loadsegment(fs, saved_fs); loadsegment(gs, saved_gs)
#else
#	define APM_DECL_SEGS
#	define APM_DO_SAVE_SEGS
#	define APM_DO_RESTORE_SEGS
#endif

/* Register block passed to/from the 32bit BIOS entry point. */
struct apm_bios_call {
	u32 func;
	/* In and out */
	u32 ebx;
	u32 ecx;
	/* Out only */
	u32 eax;
	u32 edx;
	u32 esi;

	/* Error: -ENOMEM, or bits 8-15 of eax */
	int err;
};

/**
 * __apm_bios_call - Make an APM BIOS 32bit call
 * @_call: pointer to struct apm_bios_call.
 *
 * Make an APM call using the 32bit protected mode interface. The
 * caller is responsible for knowing if APM BIOS is configured and
 * enabled.
 * This call can disable interrupts for a long period of
 * time on some laptops.  The return value is in AH and the carry
 * flag is loaded into AL.  If there is an error, then the error
 * code is returned in AH (bits 8-15 of eax) and this function
 * returns non-zero.
 *
 * Note: this makes the call on the current CPU.
 */
static long __apm_bios_call(void *_call)
{
	APM_DECL_SEGS
	unsigned long flags;
	int cpu;
	struct desc_struct save_desc_40;
	struct desc_struct *gdt;
	struct apm_bios_call *call = _call;

	/* Must already be pinned to CPU 0 (see on_cpu0() below). */
	cpu = get_cpu();
	BUG_ON(cpu != 0);
	/*
	 * Temporarily install the 0x40 descriptor for BIOSes that
	 * reference real-mode segment 0x40 from protected mode;
	 * the original descriptor is restored after the call.
	 */
	gdt = get_cpu_gdt_table(cpu);
	save_desc_40 = gdt[0x40 / 8];
	gdt[0x40 / 8] = bad_bios_desc;

	apm_irq_save(flags);
	APM_DO_SAVE_SEGS;
	apm_bios_call_asm(call->func, call->ebx, call->ecx,
			  &call->eax, &call->ebx, &call->ecx, &call->edx,
			  &call->esi);
	APM_DO_RESTORE_SEGS;
	apm_irq_restore(flags);
	gdt[0x40 / 8] = save_desc_40;
	put_cpu();

	/* AL holds the carry flag: non-zero means the BIOS reported an error. */
	return call->eax & 0xff;
}

/* Run __apm_bios_call or __apm_bios_call_simple on CPU 0 */
static int on_cpu0(long (*fn)(void *), struct apm_bios_call *call)
{
	int ret;

	/* Don't bother with work_on_cpu in the common case, so we don't
	 * have to worry about OOM or overhead. */
	if (get_cpu() == 0) {
		ret = fn(call);
		put_cpu();
	} else {
		put_cpu();
		ret = work_on_cpu(0, fn, call);
	}

	/* work_on_cpu can fail with -ENOMEM */
	if (ret < 0)
		call->err = ret;
	else
		call->err = (call->eax >> 8) & 0xff;

	return ret;
}

/**
 * apm_bios_call - Make an APM BIOS 32bit call (on CPU 0)
 * @call: the apm_bios_call registers.
 *
 * If there is an error, it is returned in @call.err.
 */
static int apm_bios_call(struct apm_bios_call *call)
{
	return on_cpu0(__apm_bios_call, call);
}

/**
 * __apm_bios_call_simple - Make an APM BIOS 32bit call (on CPU 0)
 * @_call: pointer to struct apm_bios_call.
 *
 * Make a BIOS call that returns one value only, or just status.
 * If there is an error, then the error code is returned in AH
 * (bits 8-15 of eax) and this function returns non-zero (it can
 * also return -ENOMEM). This is used for simpler BIOS operations.
 * This call may hold interrupts off for a long time on some laptops.
 *
 * Note: this makes the call on the current CPU.
 */
static long __apm_bios_call_simple(void *_call)
{
	u8 error;
	APM_DECL_SEGS
	unsigned long flags;
	int cpu;
	struct desc_struct save_desc_40;
	struct desc_struct *gdt;
	struct apm_bios_call *call = _call;

	/* Must already be pinned to CPU 0 (see on_cpu0()). */
	cpu = get_cpu();
	BUG_ON(cpu != 0);
	/* Same 0x40 descriptor workaround as __apm_bios_call(). */
	gdt = get_cpu_gdt_table(cpu);
	save_desc_40 = gdt[0x40 / 8];
	gdt[0x40 / 8] = bad_bios_desc;

	apm_irq_save(flags);
	APM_DO_SAVE_SEGS;
	error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
					 &call->eax);
	APM_DO_RESTORE_SEGS;
	apm_irq_restore(flags);
	gdt[0x40 / 8] = save_desc_40;
	put_cpu();
	return error;
}

/**
 * apm_bios_call_simple - make a simple APM BIOS 32bit call
 * @func: APM function to invoke
 * @ebx_in: EBX register value for BIOS call
 * @ecx_in: ECX register value for BIOS call
 * @eax: EAX register on return from the BIOS call
 * @err: bits
 *
 * Make a BIOS call that returns one value only, or just status.
 * If there is an error, then the error code is returned in @err
 * and this function returns non-zero. This is used for simpler
 * BIOS operations.  This call may hold interrupts off for a long
 * time on some laptops.
 */
static int apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax,
				int *err)
{
	struct apm_bios_call call;
	int ret;

	call.func = func;
	call.ebx = ebx_in;
	call.ecx = ecx_in;

	ret = on_cpu0(__apm_bios_call_simple, &call);
	*eax = call.eax;
	*err = call.err;
	return ret;
}

/**
 * apm_driver_version - APM driver version
 * @val: loaded with the APM version on return
 *
 * Retrieve the APM version supported by the BIOS. This is only
 * supported for APM 1.1 or higher. An error indicates APM 1.0 is
 * probably present.
 *
 * On entry val should point to a value indicating the APM driver
 * version with the high byte being the major and the low byte the
 * minor number both in BCD
 *
 * On return it will hold the BIOS revision supported in the
 * same format.
*/ static int apm_driver_version(u_short *val) { u32 eax; int err; if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax, &err)) return err; *val = eax; return APM_SUCCESS; } /** * apm_get_event - get an APM event from the BIOS * @event: pointer to the event * @info: point to the event information * * The APM BIOS provides a polled information for event * reporting. The BIOS expects to be polled at least every second * when events are pending. When a message is found the caller should * poll until no more messages are present. However, this causes * problems on some laptops where a suspend event notification is * not cleared until it is acknowledged. * * Additional information is returned in the info pointer, providing * that APM 1.2 is in use. If no messges are pending the value 0x80 * is returned (No power management events pending). */ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info) { struct apm_bios_call call; call.func = APM_FUNC_GET_EVENT; call.ebx = call.ecx = 0; if (apm_bios_call(&call)) return call.err; *event = call.ebx; if (apm_info.connection_version < 0x0102) *info = ~0; /* indicate info not valid */ else *info = call.ecx; return APM_SUCCESS; } /** * set_power_state - set the power management state * @what: which items to transition * @state: state to transition to * * Request an APM change of state for one or more system devices. The * processor state must be transitioned last of all. what holds the * class of device in the upper byte and the device number (0xFF for * all) for the object to be transitioned. * * The state holds the state to transition to, which may in fact * be an acceptance of a BIOS requested state change. 
 */
static int set_power_state(u_short what, u_short state)
{
	u32 eax;
	int err;

	if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax, &err))
		return err;
	return APM_SUCCESS;
}

/**
 * set_system_power_state - set system wide power state
 * @state: which state to enter
 *
 * Transition the entire system into a new APM power state.
 */
static int set_system_power_state(u_short state)
{
	return set_power_state(APM_DEVICE_ALL, state);
}

/**
 * apm_do_idle - perform power saving
 *
 * This function notifies the BIOS that the processor is (in the view
 * of the OS) idle. It returns -1 in the event that the BIOS refuses
 * to handle the idle request. On a success the function returns 1
 * if the BIOS did clock slowing or 0 otherwise.
 */
static int apm_do_idle(void)
{
	u32 eax;
	u8 ret = 0;
	int idled = 0;
	int polling;
	int err = 0;

	/* Drop TS_POLLING while idling so a reschedule sends an IPI. */
	polling = !!(current_thread_info()->status & TS_POLLING);
	if (polling) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
	}
	if (!need_resched()) {
		idled = 1;
		ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax, &err);
	}
	if (polling)
		current_thread_info()->status |= TS_POLLING;

	if (!idled)
		return 0;

	if (ret) {
		static unsigned long t;

		/* This always fails on some SMP boards running UP kernels.
		 * Only report the failure the first 5 times.
		 *
		 * NOTE(review): assigning t = jiffies inside the branch
		 * makes the next ++t far exceed 5, so in practice only the
		 * first failure is logged (until jiffies wraps) — this does
		 * not match the "first 5 times" comment above.  Confirm
		 * intent before changing.
		 */
		if (++t < 5) {
			printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err);
			t = jiffies;
		}
		return -1;
	}
	clock_slowed = (apm_info.bios.flags & APM_IDLE_SLOWS_CLOCK) != 0;
	return clock_slowed;
}

/**
 * apm_do_busy - inform the BIOS the CPU is busy
 *
 * Request that the BIOS brings the CPU back to full performance.
 */
static void apm_do_busy(void)
{
	u32 dummy;
	int err;

	if (clock_slowed || ALWAYS_CALL_BUSY) {
		(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy, &err);
		clock_slowed = 0;
	}
}

/*
 * If no process has really been interested in
 * the CPU for some time, we want to call BIOS
 * power management - we probably want
 * to conserve power.
 */
#define IDLE_CALC_LIMIT	(HZ * 100)
#define IDLE_LEAKY_MAX	16

/* Previous pm_idle handler, restored when APM idling is disabled. */
static void (*original_pm_idle)(void) __read_mostly;

/**
 * apm_cpu_idle - cpu idling for APM capable Linux
 *
 * This is the idling function the kernel executes when APM is available. It
 * tries to do BIOS powermanagement based on the average system idle time.
 * Furthermore it calls the system default idle routine.
 */
static void apm_cpu_idle(void)
{
	static int use_apm_idle; /* = 0 */
	static unsigned int last_jiffies; /* = 0 */
	static unsigned int last_stime; /* = 0 */

	int apm_idle_done = 0;
	unsigned int jiffies_since_last_check = jiffies - last_jiffies;
	unsigned int bucket;

	WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
recalc:
	/*
	 * Periodically re-estimate how idle the system is (stime growth vs
	 * elapsed jiffies) and only use the BIOS idle call when the idle
	 * percentage exceeds idle_threshold.
	 */
	if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
		use_apm_idle = 0;
		last_jiffies = jiffies;
		last_stime = current->stime;
	} else if (jiffies_since_last_check > idle_period) {
		unsigned int idle_percentage;

		idle_percentage = current->stime - last_stime;
		idle_percentage *= 100;
		idle_percentage /= jiffies_since_last_check;
		use_apm_idle = (idle_percentage > idle_threshold);
		if (apm_info.forbid_idle)
			use_apm_idle = 0;
		last_jiffies = jiffies;
		last_stime = current->stime;
	}

	/* Leaky bucket: tolerate a few BIOS idle calls that return instantly. */
	bucket = IDLE_LEAKY_MAX;

	while (!need_resched()) {
		if (use_apm_idle) {
			unsigned int t;

			t = jiffies;
			switch (apm_do_idle()) {
			case 0:
				apm_idle_done = 1;
				if (t != jiffies) {
					if (bucket) {
						bucket = IDLE_LEAKY_MAX;
						continue;
					}
				} else if (bucket) {
					bucket--;
					continue;
				}
				break;
			case 1:
				apm_idle_done = 1;
				break;
			default: /* BIOS refused */
				break;
			}
		}
		if (original_pm_idle)
			original_pm_idle();
		else
			default_idle();
		local_irq_disable();
		jiffies_since_last_check = jiffies - last_jiffies;
		if (jiffies_since_last_check > idle_period)
			goto recalc;
	}

	if (apm_idle_done)
		apm_do_busy();

	local_irq_enable();
}

/**
 * apm_power_off - ask the BIOS to power off
 *
 * Handle the power off sequence. This is the one piece of code we
 * will execute even on SMP machines.
 * In order to deal with BIOS
 * bugs we support real mode APM BIOS power off calls. We also make
 * the SMP call on CPU0 as some systems will only honour this call
 * on their first cpu.
 */
static void apm_power_off(void)
{
	/* Some bioses don't like being called from CPU != 0 */
	if (apm_info.realmode_power_off) {
		set_cpus_allowed_ptr(current, cpumask_of(0));
		machine_real_restart(MRR_APM);
	} else {
		(void)set_system_power_state(APM_STATE_OFF);
	}
}

#ifdef CONFIG_APM_DO_ENABLE

/**
 * apm_enable_power_management - enable BIOS APM power management
 * @enable: enable yes/no
 *
 * Enable or disable the APM BIOS power services.
 */
static int apm_enable_power_management(int enable)
{
	u32 eax;
	int err;

	/* Can't disable PM while the interface is disengaged. */
	if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED))
		return APM_NOT_ENGAGED;
	if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL,
				 enable, &eax, &err))
		return err;
	/* Keep the cached flags in sync with what we just told the BIOS. */
	if (enable)
		apm_info.bios.flags &= ~APM_BIOS_DISABLED;
	else
		apm_info.bios.flags |= APM_BIOS_DISABLED;
	return APM_SUCCESS;
}
#endif

/**
 * apm_get_power_status - get current power state
 * @status: returned status
 * @bat: battery info
 * @life: estimated life
 *
 * Obtain the current power status from the APM BIOS. We return a
 * status which gives the rough battery status, and current power
 * source. The bat value returned give an estimate as a percentage
 * of life and a status value for the battery. The estimated life
 * if reported is a lifetime in seconds/minutes at current power
 * consumption.
 */
static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
{
	struct apm_bios_call call;

	call.func = APM_FUNC_GET_STATUS;
	call.ebx = APM_DEVICE_ALL;
	call.ecx = 0;

	if (apm_info.get_power_status_broken)
		return APM_32_UNSUPPORTED;
	if (apm_bios_call(&call))
		return call.err;
	*status = call.ebx;
	*bat = call.ecx;
	if (apm_info.get_power_status_swabinminutes) {
		/* Some Vaio BIOSes return the lifetime byte-swapped, in
		 * minutes; fix the swap and set the "minutes" flag bit. */
		*life = swab16((u16)call.edx);
		*life |= 0x8000;
	} else
		*life = call.edx;
	return APM_SUCCESS;
}

#if 0
/*
 * NOTE(review): dead code — this still uses the obsolete 8-argument
 * apm_bios_call() signature; the current apm_bios_call() takes a single
 * struct apm_bios_call *, so this block will not compile if re-enabled.
 */
static int apm_get_battery_status(u_short which, u_short *status,
				  u_short *bat, u_short *life, u_short *nbat)
{
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
	u32 esi;

	if (apm_info.connection_version < 0x0102) {
		/* pretend we only have one battery. */
		if (which != 1)
			return APM_BAD_DEVICE;
		*nbat = 1;
		return apm_get_power_status(status, bat, life);
	}

	if (apm_bios_call(APM_FUNC_GET_STATUS, (0x8000 | (which)), 0, &eax,
			  &ebx, &ecx, &edx, &esi))
		return (eax >> 8) & 0xff;
	*status = ebx;
	*bat = ecx;
	*life = edx;
	*nbat = esi;
	return APM_SUCCESS;
}
#endif

/**
 * apm_engage_power_management - enable PM on a device
 * @device: identity of device
 * @enable: on/off
 *
 * Activate or deactivate power management on either a specific device
 * or the entire system (%APM_DEVICE_ALL).
 */
static int apm_engage_power_management(u_short device, int enable)
{
	u32 eax;
	int err;

	if ((enable == 0) && (device == APM_DEVICE_ALL)
	    && (apm_info.bios.flags & APM_BIOS_DISABLED))
		return APM_DISABLED;
	if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable,
				 &eax, &err))
		return err;
	/* Track engagement state for the whole system in the cached flags. */
	if (device == APM_DEVICE_ALL) {
		if (enable)
			apm_info.bios.flags &= ~APM_BIOS_DISENGAGED;
		else
			apm_info.bios.flags |= APM_BIOS_DISENGAGED;
	}
	return APM_SUCCESS;
}

#if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)

/**
 * apm_console_blank - blank the display
 * @blank: on/off
 *
 * Attempt to blank the console, firstly by blanking just video device
 * zero, and if that fails (some BIOSes don't support it) then it blanks
 * all video devices.
 * Typically the BIOS will do laptop backlight and
 * monitor powerdown for us.
 */
static int apm_console_blank(int blank)
{
	int error = APM_NOT_ENGAGED; /* silence gcc */
	int i;
	u_short state;
	static const u_short dev[3] = { 0x100, 0x1FF, 0x101 };

	state = blank ? APM_STATE_STANDBY : APM_STATE_READY;

	/* Try device 0x100 first, then all devices, then device 1. */
	for (i = 0; i < ARRAY_SIZE(dev); i++) {
		error = set_power_state(dev[i], state);

		if ((error == APM_SUCCESS) || (error == APM_NO_ERROR))
			return 1;

		if (error == APM_NOT_ENGAGED)
			break;
	}

	if (error == APM_NOT_ENGAGED) {
		static int tried;
		int eng_error;
		/* Engage the interface once, then retry the blank. */
		if (tried++ == 0) {
			eng_error = apm_engage_power_management(APM_DEVICE_ALL, 1);
			if (eng_error) {
				apm_error("set display", error);
				apm_error("engage interface", eng_error);
				return 0;
			} else
				return apm_console_blank(blank);
		}
	}
	apm_error("set display", error);
	return 0;
}
#endif

/* True when the per-user event ring buffer holds no events. */
static int queue_empty(struct apm_user *as)
{
	return as->event_head == as->event_tail;
}

/* Pop the oldest event from the per-user ring buffer. */
static apm_event_t get_queued_event(struct apm_user *as)
{
	if (++as->event_tail >= APM_MAX_EVENTS)
		as->event_tail = 0;
	return as->events[as->event_tail];
}

/*
 * Broadcast an event to every reader except @sender, dropping the
 * oldest entry of a full queue, and account pending suspend/standby
 * requests for privileged writers.
 */
static void queue_event(apm_event_t event, struct apm_user *sender)
{
	struct apm_user *as;

	spin_lock(&user_list_lock);
	if (user_list == NULL)
		goto out;
	for (as = user_list; as != NULL; as = as->next) {
		if ((as == sender) || (!as->reader))
			continue;
		if (++as->event_head >= APM_MAX_EVENTS)
			as->event_head = 0;

		if (as->event_head == as->event_tail) {
			static int notified;

			/* Ring full: overwrite the oldest event. */
			if (notified++ == 0)
				printk(KERN_ERR "apm: an event queue overflowed\n");
			if (++as->event_tail >= APM_MAX_EVENTS)
				as->event_tail = 0;
		}
		as->events[as->event_head] = event;
		if (!as->suser || !as->writer)
			continue;
		switch (event) {
		case APM_SYS_SUSPEND:
		case APM_USER_SUSPEND:
			as->suspends_pending++;
			suspends_pending++;
			break;

		case APM_SYS_STANDBY:
		case APM_USER_STANDBY:
			as->standbys_pending++;
			standbys_pending++;
			break;
		}
	}
	wake_up_interruptible(&apm_waitqueue);
out:
	spin_unlock(&user_list_lock);
}

/* Reprogram the i8253 PIT back to HZ after a suspend (compile-time opt-in). */
static void reinit_timer(void)
{
#ifdef INIT_TIMER_AFTER_SUSPEND
	unsigned long flags;

	raw_spin_lock_irqsave(&i8253_lock, flags);
	/* set the clock to HZ */
	outb_p(0x34, PIT_MODE);		/* binary, mode 2, LSB/MSB, ch 0 */
	udelay(10);
	outb_p(LATCH & 0xff, PIT_CH0);	/* LSB */
	udelay(10);
	outb_p(LATCH >> 8, PIT_CH0);	/* MSB */
	udelay(10);
	raw_spin_unlock_irqrestore(&i8253_lock, flags);
#endif
}

/*
 * Carry out a full APM suspend: quiesce devices and syscore, save
 * processor state, ask the BIOS to suspend, then resume everything
 * and wake any process waiting on the suspend result.
 */
static int suspend(int vetoable)
{
	int err;
	struct apm_user *as;

	dpm_suspend_start(PMSG_SUSPEND);
	dpm_suspend_end(PMSG_SUSPEND);

	local_irq_disable();
	syscore_suspend();
	local_irq_enable();

	save_processor_state();
	err = set_system_power_state(APM_STATE_SUSPEND);
	/* The BIOS will post a resume event for this suspend; ignore it. */
	ignore_normal_resume = 1;
	restore_processor_state();

	local_irq_disable();
	reinit_timer();

	if (err == APM_NO_ERROR)
		err = APM_SUCCESS;
	if (err != APM_SUCCESS)
		apm_error("suspend", err);
	err = (err == APM_SUCCESS) ? 0 : -EIO;

	syscore_resume();
	local_irq_enable();

	dpm_resume_start(PMSG_RESUME);
	dpm_resume_end(PMSG_RESUME);
	queue_event(APM_NORMAL_RESUME, NULL);
	/* Publish the result to every waiter of this suspend. */
	spin_lock(&user_list_lock);
	for (as = user_list; as != NULL; as = as->next) {
		as->suspend_wait = 0;
		as->suspend_result = err;
	}
	spin_unlock(&user_list_lock);
	wake_up_interruptible(&apm_suspend_waitqueue);
	return err;
}

/* Enter APM standby: a lighter version of suspend() with no waiters. */
static void standby(void)
{
	int err;

	dpm_suspend_end(PMSG_SUSPEND);

	local_irq_disable();
	syscore_suspend();
	local_irq_enable();

	err = set_system_power_state(APM_STATE_STANDBY);
	if ((err != APM_SUCCESS) && (err != APM_NO_ERROR))
		apm_error("standby", err);

	local_irq_disable();
	syscore_resume();
	local_irq_enable();

	dpm_resume_start(PMSG_RESUME);
}

/* Poll the BIOS for the next pending event; 0 means none. */
static apm_event_t get_event(void)
{
	int error;
	apm_event_t event = APM_NO_EVENTS; /* silence gcc */
	apm_eventinfo_t info;

	static int notified;

	/* we don't use the eventinfo */
	error = apm_get_event(&event, &info);
	if (error == APM_SUCCESS)
		return event;

	if ((error != APM_NO_EVENTS) && (notified++ == 0))
		apm_error("get_event", error);

	return 0;
}

/* Drain all pending BIOS events and dispatch each to its handler. */
static void check_events(void)
{
	apm_event_t event;
	static unsigned long last_resume;
	static int ignore_bounce;

	while ((event = get_event()) != 0) {
		if (debug) {
			/* Event codes are 1-based; index the name table at -1. */
			if (event <= NR_APM_EVENT_NAME)
				printk(KERN_DEBUG "apm: received %s notify\n",
				       apm_event_name[event - 1]);
			else
				printk(KERN_DEBUG "apm: received unknown "
				       "event 0x%02x\n", event);
		}
		if (ignore_bounce
		    && (time_after(jiffies, last_resume + bounce_interval)))
			ignore_bounce = 0;

		switch (event) {
		case APM_SYS_STANDBY:
		case APM_USER_STANDBY:
			queue_event(event, NULL);
			if (standbys_pending <= 0)
				standby();
			break;

		case APM_USER_SUSPEND:
#ifdef CONFIG_APM_IGNORE_USER_SUSPEND
			if (apm_info.connection_version > 0x100)
				set_system_power_state(APM_STATE_REJECT);
			break;
#endif
		/* fall through to system suspend handling */
		case APM_SYS_SUSPEND:
			if (ignore_bounce) {
				if (apm_info.connection_version > 0x100)
					set_system_power_state(APM_STATE_REJECT);
				break;
			}
			/*
			 * If we are already processing a SUSPEND,
			 * then further SUSPEND events from the BIOS
			 * will be ignored.  We also return here to
			 * cope with the fact that the Thinkpads keep
			 * sending a SUSPEND event until something else
			 * happens!
			 */
			if (ignore_sys_suspend)
				return;
			ignore_sys_suspend = 1;
			queue_event(event, NULL);
			if (suspends_pending <= 0)
				(void) suspend(1);
			break;

		case APM_NORMAL_RESUME:
		case APM_CRITICAL_RESUME:
		case APM_STANDBY_RESUME:
			ignore_sys_suspend = 0;
			last_resume = jiffies;
			ignore_bounce = 1;
			if ((event != APM_NORMAL_RESUME) ||
			    (ignore_normal_resume == 0)) {
				dpm_resume_end(PMSG_RESUME);
				queue_event(event, NULL);
			}
			ignore_normal_resume = 0;
			break;

		case APM_CAPABILITY_CHANGE:
		case APM_LOW_BATTERY:
		case APM_POWER_STATUS_CHANGE:
			queue_event(event, NULL);
			/* If needed, notify drivers here */
			break;

		case APM_UPDATE_TIME:
			break;

		case APM_CRITICAL_SUSPEND:
			/*
			 * We are not allowed to reject a critical suspend.
			 */
			(void)suspend(0);
			break;
		}
	}
}

/*
 * Tell the BIOS we are still busy processing pending suspend/standby
 * requests (APM 1.1+ expects this every few polls), then drain events.
 */
static void apm_event_handler(void)
{
	static int pending_count = 4;
	int err;

	if ((standbys_pending > 0) || (suspends_pending > 0)) {
		if ((apm_info.connection_version > 0x100) &&
		    (pending_count-- <= 0)) {
			pending_count = 4;
			if (debug)
				printk(KERN_DEBUG "apm: setting state busy\n");
			err = set_system_power_state(APM_STATE_BUSY);
			if (err)
				apm_error("busy", err);
		}
	} else
		pending_count = 4;
	check_events();
}

/*
 * This is the APM thread main loop.
 */

static void apm_mainloop(void)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&apm_waitqueue, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	for (;;) {
		/* Poll the BIOS once a second (or sooner if woken). */
		schedule_timeout(APM_CHECK_TIMEOUT);
		if (kthread_should_stop())
			break;
		/*
		 * Ok, check all events, check for idle (and mark us sleeping
		 * so as not to count towards the load average)..
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		apm_event_handler();
	}
	remove_wait_queue(&apm_waitqueue, &wait);
}

/* Validate the per-file private data; returns non-zero on a bad filp. */
static int check_apm_user(struct apm_user *as, const char *func)
{
	if (as == NULL || as->magic != APM_BIOS_MAGIC) {
		printk(KERN_ERR "apm: %s passed bad filp\n", func);
		return 1;
	}
	return 0;
}

/*
 * read() on /dev/apm_bios: block until events are queued, then copy
 * out as many whole apm_event_t records as fit in the user buffer.
 */
static ssize_t do_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
	struct apm_user *as;
	int i;
	apm_event_t event;

	as = fp->private_data;
	if (check_apm_user(as, "read"))
		return -EIO;
	if ((int)count < sizeof(apm_event_t))
		return -EINVAL;
	if ((queue_empty(as)) && (fp->f_flags & O_NONBLOCK))
		return -EAGAIN;
	wait_event_interruptible(apm_waitqueue, !queue_empty(as));
	i = count;
	while ((i >= sizeof(event)) && !queue_empty(as)) {
		event = get_queued_event(as);
		if (copy_to_user(buf, &event, sizeof(event))) {
			/* Return a short read if we already copied something. */
			if (i < count)
				break;
			return -EFAULT;
		}
		/* Remember suspend/standby reads for later acknowledgement. */
		switch (event) {
		case APM_SYS_SUSPEND:
		case APM_USER_SUSPEND:
			as->suspends_read++;
			break;

		case APM_SYS_STANDBY:
		case APM_USER_STANDBY:
			as->standbys_read++;
			break;
		}
		buf += sizeof(event);
		i -= sizeof(event);
	}
	if (i < count)
		return count - i;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}

/* do_poll() continues on the following chunk line. */
static
unsigned int do_poll(struct file *fp, poll_table *wait) { struct apm_user *as; as = fp->private_data; if (check_apm_user(as, "poll")) return 0; poll_wait(fp, &apm_waitqueue, wait); if (!queue_empty(as)) return POLLIN | POLLRDNORM; return 0; } static long do_ioctl(struct file *filp, u_int cmd, u_long arg) { struct apm_user *as; int ret; as = filp->private_data; if (check_apm_user(as, "ioctl")) return -EIO; if (!as->suser || !as->writer) return -EPERM; switch (cmd) { case APM_IOC_STANDBY: mutex_lock(&apm_mutex); if (as->standbys_read > 0) { as->standbys_read--; as->standbys_pending--; standbys_pending--; } else queue_event(APM_USER_STANDBY, as); if (standbys_pending <= 0) standby(); mutex_unlock(&apm_mutex); break; case APM_IOC_SUSPEND: mutex_lock(&apm_mutex); if (as->suspends_read > 0) { as->suspends_read--; as->suspends_pending--; suspends_pending--; } else queue_event(APM_USER_SUSPEND, as); if (suspends_pending <= 0) { ret = suspend(1); mutex_unlock(&apm_mutex); } else { as->suspend_wait = 1; mutex_unlock(&apm_mutex); wait_event_interruptible(apm_suspend_waitqueue, as->suspend_wait == 0); ret = as->suspend_result; } return ret; default: return -ENOTTY; } return 0; } static int do_release(struct inode *inode, struct file *filp) { struct apm_user *as; as = filp->private_data; if (check_apm_user(as, "release")) return 0; filp->private_data = NULL; if (as->standbys_pending > 0) { standbys_pending -= as->standbys_pending; if (standbys_pending <= 0) standby(); } if (as->suspends_pending > 0) { suspends_pending -= as->suspends_pending; if (suspends_pending <= 0) (void) suspend(1); } spin_lock(&user_list_lock); if (user_list == as) user_list = as->next; else { struct apm_user *as1; for (as1 = user_list; (as1 != NULL) && (as1->next != as); as1 = as1->next) ; if (as1 == NULL) printk(KERN_ERR "apm: filp not in user list\n"); else as1->next = as->next; } spin_unlock(&user_list_lock); kfree(as); return 0; } static int do_open(struct inode *inode, struct file *filp) { struct 
apm_user *as; as = kmalloc(sizeof(*as), GFP_KERNEL); if (as == NULL) { printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", sizeof(*as)); return -ENOMEM; } as->magic = APM_BIOS_MAGIC; as->event_tail = as->event_head = 0; as->suspends_pending = as->standbys_pending = 0; as->suspends_read = as->standbys_read = 0; /* * XXX - this is a tiny bit broken, when we consider BSD * process accounting. If the device is opened by root, we * instantly flag that we used superuser privs. Who knows, * we might close the device immediately without doing a * privileged operation -- cevans */ as->suser = capable(CAP_SYS_ADMIN); as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE; as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ; spin_lock(&user_list_lock); as->next = user_list; user_list = as; spin_unlock(&user_list_lock); filp->private_data = as; return 0; } static int proc_apm_show(struct seq_file *m, void *v) { unsigned short bx; unsigned short cx; unsigned short dx; int error; unsigned short ac_line_status = 0xff; unsigned short battery_status = 0xff; unsigned short battery_flag = 0xff; int percentage = -1; int time_units = -1; char *units = "?"; if ((num_online_cpus() == 1) && !(error = apm_get_power_status(&bx, &cx, &dx))) { ac_line_status = (bx >> 8) & 0xff; battery_status = bx & 0xff; if ((cx & 0xff) != 0xff) percentage = cx & 0xff; if (apm_info.connection_version > 0x100) { battery_flag = (cx >> 8) & 0xff; if (dx != 0xffff) { units = (dx & 0x8000) ? "min" : "sec"; time_units = dx & 0x7fff; } } } /* Arguments, with symbols from linux/apm_bios.h. Information is from the Get Power Status (0x0a) call unless otherwise noted. 0) Linux driver version (this will change if format changes) 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2. 
2) APM flags from APM Installation Check (0x00): bit 0: APM_16_BIT_SUPPORT bit 1: APM_32_BIT_SUPPORT bit 2: APM_IDLE_SLOWS_CLOCK bit 3: APM_BIOS_DISABLED bit 4: APM_BIOS_DISENGAGED 3) AC line status 0x00: Off-line 0x01: On-line 0x02: On backup power (BIOS >= 1.1 only) 0xff: Unknown 4) Battery status 0x00: High 0x01: Low 0x02: Critical 0x03: Charging 0x04: Selected battery not present (BIOS >= 1.2 only) 0xff: Unknown 5) Battery flag bit 0: High bit 1: Low bit 2: Critical bit 3: Charging bit 7: No system battery 0xff: Unknown 6) Remaining battery life (percentage of charge): 0-100: valid -1: Unknown 7) Remaining battery life (time units): Number of remaining minutes or seconds -1: Unknown 8) min = minutes; sec = seconds */ seq_printf(m, "%s %d.%d 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", driver_version, (apm_info.bios.version >> 8) & 0xff, apm_info.bios.version & 0xff, apm_info.bios.flags, ac_line_status, battery_status, battery_flag, percentage, time_units, units); return 0; } static int proc_apm_open(struct inode *inode, struct file *file) { return single_open(file, proc_apm_show, NULL); } static const struct file_operations apm_file_ops = { .owner = THIS_MODULE, .open = proc_apm_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int apm(void *unused) { unsigned short bx; unsigned short cx; unsigned short dx; int error; char *power_stat; char *bat_stat; /* 2002/08/01 - WT * This is to avoid random crashes at boot time during initialization * on SMP systems in case of "apm=power-off" mode. Seen on ASUS A7M266D. * Some bioses don't like being called from CPU != 0. * Method suggested by Ingo Molnar. 
*/ set_cpus_allowed_ptr(current, cpumask_of(0)); BUG_ON(smp_processor_id() != 0); if (apm_info.connection_version == 0) { apm_info.connection_version = apm_info.bios.version; if (apm_info.connection_version > 0x100) { /* * We only support BIOSs up to version 1.2 */ if (apm_info.connection_version > 0x0102) apm_info.connection_version = 0x0102; error = apm_driver_version(&apm_info.connection_version); if (error != APM_SUCCESS) { apm_error("driver version", error); /* Fall back to an APM 1.0 connection. */ apm_info.connection_version = 0x100; } } } if (debug) printk(KERN_INFO "apm: Connection version %d.%d\n", (apm_info.connection_version >> 8) & 0xff, apm_info.connection_version & 0xff); #ifdef CONFIG_APM_DO_ENABLE if (apm_info.bios.flags & APM_BIOS_DISABLED) { /* * This call causes my NEC UltraLite Versa 33/C to hang if it * is booted with PM disabled but not in the docking station. * Unfortunate ... */ error = apm_enable_power_management(1); if (error) { apm_error("enable power management", error); return -1; } } #endif if ((apm_info.bios.flags & APM_BIOS_DISENGAGED) && (apm_info.connection_version > 0x0100)) { error = apm_engage_power_management(APM_DEVICE_ALL, 1); if (error) { apm_error("engage power management", error); return -1; } } if (debug && (num_online_cpus() == 1 || smp)) { error = apm_get_power_status(&bx, &cx, &dx); if (error) printk(KERN_INFO "apm: power status not available\n"); else { switch ((bx >> 8) & 0xff) { case 0: power_stat = "off line"; break; case 1: power_stat = "on line"; break; case 2: power_stat = "on backup power"; break; default: power_stat = "unknown"; break; } switch (bx & 0xff) { case 0: bat_stat = "high"; break; case 1: bat_stat = "low"; break; case 2: bat_stat = "critical"; break; case 3: bat_stat = "charging"; break; default: bat_stat = "unknown"; break; } printk(KERN_INFO "apm: AC %s, battery status %s, battery life ", power_stat, bat_stat); if ((cx & 0xff) == 0xff) printk("unknown\n"); else printk("%d%%\n", cx & 0xff); if 
(apm_info.connection_version > 0x100) { printk(KERN_INFO "apm: battery flag 0x%02x, battery life ", (cx >> 8) & 0xff); if (dx == 0xffff) printk("unknown\n"); else printk("%d %s\n", dx & 0x7fff, (dx & 0x8000) ? "minutes" : "seconds"); } } } /* Install our power off handler.. */ if (power_off) pm_power_off = apm_power_off; if (num_online_cpus() == 1 || smp) { #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) console_blank_hook = apm_console_blank; #endif apm_mainloop(); #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) console_blank_hook = NULL; #endif } return 0; } #ifndef MODULE static int __init apm_setup(char *str) { int invert; while ((str != NULL) && (*str != '\0')) { if (strncmp(str, "off", 3) == 0) apm_disabled = 1; if (strncmp(str, "on", 2) == 0) apm_disabled = 0; if ((strncmp(str, "bounce-interval=", 16) == 0) || (strncmp(str, "bounce_interval=", 16) == 0)) bounce_interval = simple_strtol(str + 16, NULL, 0); if ((strncmp(str, "idle-threshold=", 15) == 0) || (strncmp(str, "idle_threshold=", 15) == 0)) idle_threshold = simple_strtol(str + 15, NULL, 0); if ((strncmp(str, "idle-period=", 12) == 0) || (strncmp(str, "idle_period=", 12) == 0)) idle_period = simple_strtol(str + 12, NULL, 0); invert = (strncmp(str, "no-", 3) == 0) || (strncmp(str, "no_", 3) == 0); if (invert) str += 3; if (strncmp(str, "debug", 5) == 0) debug = !invert; if ((strncmp(str, "power-off", 9) == 0) || (strncmp(str, "power_off", 9) == 0)) power_off = !invert; if (strncmp(str, "smp", 3) == 0) { smp = !invert; idle_threshold = 100; } if ((strncmp(str, "allow-ints", 10) == 0) || (strncmp(str, "allow_ints", 10) == 0)) apm_info.allow_ints = !invert; if ((strncmp(str, "broken-psr", 10) == 0) || (strncmp(str, "broken_psr", 10) == 0)) apm_info.get_power_status_broken = !invert; if ((strncmp(str, "realmode-power-off", 18) == 0) || (strncmp(str, "realmode_power_off", 18) == 0)) apm_info.realmode_power_off = !invert; str = strchr(str, ','); if (str != NULL) str += strspn(str, ", 
\t"); } return 1; } __setup("apm=", apm_setup); #endif static const struct file_operations apm_bios_fops = { .owner = THIS_MODULE, .read = do_read, .poll = do_poll, .unlocked_ioctl = do_ioctl, .open = do_open, .release = do_release, .llseek = noop_llseek, }; static struct miscdevice apm_device = { APM_MINOR_DEV, "apm_bios", &apm_bios_fops }; /* Simple "print if true" callback */ static int __init print_if_true(const struct dmi_system_id *d) { printk("%s\n", d->ident); return 0; } /* * Some Bioses enable the PS/2 mouse (touchpad) at resume, even if it was * disabled before the suspend. Linux used to get terribly confused by that. */ static int __init broken_ps2_resume(const struct dmi_system_id *d) { printk(KERN_INFO "%s machine detected. Mousepad Resume Bug " "workaround hopefully not needed.\n", d->ident); return 0; } /* Some bioses have a broken protected mode poweroff and need to use realmode */ static int __init set_realmode_power_off(const struct dmi_system_id *d) { if (apm_info.realmode_power_off == 0) { apm_info.realmode_power_off = 1; printk(KERN_INFO "%s bios detected. " "Using realmode poweroff only.\n", d->ident); } return 0; } /* Some laptops require interrupts to be enabled during APM calls */ static int __init set_apm_ints(const struct dmi_system_id *d) { if (apm_info.allow_ints == 0) { apm_info.allow_ints = 1; printk(KERN_INFO "%s machine detected. " "Enabling interrupts during APM calls.\n", d->ident); } return 0; } /* Some APM bioses corrupt memory or just plain do not work */ static int __init apm_is_horked(const struct dmi_system_id *d) { if (apm_info.disabled == 0) { apm_info.disabled = 1; printk(KERN_INFO "%s machine detected. " "Disabling APM.\n", d->ident); } return 0; } static int __init apm_is_horked_d850md(const struct dmi_system_id *d) { if (apm_info.disabled == 0) { apm_info.disabled = 1; printk(KERN_INFO "%s machine detected. 
" "Disabling APM.\n", d->ident); printk(KERN_INFO "This bug is fixed in bios P15 which is available for\n"); printk(KERN_INFO "download from support.intel.com\n"); } return 0; } /* Some APM bioses hang on APM idle calls */ static int __init apm_likes_to_melt(const struct dmi_system_id *d) { if (apm_info.forbid_idle == 0) { apm_info.forbid_idle = 1; printk(KERN_INFO "%s machine detected. " "Disabling APM idle calls.\n", d->ident); } return 0; } /* * Check for clue free BIOS implementations who use * the following QA technique * * [ Write BIOS Code ]<------ * | ^ * < Does it Compile >----N-- * |Y ^ * < Does it Boot Win98 >-N-- * |Y * [Ship It] * * Phoenix A04 08/24/2000 is known bad (Dell Inspiron 5000e) * Phoenix A07 09/29/2000 is known good (Dell Inspiron 5000) */ static int __init broken_apm_power(const struct dmi_system_id *d) { apm_info.get_power_status_broken = 1; printk(KERN_WARNING "BIOS strings suggest APM bugs, " "disabling power status reporting.\n"); return 0; } /* * This bios swaps the APM minute reporting bytes over (Many sony laptops * have this problem). 
*/ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d) { apm_info.get_power_status_swabinminutes = 1; printk(KERN_WARNING "BIOS strings suggest APM reports battery life " "in minutes and wrong byte order.\n"); return 0; } static struct dmi_system_id __initdata apm_dmi_table[] = { { print_if_true, KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.", { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), DMI_MATCH(DMI_BIOS_VERSION, "1AET38WW (1.01b)"), }, }, { /* Handle problems with APM on the C600 */ broken_ps2_resume, "Dell Latitude C600", { DMI_MATCH(DMI_SYS_VENDOR, "Dell"), DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C600"), }, }, { /* Allow interrupts during suspend on Dell Latitude laptops*/ set_apm_ints, "Dell Latitude", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C510"), } }, { /* APM crashes */ apm_is_horked, "Dell Inspiron 2500", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A11"), }, }, { /* Allow interrupts during suspend on Dell Inspiron laptops*/ set_apm_ints, "Dell Inspiron", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 4000"), }, }, { /* Handle problems with APM on Inspiron 5000e */ broken_apm_power, "Dell Inspiron 5000e", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A04"), DMI_MATCH(DMI_BIOS_DATE, "08/24/2000"), }, }, { /* Handle problems with APM on Inspiron 2500 */ broken_apm_power, "Dell Inspiron 2500", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A12"), DMI_MATCH(DMI_BIOS_DATE, "02/04/2002"), }, }, { /* APM crashes */ apm_is_horked, "Dell Dimension 4100", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), DMI_MATCH(DMI_BIOS_VENDOR, 
"Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "A11"), }, }, { /* Allow interrupts during suspend on Compaq Laptops*/ set_apm_ints, "Compaq 12XL125", { DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), DMI_MATCH(DMI_PRODUCT_NAME, "Compaq PC"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "4.06"), }, }, { /* Allow interrupts during APM or the clock goes slow */ set_apm_ints, "ASUSTeK", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "L8400K series Notebook PC"), }, }, { /* APM blows on shutdown */ apm_is_horked, "ABIT KX7-333[R]", { DMI_MATCH(DMI_BOARD_VENDOR, "ABIT"), DMI_MATCH(DMI_BOARD_NAME, "VT8367-8233A (KX7-333[R])"), }, }, { /* APM crashes */ apm_is_horked, "Trigem Delhi3", { DMI_MATCH(DMI_SYS_VENDOR, "TriGem Computer, Inc"), DMI_MATCH(DMI_PRODUCT_NAME, "Delhi3"), }, }, { /* APM crashes */ apm_is_horked, "Fujitsu-Siemens", { DMI_MATCH(DMI_BIOS_VENDOR, "hoenix/FUJITSU SIEMENS"), DMI_MATCH(DMI_BIOS_VERSION, "Version1.01"), }, }, { /* APM crashes */ apm_is_horked_d850md, "Intel D850MD", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "MV85010A.86A.0016.P07.0201251536"), }, }, { /* APM crashes */ apm_is_horked, "Intel D810EMO", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "MO81010A.86A.0008.P04.0004170800"), }, }, { /* APM crashes */ apm_is_horked, "Dell XPS-Z", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "A11"), DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), }, }, { /* APM crashes */ apm_is_horked, "Sharp PC-PJ/AX", { DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), DMI_MATCH(DMI_PRODUCT_NAME, "PC-PJ/AX"), DMI_MATCH(DMI_BIOS_VENDOR, "SystemSoft"), DMI_MATCH(DMI_BIOS_VERSION, "Version R2.08"), }, }, { /* APM crashes */ apm_is_horked, "Dell Inspiron 2500", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A11"), 
}, }, { /* APM idle hangs */ apm_likes_to_melt, "Jabil AMD", { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), DMI_MATCH(DMI_BIOS_VERSION, "0AASNP06"), }, }, { /* APM idle hangs */ apm_likes_to_melt, "AMI Bios", { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), DMI_MATCH(DMI_BIOS_VERSION, "0AASNP05"), }, }, { /* Handle problems with APM on Sony Vaio PCG-N505X(DE) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0206H"), DMI_MATCH(DMI_BIOS_DATE, "08/23/99"), }, }, { /* Handle problems with APM on Sony Vaio PCG-N505VX */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "W2K06H0"), DMI_MATCH(DMI_BIOS_DATE, "02/03/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-XG29 */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0117A0"), DMI_MATCH(DMI_BIOS_DATE, "04/25/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z600NE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0121Z1"), DMI_MATCH(DMI_BIOS_DATE, "05/11/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z600NE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "WME01Z1"), DMI_MATCH(DMI_BIOS_DATE, "08/11/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z600LEK(DE) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0206Z3"), DMI_MATCH(DMI_BIOS_DATE, "12/25/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0203D0"), DMI_MATCH(DMI_BIOS_DATE, "05/12/00"), }, 
}, { /* Handle problems with APM on Sony Vaio PCG-Z505LS */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0203Z3"), DMI_MATCH(DMI_BIOS_DATE, "08/25/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS (with updated BIOS) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0209Z3"), DMI_MATCH(DMI_BIOS_DATE, "05/12/01"), }, }, { /* Handle problems with APM on Sony Vaio PCG-F104K */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0204K2"), DMI_MATCH(DMI_BIOS_DATE, "08/28/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-C1VN/C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0208P1"), DMI_MATCH(DMI_BIOS_DATE, "11/09/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0204P1"), DMI_MATCH(DMI_BIOS_DATE, "09/12/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "WXPO1Z3"), DMI_MATCH(DMI_BIOS_DATE, "10/26/01"), }, }, { /* broken PM poweroff bios */ set_realmode_power_off, "Award Software v4.60 PGMA", { DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), DMI_MATCH(DMI_BIOS_VERSION, "4.60 PGMA"), DMI_MATCH(DMI_BIOS_DATE, "134526184"), }, }, /* Generic per vendor APM settings */ { /* Allow interrupts during suspend on IBM laptops */ set_apm_ints, "IBM", { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, { } }; /* * Just start the APM thread. 
We do NOT want to do APM BIOS * calls from anything but the APM thread, if for no other reason * than the fact that we don't trust the APM BIOS. This way, * most common APM BIOS problems that lead to protection errors * etc will have at least some level of being contained... * * In short, if something bad happens, at least we have a choice * of just killing the apm thread.. */ static int __init apm_init(void) { struct desc_struct *gdt; int err; dmi_check_system(apm_dmi_table); if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) { printk(KERN_INFO "apm: BIOS not found.\n"); return -ENODEV; } printk(KERN_INFO "apm: BIOS version %d.%d Flags 0x%02x (Driver version %s)\n", ((apm_info.bios.version >> 8) & 0xff), (apm_info.bios.version & 0xff), apm_info.bios.flags, driver_version); if ((apm_info.bios.flags & APM_32_BIT_SUPPORT) == 0) { printk(KERN_INFO "apm: no 32 bit BIOS support\n"); return -ENODEV; } if (allow_ints) apm_info.allow_ints = 1; if (broken_psr) apm_info.get_power_status_broken = 1; if (realmode_power_off) apm_info.realmode_power_off = 1; /* User can override, but default is to trust DMI */ if (apm_disabled != -1) apm_info.disabled = apm_disabled; /* * Fix for the Compaq Contura 3/25c which reports BIOS version 0.1 * but is reportedly a 1.0 BIOS. 
*/ if (apm_info.bios.version == 0x001) apm_info.bios.version = 0x100; /* BIOS < 1.2 doesn't set cseg_16_len */ if (apm_info.bios.version < 0x102) apm_info.bios.cseg_16_len = 0; /* 64k */ if (debug) { printk(KERN_INFO "apm: entry %x:%x cseg16 %x dseg %x", apm_info.bios.cseg, apm_info.bios.offset, apm_info.bios.cseg_16, apm_info.bios.dseg); if (apm_info.bios.version > 0x100) printk(" cseg len %x, dseg len %x", apm_info.bios.cseg_len, apm_info.bios.dseg_len); if (apm_info.bios.version > 0x101) printk(" cseg16 len %x", apm_info.bios.cseg_16_len); printk("\n"); } if (apm_info.disabled) { printk(KERN_NOTICE "apm: disabled on user request.\n"); return -ENODEV; } if ((num_online_cpus() > 1) && !power_off && !smp) { printk(KERN_NOTICE "apm: disabled - APM is not SMP safe.\n"); apm_info.disabled = 1; return -ENODEV; } if (!acpi_disabled) { printk(KERN_NOTICE "apm: overridden by ACPI.\n"); apm_info.disabled = 1; return -ENODEV; } /* * Set up the long jump entry point to the APM BIOS, which is called * from inline assembly. */ apm_bios_entry.offset = apm_info.bios.offset; apm_bios_entry.segment = APM_CS; /* * The APM 1.1 BIOS is supposed to provide limit information that it * recognizes. Many machines do this correctly, but many others do * not restrict themselves to their claimed limit. When this happens, * they will cause a segmentation violation in the kernel at boot time. * Most BIOS's, however, will respect a 64k limit, so we use that. * * Note we only set APM segments on CPU zero, since we pin the APM * code to that CPU. 
*/ gdt = get_cpu_gdt_table(0); set_desc_base(&gdt[APM_CS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); set_desc_base(&gdt[APM_CS_16 >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); set_desc_base(&gdt[APM_DS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); proc_create("apm", 0, NULL, &apm_file_ops); kapmd_task = kthread_create(apm, NULL, "kapmd"); if (IS_ERR(kapmd_task)) { printk(KERN_ERR "apm: disabled - Unable to start kernel " "thread.\n"); err = PTR_ERR(kapmd_task); kapmd_task = NULL; remove_proc_entry("apm", NULL); return err; } wake_up_process(kapmd_task); if (num_online_cpus() > 1 && !smp) { printk(KERN_NOTICE "apm: disabled - APM is not SMP safe (power off active).\n"); return 0; } /* * Note we don't actually care if the misc_device cannot be registered. * this driver can do its job without it, even if userspace can't * control it. just log the error */ if (misc_register(&apm_device)) printk(KERN_WARNING "apm: Could not register misc device.\n"); if (HZ != 100) idle_period = (idle_period * HZ) / 100; if (idle_threshold < 100) { original_pm_idle = pm_idle; pm_idle = apm_cpu_idle; set_pm_idle = 1; } return 0; } static void __exit apm_exit(void) { int error; if (set_pm_idle) { pm_idle = original_pm_idle; /* * We are about to unload the current idle thread pm callback * (pm_idle), Wait for all processors to update cached/local * copies of pm_idle before proceeding. 
*/ cpu_idle_wait(); } if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0) && (apm_info.connection_version > 0x0100)) { error = apm_engage_power_management(APM_DEVICE_ALL, 0); if (error) apm_error("disengage power management", error); } misc_deregister(&apm_device); remove_proc_entry("apm", NULL); if (power_off) pm_power_off = NULL; if (kapmd_task) { kthread_stop(kapmd_task); kapmd_task = NULL; } } module_init(apm_init); module_exit(apm_exit); MODULE_AUTHOR("Stephen Rothwell"); MODULE_DESCRIPTION("Advanced Power Management"); MODULE_LICENSE("GPL"); module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Enable debug mode"); module_param(power_off, bool, 0444); MODULE_PARM_DESC(power_off, "Enable power off"); module_param(bounce_interval, int, 0444); MODULE_PARM_DESC(bounce_interval, "Set the number of ticks to ignore suspend bounces"); module_param(allow_ints, bool, 0444); MODULE_PARM_DESC(allow_ints, "Allow interrupts during BIOS calls"); module_param(broken_psr, bool, 0444); MODULE_PARM_DESC(broken_psr, "BIOS has a broken GetPowerStatus call"); module_param(realmode_power_off, bool, 0444); MODULE_PARM_DESC(realmode_power_off, "Switch to real mode before powering off"); module_param(idle_threshold, int, 0444); MODULE_PARM_DESC(idle_threshold, "System idle percentage above which to make APM BIOS idle calls"); module_param(idle_period, int, 0444); MODULE_PARM_DESC(idle_period, "Period (in sec/100) over which to caculate the idle percentage"); module_param(smp, bool, 0444); MODULE_PARM_DESC(smp, "Set this to enable APM use on an SMP platform. Use with caution on older systems"); MODULE_ALIAS_MISCDEV(APM_MINOR_DEV);
gpl-2.0
deepjyotisaran/android_kernel_samsung_exynos5410
drivers/media/dvb/frontends/ix2505v.c
5177
7899
/** * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner * * Copyright (C) 2010 Malcolm Priestley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License Version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/module.h> #include <linux/dvb/frontend.h> #include <linux/slab.h> #include <linux/types.h> #include "ix2505v.h" static int ix2505v_debug; #define dprintk(level, args...) do { \ if (ix2505v_debug & level) \ printk(KERN_DEBUG "ix2505v: " args); \ } while (0) #define deb_info(args...) dprintk(0x01, args) #define deb_i2c(args...) dprintk(0x02, args) struct ix2505v_state { struct i2c_adapter *i2c; const struct ix2505v_config *config; u32 frequency; }; /** * Data read format of the Sharp IX2505V B0017 * * byte1: 1 | 1 | 0 | 0 | 0 | MA1 | MA0 | 1 * byte2: POR | FL | RD2 | RD1 | RD0 | X | X | X * * byte1 = address * byte2; * POR = Power on Reset (VCC H=<2.2v L=>2.2v) * FL = Phase Lock (H=lock L=unlock) * RD0-2 = Reserved internal operations * * Only POR can be used to check the tuner is present * * Caution: after byte2 the I2C reverts to write mode continuing to read * may corrupt tuning data. * */ static int ix2505v_read_status_reg(struct ix2505v_state *state) { u8 addr = state->config->tuner_address; u8 b2[] = {0}; int ret; struct i2c_msg msg[1] = { { .addr = addr, .flags = I2C_M_RD, .buf = b2, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 1); deb_i2c("Read %s ", __func__); return (ret == 1) ? 
(int) b2[0] : -1; } static int ix2505v_write(struct ix2505v_state *state, u8 buf[], u8 count) { struct i2c_msg msg[1] = { { .addr = state->config->tuner_address, .flags = 0, .buf = buf, .len = count }, }; int ret; ret = i2c_transfer(state->i2c, msg, 1); if (ret != 1) { deb_i2c("%s: i2c error, ret=%d\n", __func__, ret); return -EIO; } return 0; } static int ix2505v_release(struct dvb_frontend *fe) { struct ix2505v_state *state = fe->tuner_priv; fe->tuner_priv = NULL; kfree(state); return 0; } /** * Data write format of the Sharp IX2505V B0017 * * byte1: 1 | 1 | 0 | 0 | 0 | 0(MA1)| 0(MA0)| 0 * byte2: 0 | BG1 | BG2 | N8 | N7 | N6 | N5 | N4 * byte3: N3 | N2 | N1 | A5 | A4 | A3 | A2 | A1 * byte4: 1 | 1(C1) | 1(C0) | PD5 | PD4 | TM | 0(RTS)| 1(REF) * byte5: BA2 | BA1 | BA0 | PSC | PD3 |PD2/TS2|DIV/TS1|PD0/TS0 * * byte1 = address * * Write order * 1) byte1 -> byte2 -> byte3 -> byte4 -> byte5 * 2) byte1 -> byte4 -> byte5 -> byte2 -> byte3 * 3) byte1 -> byte2 -> byte3 -> byte4 * 4) byte1 -> byte4 -> byte5 -> byte2 * 5) byte1 -> byte2 -> byte3 * 6) byte1 -> byte4 -> byte5 * 7) byte1 -> byte2 * 8) byte1 -> byte4 * * Recommended Setup * 1 -> 8 -> 6 */ static int ix2505v_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct ix2505v_state *state = fe->tuner_priv; u32 frequency = c->frequency; u32 b_w = (c->symbol_rate * 27) / 32000; u32 div_factor, N , A, x; int ret = 0, len; u8 gain, cc, ref, psc, local_osc, lpf; u8 data[4] = {0}; if ((frequency < fe->ops.info.frequency_min) || (frequency > fe->ops.info.frequency_max)) return -EINVAL; if (state->config->tuner_gain) gain = (state->config->tuner_gain < 4) ? 
state->config->tuner_gain : 0; else gain = 0x0; if (state->config->tuner_chargepump) cc = state->config->tuner_chargepump; else cc = 0x3; ref = 8; /* REF =1 */ psc = 32; /* PSC = 0 */ div_factor = (frequency * ref) / 40; /* local osc = 4Mhz */ x = div_factor / psc; N = x/100; A = ((x - (N * 100)) * psc) / 100; data[0] = ((gain & 0x3) << 5) | (N >> 3); data[1] = (N << 5) | (A & 0x1f); data[2] = 0x81 | ((cc & 0x3) << 5) ; /*PD5,PD4 & TM = 0|C1,C0|REF=1*/ deb_info("Frq=%d x=%d N=%d A=%d\n", frequency, x, N, A); if (frequency <= 1065000) local_osc = (6 << 5) | 2; else if (frequency <= 1170000) local_osc = (7 << 5) | 2; else if (frequency <= 1300000) local_osc = (1 << 5); else if (frequency <= 1445000) local_osc = (2 << 5); else if (frequency <= 1607000) local_osc = (3 << 5); else if (frequency <= 1778000) local_osc = (4 << 5); else if (frequency <= 1942000) local_osc = (5 << 5); else /*frequency up to 2150000*/ local_osc = (6 << 5); data[3] = local_osc; /* all other bits set 0 */ if (b_w <= 10000) lpf = 0xc; else if (b_w <= 12000) lpf = 0x2; else if (b_w <= 14000) lpf = 0xa; else if (b_w <= 16000) lpf = 0x6; else if (b_w <= 18000) lpf = 0xe; else if (b_w <= 20000) lpf = 0x1; else if (b_w <= 22000) lpf = 0x9; else if (b_w <= 24000) lpf = 0x5; else if (b_w <= 26000) lpf = 0xd; else if (b_w <= 28000) lpf = 0x3; else lpf = 0xb; deb_info("Osc=%x b_w=%x lpf=%x\n", local_osc, b_w, lpf); deb_info("Data 0=[%x%x%x%x]\n", data[0], data[1], data[2], data[3]); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); len = sizeof(data); ret |= ix2505v_write(state, data, len); data[2] |= 0x4; /* set TM = 1 other bits same */ if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); len = 1; ret |= ix2505v_write(state, &data[2], len); /* write byte 4 only */ msleep(10); data[2] |= ((lpf >> 2) & 0x3) << 3; /* lpf */ data[3] |= (lpf & 0x3) << 2; deb_info("Data 2=[%x%x]\n", data[2], data[3]); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); len = 2; ret |= ix2505v_write(state, 
&data[2], len); /* write byte 4 & 5 */ if (state->config->min_delay_ms) msleep(state->config->min_delay_ms); state->frequency = frequency; return ret; } static int ix2505v_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct ix2505v_state *state = fe->tuner_priv; *frequency = state->frequency; return 0; } static struct dvb_tuner_ops ix2505v_tuner_ops = { .info = { .name = "Sharp IX2505V (B0017)", .frequency_min = 950000, .frequency_max = 2175000 }, .release = ix2505v_release, .set_params = ix2505v_set_params, .get_frequency = ix2505v_get_frequency, }; struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe, const struct ix2505v_config *config, struct i2c_adapter *i2c) { struct ix2505v_state *state = NULL; int ret; if (NULL == config) { deb_i2c("%s: no config ", __func__); goto error; } state = kzalloc(sizeof(struct ix2505v_state), GFP_KERNEL); if (NULL == state) return NULL; state->config = config; state->i2c = i2c; if (state->config->tuner_write_only) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = ix2505v_read_status_reg(state); if (ret & 0x80) { deb_i2c("%s: No IX2505V found\n", __func__); goto error; } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } fe->tuner_priv = state; memcpy(&fe->ops.tuner_ops, &ix2505v_tuner_ops, sizeof(struct dvb_tuner_ops)); deb_i2c("%s: initialization (%s addr=0x%02x) ok\n", __func__, fe->ops.tuner_ops.info.name, config->tuner_address); return fe; error: kfree(state); return NULL; } EXPORT_SYMBOL(ix2505v_attach); module_param_named(debug, ix2505v_debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("DVB IX2505V tuner driver"); MODULE_AUTHOR("Malcolm Priestley"); MODULE_LICENSE("GPL");
gpl-2.0
Epirex/android_kernel_samsung_golden
drivers/net/cxgb4/t4_hw.c
7993
89030
/* * This file is part of the Chelsio T4 Ethernet driver for Linux. * * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and	-EAGAIN	otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		/* done once the masked bit matches the requested polarity */
		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

/* Same as t4_wait_op_done_val() but the completion-time value is discarded. */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
*/
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	/* read-modify-write: clear the masked field, then OR in @val */
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		/* select the indirect register, then fetch its value */
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 * Reads @nflit 8-byte flits starting at @mbox_addr.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
*/
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* pull the fw_debug_cmd out of the mailbox and log its assert info */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

/* Dump the 64-byte contents of a mailbox to the kernel log for debugging. */
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/* backoff schedule in ms when sleeping; last element may repeat */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	/* commands must be a multiple of 16 bytes and fit the mailbox */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* poll a few times for the driver to gain mailbox ownership */
	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	/* copy the command into the mailbox, 8 bytes at a time */
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	/* hand the mailbox to FW */
	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			/* ownership came back without a valid message: retry */
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				/* FW reported an assertion instead of a reply */
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @parity is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* bail out if a previous backdoor BIST access is still running */
	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

	/* copy out the 16 data words (highest index first) plus the ECC word */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @parity is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
*/
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* convert the EDC index into a register offset */
	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, EDC_BIST_CMD + idx,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

	/* copy out the 16 data words (highest index first) plus the ECC word */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0
#define VPD_LEN            512

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	/* 0xc sets the protection bits in the EEPROM status word */
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
*/ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) { int i, ret; int ec, sn; u8 vpd[VPD_LEN], csum; unsigned int vpdr_len, kw_offset, id_len; ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd); if (ret < 0) return ret; if (vpd[0] != PCI_VPD_LRDT_ID_STRING) { dev_err(adapter->pdev_dev, "missing VPD ID string\n"); return -EINVAL; } id_len = pci_vpd_lrdt_size(vpd); if (id_len > ID_LEN) id_len = ID_LEN; i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) { dev_err(adapter->pdev_dev, "missing VPD-R section\n"); return -EINVAL; } vpdr_len = pci_vpd_lrdt_size(&vpd[i]); kw_offset = i + PCI_VPD_LRDT_TAG_SIZE; if (vpdr_len + kw_offset > VPD_LEN) { dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len); return -EINVAL; } #define FIND_VPD_KW(var, name) do { \ var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \ if (var < 0) { \ dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \ return -EINVAL; \ } \ var += PCI_VPD_INFO_FLD_HDR_SIZE; \ } while (0) FIND_VPD_KW(i, "RV"); for (csum = 0; i >= 0; i--) csum += vpd[i]; if (csum) { dev_err(adapter->pdev_dev, "corrupted VPD EEPROM, actual csum %u\n", csum); return -EINVAL; } FIND_VPD_KW(ec, "EC"); FIND_VPD_KW(sn, "SN"); #undef FIND_VPD_KW memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); strim(p->id); memcpy(p->ec, vpd + ec, EC_LEN); strim(p->ec); i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); strim(p->sn); return 0; } /* serial flash and firmware constants */ enum { SF_ATTEMPTS = 10, /* max retries for SF operations */ /* flash command opcodes */ SF_PROG_PAGE = 2, /* program page */ SF_WR_DISABLE = 4, /* disable writes */ SF_RD_STATUS = 5, /* read status register */ SF_WR_ENABLE = 6, /* enable writes */ SF_RD_DATA_FAST = 0xb, /* read flash */ SF_RD_ID = 0x9f, /* read ID */ SF_ERASE_SECTOR = 0xd8, /* erase sector */ FW_MAX_SIZE = 512 * 1024, }; /** * sf1_read - read data from the 
serial flash * @adapter: the adapter * @byte_cnt: number of bytes to read * @cont: whether another operation will be chained * @lock: whether to lock SF for PL access only * @valp: where to store the read data * * Reads up to 4 bytes of data from the serial flash. The location of * the read needs to be specified prior to calling this by issuing the * appropriate commands to the serial flash. */ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock, u32 *valp) { int ret; if (!byte_cnt || byte_cnt > 4) return -EINVAL; if (t4_read_reg(adapter, SF_OP) & BUSY) return -EBUSY; cont = cont ? SF_CONT : 0; lock = lock ? SF_LOCK : 0; t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); if (!ret) *valp = t4_read_reg(adapter, SF_DATA); return ret; } /** * sf1_write - write data to the serial flash * @adapter: the adapter * @byte_cnt: number of bytes to write * @cont: whether another operation will be chained * @lock: whether to lock SF for PL access only * @val: value to write * * Writes up to 4 bytes of data to the serial flash. The location of * the write needs to be specified prior to calling this by issuing the * appropriate commands to the serial flash. */ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock, u32 val) { if (!byte_cnt || byte_cnt > 4) return -EINVAL; if (t4_read_reg(adapter, SF_OP) & BUSY) return -EBUSY; cont = cont ? SF_CONT : 0; lock = lock ? SF_LOCK : 0; t4_write_reg(adapter, SF_DATA, val); t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1) | OP_WR); return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); } /** * flash_wait_op - wait for a flash operation to complete * @adapter: the adapter * @attempts: max number of polls of the status register * @delay: delay between polls in ms * * Wait for a flash operation to complete by polling the status register. 
*/
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		/* issue RD_STATUS then fetch the status byte */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		/* bit 0 is the flash busy bit; clear means done */
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianess.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* reject reads past the end of flash or unaligned start addresses */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* byte-swapped address with the fast-read opcode in the low byte */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* chain reads; unlock SF after the final word */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
*/ static int t4_write_flash(struct adapter *adapter, unsigned int addr, unsigned int n, const u8 *data) { int ret; u32 buf[64]; unsigned int i, c, left, val, offset = addr & 0xff; if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) return -EINVAL; val = swab32(addr) | SF_PROG_PAGE; if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) goto unlock; for (left = n; left; left -= c) { c = min(left, 4U); for (val = 0, i = 0; i < c; ++i) val = (val << 8) + *data++; ret = sf1_write(adapter, c, c != left, 1, val); if (ret) goto unlock; } ret = flash_wait_op(adapter, 8, 1); if (ret) goto unlock; t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ /* Read the page to verify the write succeeded */ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); if (ret) return ret; if (memcmp(data - n, (u8 *)buf + offset, n)) { dev_err(adapter->pdev_dev, "failed to correctly write the flash page at %#x\n", addr); return -EIO; } return 0; unlock: t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ return ret; } /** * get_fw_version - read the firmware version * @adapter: the adapter * @vers: where to place the version * * Reads the FW version from flash. */ static int get_fw_version(struct adapter *adapter, u32 *vers) { return t4_read_flash(adapter, adapter->params.sf_fw_start + offsetof(struct fw_hdr, fw_ver), 1, vers, 0); } /** * get_tp_version - read the TP microcode version * @adapter: the adapter * @vers: where to place the version * * Reads the TP microcode version from flash. */ static int get_tp_version(struct adapter *adapter, u32 *vers) { return t4_read_flash(adapter, adapter->params.sf_fw_start + offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0); } /** * t4_check_fw_version - check if the FW is compatible with this driver * @adapter: the adapter * * Checks if an adapter's FW is compatible with the driver. 
Returns 0 * if there's exact match, a negative error if the version could not be * read or there's a major version mismatch, and a positive value if the * expected major version is found but there's a minor version mismatch. */ int t4_check_fw_version(struct adapter *adapter) { u32 api_vers[2]; int ret, major, minor, micro; ret = get_fw_version(adapter, &adapter->params.fw_vers); if (!ret) ret = get_tp_version(adapter, &adapter->params.tp_vers); if (!ret) ret = t4_read_flash(adapter, adapter->params.sf_fw_start + offsetof(struct fw_hdr, intfver_nic), 2, api_vers, 1); if (ret) return ret; major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); memcpy(adapter->params.api_vers, api_vers, sizeof(adapter->params.api_vers)); if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ dev_err(adapter->pdev_dev, "card FW has major version %u, driver wants %u\n", major, FW_VERSION_MAJOR); return -EINVAL; } if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) return 0; /* perfect match */ /* Minor/micro version mismatch. Report it but often it's OK. */ return 1; } /** * t4_flash_erase_sectors - erase a range of flash sectors * @adapter: the adapter * @start: the first sector to erase * @end: the last sector to erase * * Erases the sectors in the given inclusive range. 
*/ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) { int ret = 0; while (start <= end) { if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 0, 1, SF_ERASE_SECTOR | (start << 8))) != 0 || (ret = flash_wait_op(adapter, 14, 500)) != 0) { dev_err(adapter->pdev_dev, "erase of flash sector %d failed, error %d\n", start, ret); break; } start++; } t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ return ret; } /** * t4_load_fw - download firmware * @adap: the adapter * @fw_data: the firmware image to write * @size: image size * * Write the supplied firmware image to the card's serial flash. */ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) { u32 csum; int ret, addr; unsigned int i; u8 first_page[SF_PAGE_SIZE]; const u32 *p = (const u32 *)fw_data; const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; unsigned int fw_img_start = adap->params.sf_fw_start; unsigned int fw_start_sec = fw_img_start / sf_sec_size; if (!size) { dev_err(adap->pdev_dev, "FW image has no data\n"); return -EINVAL; } if (size & 511) { dev_err(adap->pdev_dev, "FW image size not multiple of 512 bytes\n"); return -EINVAL; } if (ntohs(hdr->len512) * 512 != size) { dev_err(adap->pdev_dev, "FW image size differs from size in FW header\n"); return -EINVAL; } if (size > FW_MAX_SIZE) { dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", FW_MAX_SIZE); return -EFBIG; } for (csum = 0, i = 0; i < size / sizeof(csum); i++) csum += ntohl(p[i]); if (csum != 0xffffffff) { dev_err(adap->pdev_dev, "corrupted firmware image, checksum %#x\n", csum); return -EINVAL; } i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); if (ret) goto out; /* * We write the correct version at the end so the driver can see a bad * version if the FW write fails. 
Start by writing a copy of the * first page with a bad version. */ memcpy(first_page, fw_data, SF_PAGE_SIZE); ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); if (ret) goto out; addr = fw_img_start; for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { addr += SF_PAGE_SIZE; fw_data += SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data); if (ret) goto out; } ret = t4_write_flash(adap, fw_img_start + offsetof(struct fw_hdr, fw_ver), sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); out: if (ret) dev_err(adap->pdev_dev, "firmware download failed, error %d\n", ret); return ret; } #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) /** * t4_link_start - apply link configuration to MAC/PHY * @phy: the PHY to setup * @mac: the MAC to setup * @lc: the requested link configuration * * Set up a port's MAC and PHY according to a desired link configuration. * - If the PHY can auto-negotiate first decide what to advertise, then * enable/disable auto-negotiation as desired, and reset. * - If the PHY does not auto-negotiate just reset it. * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. 
*/ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); lc->link_ok = 0; if (lc->requested_fc & PAUSE_RX) fc |= FW_PORT_CAP_FC_RX; if (lc->requested_fc & PAUSE_TX) fc |= FW_PORT_CAP_FC_TX; memset(&c, 0, sizeof(c)); c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); if (!(lc->supported & FW_PORT_CAP_ANEG)) { c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); } else if (lc->autoneg == AUTONEG_DISABLE) { c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); } else c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_restart_aneg - restart autonegotiation * @adap: the adapter * @mbox: mbox to use for the FW command * @port: the port id * * Restarts autonegotiation for the selected port. 
*/ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) { struct fw_port_cmd c; memset(&c, 0, sizeof(c)); c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } struct intr_info { unsigned int mask; /* bits to check in interrupt status */ const char *msg; /* message to print or NULL */ short stat_idx; /* stat counter to increment or -1 */ unsigned short fatal; /* whether the condition reported is fatal */ }; /** * t4_handle_intr_status - table driven interrupt handler * @adapter: the adapter that generated the interrupt * @reg: the interrupt status register to process * @acts: table of interrupt actions * * A table driven interrupt handler that applies a set of masks to an * interrupt status word and performs the corresponding actions if the * interrupts described by the mask have occurred. The actions include * optionally emitting a warning or alert message. The table is terminated * by an entry specifying mask 0. Returns the number of fatal interrupt * conditions. */ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, const struct intr_info *acts) { int fatal = 0; unsigned int mask = 0; unsigned int status = t4_read_reg(adapter, reg); for ( ; acts->mask; ++acts) { if (!(status & acts->mask)) continue; if (acts->fatal) { fatal++; dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, status & acts->mask); } else if (acts->msg && printk_ratelimit()) dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, status & acts->mask); mask |= acts->mask; } status &= mask; if (status) /* clear processed interrupts */ t4_write_reg(adapter, reg, status); return fatal; } /* * Interrupt handler for the PCIE module. 
*/ static void pcie_intr_handler(struct adapter *adapter) { static const struct intr_info sysbus_intr_info[] = { { RNPP, "RXNP array parity error", -1, 1 }, { RPCP, "RXPC array parity error", -1, 1 }, { RCIP, "RXCIF array parity error", -1, 1 }, { RCCP, "Rx completions control array parity error", -1, 1 }, { RFTP, "RXFT array parity error", -1, 1 }, { 0 } }; static const struct intr_info pcie_port_intr_info[] = { { TPCP, "TXPC array parity error", -1, 1 }, { TNPP, "TXNP array parity error", -1, 1 }, { TFTP, "TXFT array parity error", -1, 1 }, { TCAP, "TXCA array parity error", -1, 1 }, { TCIP, "TXCIF array parity error", -1, 1 }, { RCAP, "RXCA array parity error", -1, 1 }, { OTDD, "outbound request TLP discarded", -1, 1 }, { RDPE, "Rx data parity error", -1, 1 }, { TDUE, "Tx uncorrectable data error", -1, 1 }, { 0 } }; static const struct intr_info pcie_intr_info[] = { { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, { MSIDATAPERR, "MSI data parity error", -1, 1 }, { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, { CFGSNPPERR, "PCI config 
snoop FIFO parity error", -1, 1 }, { FIDPERR, "PCI FID parity error", -1, 1 }, { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, { MATAGPERR, "PCI MA tag parity error", -1, 1 }, { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, { RXWRPERR, "PCI Rx write parity error", -1, 1 }, { RPLPERR, "PCI replay buffer parity error", -1, 1 }, { PCIESINT, "PCI core secondary fault", -1, 1 }, { PCIEPINT, "PCI core primary fault", -1, 1 }, { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, { 0 } }; int fat; fat = t4_handle_intr_status(adapter, PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, sysbus_intr_info) + t4_handle_intr_status(adapter, PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, pcie_port_intr_info) + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); if (fat) t4_fatal_err(adapter); } /* * TP interrupt handler. */ static void tp_intr_handler(struct adapter *adapter) { static const struct intr_info tp_intr_info[] = { { 0x3fffffff, "TP parity error", -1, 1 }, { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) t4_fatal_err(adapter); } /* * SGE interrupt handler. 
*/ static void sge_intr_handler(struct adapter *adapter) { u64 v; static const struct intr_info sge_intr_info[] = { { ERR_CPL_EXCEED_IQE_SIZE, "SGE received CPL exceeding IQE size", -1, 1 }, { ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large", -1, 0 }, { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, "SGE IQID > 1023 received CPL for FL", -1, 0 }, { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 0 }, { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 0 }, { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 0 }, { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 0 }, { ERR_ING_CTXT_PRIO, "SGE too many priority ingress contexts", -1, 0 }, { ERR_EGR_CTXT_PRIO, "SGE too many priority egress contexts", -1, 0 }, { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, { 0 } }; v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); if (v) { dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", (unsigned long long)v); t4_write_reg(adapter, SGE_INT_CAUSE1, v); t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); } if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || v != 0) t4_fatal_err(adapter); } /* * CIM interrupt handler. 
*/ static void cim_intr_handler(struct adapter *adapter) { static const struct intr_info cim_intr_info[] = { { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, { OBQPARERR, "CIM OBQ parity error", -1, 1 }, { IBQPARERR, "CIM IBQ parity error", -1, 1 }, { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, { 0 } }; static const struct intr_info cim_upintr_info[] = { { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, { ILLWRINT, "CIM illegal write", -1, 1 }, { ILLRDINT, "CIM illegal read", -1, 1 }, { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, { 
TIMEOUTINT , "CIM PIF timeout", -1, 1 }, { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, { 0 } }; int fat; fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, cim_intr_info) + t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, cim_upintr_info); if (fat) t4_fatal_err(adapter); } /* * ULP RX interrupt handler. */ static void ulprx_intr_handler(struct adapter *adapter) { static const struct intr_info ulprx_intr_info[] = { { 0x1800000, "ULPRX context error", -1, 1 }, { 0x7fffff, "ULPRX parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) t4_fatal_err(adapter); } /* * ULP TX interrupt handler. */ static void ulptx_intr_handler(struct adapter *adapter) { static const struct intr_info ulptx_intr_info[] = { { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 0 }, { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 0 }, { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 0 }, { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 0 }, { 0xfffffff, "ULPTX parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) t4_fatal_err(adapter); } /* * PM TX interrupt handler. */ static void pmtx_intr_handler(struct adapter *adapter) { static const struct intr_info pmtx_intr_info[] = { { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, { 0 } }; if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) t4_fatal_err(adapter); } /* * PM RX interrupt handler. 
*/ static void pmrx_intr_handler(struct adapter *adapter) { static const struct intr_info pmrx_intr_info[] = { { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, { 0 } }; if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) t4_fatal_err(adapter); } /* * CPL switch interrupt handler. */ static void cplsw_intr_handler(struct adapter *adapter) { static const struct intr_info cplsw_intr_info[] = { { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) t4_fatal_err(adapter); } /* * LE interrupt handler. */ static void le_intr_handler(struct adapter *adap) { static const struct intr_info le_intr_info[] = { { LIPMISS, "LE LIP miss", -1, 0 }, { LIP0, "LE 0 LIP error", -1, 0 }, { PARITYERR, "LE parity error", -1, 1 }, { UNKNOWNCMD, "LE unknown command", -1, 1 }, { REQQPARERR, "LE request queue parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) t4_fatal_err(adap); } /* * MPS interrupt handler. 
*/ static void mps_intr_handler(struct adapter *adapter) { static const struct intr_info mps_rx_intr_info[] = { { 0xffffff, "MPS Rx parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_tx_intr_info[] = { { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, { BUBBLE, "MPS Tx underflow", -1, 1 }, { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, { FRMERR, "MPS Tx framing error", -1, 1 }, { 0 } }; static const struct intr_info mps_trc_intr_info[] = { { FILTMEM, "MPS TRC filter parity error", -1, 1 }, { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, { MISCPERR, "MPS TRC misc parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_stat_sram_intr_info[] = { { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_stat_tx_intr_info[] = { { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_stat_rx_intr_info[] = { { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_cls_intr_info[] = { { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, { 0 } }; int fat; fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, mps_rx_intr_info) + t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, mps_tx_intr_info) + t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, mps_trc_intr_info) + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, mps_stat_sram_intr_info) + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, mps_stat_tx_intr_info) + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, mps_stat_rx_intr_info) + t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, mps_cls_intr_info); t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT 
| TRCINT | RXINT | TXINT | STATINT); t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ if (fat) t4_fatal_err(adapter); } #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) /* * EDC/MC interrupt handler. */ static void mem_intr_handler(struct adapter *adapter, int idx) { static const char name[3][5] = { "EDC0", "EDC1", "MC" }; unsigned int addr, cnt_addr, v; if (idx <= MEM_EDC1) { addr = EDC_REG(EDC_INT_CAUSE, idx); cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); } else { addr = MC_INT_CAUSE; cnt_addr = MC_ECC_STATUS; } v = t4_read_reg(adapter, addr) & MEM_INT_MASK; if (v & PERR_INT_CAUSE) dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", name[idx]); if (v & ECC_CE_INT_CAUSE) { u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); if (printk_ratelimit()) dev_warn(adapter->pdev_dev, "%u %s correctable ECC data error%s\n", cnt, name[idx], cnt > 1 ? "s" : ""); } if (v & ECC_UE_INT_CAUSE) dev_alert(adapter->pdev_dev, "%s uncorrectable ECC data error\n", name[idx]); t4_write_reg(adapter, addr, v); if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) t4_fatal_err(adapter); } /* * MA interrupt handler. */ static void ma_intr_handler(struct adapter *adap) { u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); if (status & MEM_PERR_INT_CAUSE) dev_alert(adap->pdev_dev, "MA parity error, parity status %#x\n", t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); if (status & MEM_WRAP_INT_CAUSE) { v = t4_read_reg(adap, MA_INT_WRAP_STATUS); dev_alert(adap->pdev_dev, "MA address wrap-around error by " "client %u to address %#x\n", MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4); } t4_write_reg(adap, MA_INT_CAUSE, status); t4_fatal_err(adap); } /* * SMB interrupt handler. 
*/ static void smb_intr_handler(struct adapter *adap) { static const struct intr_info smb_intr_info[] = { { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) t4_fatal_err(adap); } /* * NC-SI interrupt handler. */ static void ncsi_intr_handler(struct adapter *adap) { static const struct intr_info ncsi_intr_info[] = { { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) t4_fatal_err(adap); } /* * XGMAC interrupt handler. */ static void xgmac_intr_handler(struct adapter *adap, int port) { u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; if (!v) return; if (v & TXFIFO_PRTY_ERR) dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", port); if (v & RXFIFO_PRTY_ERR) dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", port); t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); t4_fatal_err(adap); } /* * PL interrupt handler. */ static void pl_intr_handler(struct adapter *adap) { static const struct intr_info pl_intr_info[] = { { FATALPERR, "T4 fatal parity error", -1, 1 }, { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) t4_fatal_err(adap); } #define PF_INTR_MASK (PFSW) #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ CPL_SWITCH | SGE | ULP_TX) /** * t4_slow_intr_handler - control path interrupt handler * @adapter: the adapter * * T4 interrupt handler for non-data global interrupt events, e.g., errors. 
* The designation 'slow' is because it involves register reads, while * data interrupts typically don't involve any MMIOs. */ int t4_slow_intr_handler(struct adapter *adapter) { u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); if (!(cause & GLBL_INTR_MASK)) return 0; if (cause & CIM) cim_intr_handler(adapter); if (cause & MPS) mps_intr_handler(adapter); if (cause & NCSI) ncsi_intr_handler(adapter); if (cause & PL) pl_intr_handler(adapter); if (cause & SMB) smb_intr_handler(adapter); if (cause & XGMAC0) xgmac_intr_handler(adapter, 0); if (cause & XGMAC1) xgmac_intr_handler(adapter, 1); if (cause & XGMAC_KR0) xgmac_intr_handler(adapter, 2); if (cause & XGMAC_KR1) xgmac_intr_handler(adapter, 3); if (cause & PCIE) pcie_intr_handler(adapter); if (cause & MC) mem_intr_handler(adapter, MEM_MC); if (cause & EDC0) mem_intr_handler(adapter, MEM_EDC0); if (cause & EDC1) mem_intr_handler(adapter, MEM_EDC1); if (cause & LE) le_intr_handler(adapter); if (cause & TP) tp_intr_handler(adapter); if (cause & MA) ma_intr_handler(adapter); if (cause & PM_TX) pmtx_intr_handler(adapter); if (cause & PM_RX) pmrx_intr_handler(adapter); if (cause & ULP_RX) ulprx_intr_handler(adapter); if (cause & CPL_SWITCH) cplsw_intr_handler(adapter); if (cause & SGE) sge_intr_handler(adapter); if (cause & ULP_TX) ulptx_intr_handler(adapter); /* Clear the interrupts just processed for which we are the master. */ t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ return 1; } /** * t4_intr_enable - enable interrupts * @adapter: the adapter whose interrupts should be enabled * * Enable PF-specific interrupts for the calling function and the top-level * interrupt concentrator for global interrupts. Interrupts are already * enabled at each module, here we just enable the roots of the interrupt * hierarchies. * * Note: this function should be called only when the driver manages * non PF-specific interrupts from the various HW modules. 
Only one PCI * function at a time should be doing this. */ void t4_intr_enable(struct adapter *adapter) { u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | EGRESS_SIZE_ERR); t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); } /** * t4_intr_disable - disable interrupts * @adapter: the adapter whose interrupts should be disabled * * Disable interrupts. We only disable the top-level interrupt * concentrators. The caller must be a PCI function managing global * interrupts. */ void t4_intr_disable(struct adapter *adapter) { u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); } /** * hash_mac_addr - return the hash value of a MAC address * @addr: the 48-bit Ethernet MAC address * * Hashes a MAC address according to the hash function used by HW inexact * (hash) address matching. */ static int hash_mac_addr(const u8 *addr) { u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; a ^= b; a ^= (a >> 12); a ^= (a >> 6); return a & 0x3f; } /** * t4_config_rss_range - configure a portion of the RSS mapping table * @adapter: the adapter * @mbox: mbox to use for the FW command * @viid: virtual interface whose RSS subtable is to be written * @start: start entry in the table to write * @n: how many table entries to write * @rspq: values for the response queue lookup table * @nrspq: number of values in @rspq * * Programs the selected part of the VI's RSS mapping table with the * provided values. 
If @nrspq < @n the supplied values are used repeatedly * until the full table range is populated. * * The caller must ensure the values in @rspq are in the range allowed for * @viid. */ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq) { int ret; const u16 *rsp = rspq; const u16 *rsp_end = rspq + nrspq; struct fw_rss_ind_tbl_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_RSS_IND_TBL_CMD_VIID(viid)); cmd.retval_len16 = htonl(FW_LEN16(cmd)); /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ while (n > 0) { int nq = min(n, 32); __be32 *qp = &cmd.iq0_to_iq2; cmd.niqid = htons(nq); cmd.startidx = htons(start); start += nq; n -= nq; while (nq > 0) { unsigned int v; v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); if (++rsp >= rsp_end) rsp = rspq; v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); if (++rsp >= rsp_end) rsp = rspq; v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); if (++rsp >= rsp_end) rsp = rspq; *qp++ = htonl(v); nq -= 3; } ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); if (ret) return ret; } return 0; } /** * t4_config_glbl_rss - configure the global RSS mode * @adapter: the adapter * @mbox: mbox to use for the FW command * @mode: global RSS mode * @flags: mode-specific flags * * Sets the global RSS mode. 
*/ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, unsigned int flags) { struct fw_rss_glb_config_cmd c; memset(&c, 0, sizeof(c)); c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE); c.retval_len16 = htonl(FW_LEN16(c)); if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { c.u.basicvirtual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); } else return -EINVAL; return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); } /** * t4_tp_get_tcp_stats - read TP's TCP MIB counters * @adap: the adapter * @v4: holds the TCP/IP counter values * @v6: holds the TCP/IPv6 counter values * * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. * Either @v4 or @v6 may be %NULL to skip the corresponding stats. */ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6) { u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) #define STAT(x) val[STAT_IDX(x)] #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) if (v4) { t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); v4->tcpOutRsts = STAT(OUT_RST); v4->tcpInSegs = STAT64(IN_SEG); v4->tcpOutSegs = STAT64(OUT_SEG); v4->tcpRetransSegs = STAT64(RXT_SEG); } if (v6) { t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); v6->tcpOutRsts = STAT(OUT_RST); v6->tcpInSegs = STAT64(IN_SEG); v6->tcpOutSegs = STAT64(OUT_SEG); v6->tcpRetransSegs = STAT64(RXT_SEG); } #undef STAT64 #undef STAT #undef STAT_IDX } /** * t4_read_mtu_tbl - returns the values in the HW path MTU table * @adap: the adapter * @mtus: where to store the MTU values * @mtu_log: where to store the MTU base-2 log (may be %NULL) * * Reads the 
HW path MTU table. */ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) { u32 v; int i; for (i = 0; i < NMTUS; ++i) { t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(0xff) | MTUVALUE(i)); v = t4_read_reg(adap, TP_MTU_TABLE); mtus[i] = MTUVALUE_GET(v); if (mtu_log) mtu_log[i] = MTUWIDTH_GET(v); } } /** * init_cong_ctrl - initialize congestion control parameters * @a: the alpha values for congestion control * @b: the beta values for congestion control * * Initialize the congestion control parameters. */ static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) { a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; a[9] = 2; a[10] = 3; a[11] = 4; a[12] = 5; a[13] = 6; a[14] = 7; a[15] = 8; a[16] = 9; a[17] = 10; a[18] = 14; a[19] = 17; a[20] = 21; a[21] = 25; a[22] = 30; a[23] = 35; a[24] = 45; a[25] = 60; a[26] = 80; a[27] = 100; a[28] = 200; a[29] = 300; a[30] = 400; a[31] = 500; b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; b[9] = b[10] = 1; b[11] = b[12] = 2; b[13] = b[14] = b[15] = b[16] = 3; b[17] = b[18] = b[19] = b[20] = b[21] = 4; b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; b[28] = b[29] = 6; b[30] = b[31] = 7; } /* The minimum additive increment value for the congestion control table */ #define CC_MIN_INCR 2U /** * t4_load_mtus - write the MTU and congestion control HW tables * @adap: the adapter * @mtus: the values for the MTU table * @alpha: the values for the congestion control alpha parameter * @beta: the values for the congestion control beta parameter * * Write the HW MTU table with the supplied MTUs and the high-speed * congestion control table with the supplied alpha, beta, and MTUs. * We write the two tables together because the additive increments * depend on the MTUs. 
*/ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, const unsigned short *alpha, const unsigned short *beta) { static const unsigned int avg_pkts[NCCTRL_WIN] = { 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 28672, 40960, 57344, 81920, 114688, 163840, 229376 }; unsigned int i, w; for (i = 0; i < NMTUS; ++i) { unsigned int mtu = mtus[i]; unsigned int log2 = fls(mtu); if (!(mtu & ((1 << log2) >> 2))) /* round */ log2--; t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | MTUWIDTH(log2) | MTUVALUE(mtu)); for (w = 0; w < NCCTRL_WIN; ++w) { unsigned int inc; inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], CC_MIN_INCR); t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | (w << 16) | (beta[w] << 13) | inc); } } } /** * get_mps_bg_map - return the buffer groups associated with a port * @adap: the adapter * @idx: the port index * * Returns a bitmap indicating which MPS buffer groups are associated * with the given port. Bit i is set if buffer group i is used by the * port. */ static unsigned int get_mps_bg_map(struct adapter *adap, int idx) { u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); if (n == 0) return idx == 0 ? 0xf : 0; if (n == 1) return idx < 2 ? (3 << (2 * idx)) : 0; return 1 << idx; } /** * t4_get_port_stats - collect port statistics * @adap: the adapter * @idx: the port index * @p: the stats structure to fill * * Collect statistics related to the given port from HW. 
*/ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) { u32 bgmap = get_mps_bg_map(adap, idx); #define GET_STAT(name) \ t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) p->tx_octets = GET_STAT(TX_PORT_BYTES); p->tx_frames = GET_STAT(TX_PORT_FRAMES); p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); p->tx_error_frames = GET_STAT(TX_PORT_ERROR); p->tx_frames_64 = GET_STAT(TX_PORT_64B); p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); p->tx_drop = GET_STAT(TX_PORT_DROP); p->tx_pause = GET_STAT(TX_PORT_PAUSE); p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); p->rx_octets = GET_STAT(RX_PORT_BYTES); p->rx_frames = GET_STAT(RX_PORT_FRAMES); p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); p->rx_runt = GET_STAT(RX_PORT_LESS_64B); p->rx_frames_64 = GET_STAT(RX_PORT_64B); p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); p->rx_frames_512_1023 = 
GET_STAT(RX_PORT_512B_1023B); p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); p->rx_pause = GET_STAT(RX_PORT_PAUSE); p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; #undef GET_STAT #undef GET_STAT_COM } /** * t4_wol_magic_enable - enable/disable magic packet WoL * @adap: the adapter * @port: the physical port index * @addr: MAC address expected in magic packets, %NULL to disable * * Enables/disables magic packet wake-on-LAN for the selected port. */ void t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr) { if (addr) { t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]); t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), (addr[0] << 8) | addr[1]); } t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, addr ? 
MAGICEN : 0); } /** * t4_wol_pat_enable - enable/disable pattern-based WoL * @adap: the adapter * @port: the physical port index * @map: bitmap of which HW pattern filters to set * @mask0: byte mask for bytes 0-63 of a packet * @mask1: byte mask for bytes 64-127 of a packet * @crc: Ethernet CRC for selected bytes * @enable: enable/disable switch * * Sets the pattern filters indicated in @map to mask out the bytes * specified in @mask0/@mask1 in received packets and compare the CRC of * the resulting packet against @crc. If @enable is %true pattern-based * WoL is enabled, otherwise disabled. */ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, u64 mask0, u64 mask1, unsigned int crc, bool enable) { int i; if (!enable) { t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), PATEN, 0); return 0; } if (map > 0xff) return -EINVAL; #define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); t4_write_reg(adap, EPIO_REG(DATA2), mask1); t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); for (i = 0; i < NWOL_PAT; i++, map >>= 1) { if (!(map & 1)) continue; /* write byte masks */ t4_write_reg(adap, EPIO_REG(DATA0), mask0); t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); t4_read_reg(adap, EPIO_REG(OP)); /* flush */ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) return -ETIMEDOUT; /* write CRC */ t4_write_reg(adap, EPIO_REG(DATA0), crc); t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); t4_read_reg(adap, EPIO_REG(OP)); /* flush */ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) return -ETIMEDOUT; } #undef EPIO_REG t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); return 0; } #define INIT_CMD(var, cmd, rd_wr) do { \ (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ FW_CMD_REQUEST | FW_CMD_##rd_wr); \ (var).retval_len16 = htonl(FW_LEN16(var)); \ } while (0) /** * t4_mdio_rd - read a PHY register through MDIO * @adap: the adapter * @mbox: mailbox to use for the FW 
command * @phy_addr: the PHY address * @mmd: the PHY MMD to access (0 for clause 22 PHYs) * @reg: the register to read * @valp: where to store the value * * Issues a FW command through the given mailbox to read a PHY register. */ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, u16 *valp) { int ret; struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | FW_LDST_CMD_MMD(mmd)); c.u.mdio.raddr = htons(reg); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) *valp = ntohs(c.u.mdio.rval); return ret; } /** * t4_mdio_wr - write a PHY register through MDIO * @adap: the adapter * @mbox: mailbox to use for the FW command * @phy_addr: the PHY address * @mmd: the PHY MMD to access (0 for clause 22 PHYs) * @reg: the register to write * @valp: value to write * * Issues a FW command through the given mailbox to write a PHY register. */ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, u16 val) { struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | FW_LDST_CMD_MMD(mmd)); c.u.mdio.raddr = htons(reg); c.u.mdio.rval = htons(val); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_fw_hello - establish communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command * @evt_mbox: mailbox to receive async FW events * @master: specifies the caller's willingness to be the device master * @state: returns the current device state * * Issues a command to establish communication with FW. 
*/ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state) { int ret; struct fw_hello_cmd c; INIT_CMD(c, HELLO, WRITE); c.err_to_mbasyncnot = htonl( FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) | FW_HELLO_CMD_MBASYNCNOT(evt_mbox)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0 && state) { u32 v = ntohl(c.err_to_mbasyncnot); if (v & FW_HELLO_CMD_INIT) *state = DEV_STATE_INIT; else if (v & FW_HELLO_CMD_ERR) *state = DEV_STATE_ERR; else *state = DEV_STATE_UNINIT; } return ret; } /** * t4_fw_bye - end communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command * * Issues a command to terminate communication with FW. */ int t4_fw_bye(struct adapter *adap, unsigned int mbox) { struct fw_bye_cmd c; INIT_CMD(c, BYE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_init_cmd - ask FW to initialize the device * @adap: the adapter * @mbox: mailbox to use for the FW command * * Issues a command to FW to partially initialize the device. This * performs initialization that generally doesn't depend on user input. */ int t4_early_init(struct adapter *adap, unsigned int mbox) { struct fw_initialize_cmd c; INIT_CMD(c, INITIALIZE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_fw_reset - issue a reset to FW * @adap: the adapter * @mbox: mailbox to use for the FW command * @reset: specifies the type of reset to perform * * Issues a reset command of the specified type to FW. 
*/ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) { struct fw_reset_cmd c; INIT_CMD(c, RESET, WRITE); c.val = htonl(reset); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_query_params - query FW or device parameters * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF * @vf: the VF * @nparams: the number of parameters * @params: the parameter names * @val: the parameter values * * Reads the value of FW or device parameters. Up to 7 parameters can be * queried at once. */ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val) { int i, ret; struct fw_params_cmd c; __be32 *p = &c.param[0].mnem; if (nparams > 7) return -EINVAL; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | FW_PARAMS_CMD_VFN(vf)); c.retval_len16 = htonl(FW_LEN16(c)); for (i = 0; i < nparams; i++, p += 2) *p = htonl(*params++); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) *val++ = ntohl(*p); return ret; } /** * t4_set_params - sets FW or device parameters * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF * @vf: the VF * @nparams: the number of parameters * @params: the parameter names * @val: the parameter values * * Sets the value of FW or device parameters. Up to 7 parameters can be * specified at once. 
*/ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val) { struct fw_params_cmd c; __be32 *p = &c.param[0].mnem; if (nparams > 7) return -EINVAL; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | FW_PARAMS_CMD_VFN(vf)); c.retval_len16 = htonl(FW_LEN16(c)); while (nparams--) { *p++ = htonl(*params++); *p++ = htonl(*val++); } return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_cfg_pfvf - configure PF/VF resource limits * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF being configured * @vf: the VF being configured * @txq: the max number of egress queues * @txq_eth_ctrl: the max number of egress Ethernet or control queues * @rxqi: the max number of interrupt-capable ingress queues * @rxq: the max number of interruptless ingress queues * @tc: the PCI traffic class * @vi: the max number of virtual interfaces * @cmask: the channel access rights mask for the PF/VF * @pmask: the port access rights mask for the PF/VF * @nexact: the maximum number of exact MPS filters * @rcaps: read capabilities * @wxcaps: write/execute capabilities * * Configures resource limits and capabilities for a physical or virtual * function. 
*/
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	/* ingress queue limits: interrupt-capable vs interruptless */
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	/* channel/port access rights and the egress queue limit */
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			      FW_PFVF_CMD_PMASK(pmask) |
			      FW_PFVF_CMD_NEQ(txq));
	/* traffic class, VI count and exact-match filter budget */
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	/* capability masks and Ethernet/control egress queue limit */
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_alloc_vi - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
*/
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
			    FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;	/* FW encodes the address count as N - 1 */

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* copy out any additional addresses the FW assigned;
		 * each case deliberately falls through to the next */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}

/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
*/
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values: a negative argument means "no change" */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
*/
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;

	if (naddr > 7)		/* the command holds at most 7 entries */
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16((naddr + 2) / 2));
	/* request an exact-match slot for every supplied address */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				       FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	/* ret (0 here) now counts the addresses that got an exact filter;
	 * addresses that didn't optionally go into the inexact hash */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= NEXACT_MAC ? 0xffff : index;
		if (index < NEXACT_MAC)
			ret++;
		else if (hash)
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}

/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.  @idx can be -1 if the address is a new addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
*/
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
		if (ret >= NEXACT_MAC)
			ret = -ENOMEM;	/* no free exact-match slot left */
	}
	return ret;
}

/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
*/
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	/* NOTE(review): FW_VI_ENABLE_CMD_VIID is used on a MAC command here;
	 * it presumably shares the VIID field layout with FW_VI_MAC_CMD_VIID
	 * (used by the other MAC-command helpers) - confirm in t4fw_api.h. */
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    FW_CMD_LEN16(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
*/ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, unsigned int nblinks) { struct fw_vi_enable_cmd c; c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); c.blinkdur = htons(nblinks); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_iq_free - free an ingress queue and its FLs * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queues * @vf: the VF owning the queues * @iqtype: the ingress queue type * @iqid: ingress queue id * @fl0id: FL0 queue id or 0xffff if no attached FL0 * @fl1id: FL1 queue id or 0xffff if no attached FL1 * * Frees an ingress queue and its associated FLs, if any. */ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id) { struct fw_iq_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | FW_IQ_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); c.iqid = htons(iqid); c.fl0id = htons(fl0id); c.fl1id = htons(fl1id); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_eth_eq_free - free an Ethernet egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees an Ethernet egress queue. 
*/
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
			    FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
			    FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
*/
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		/* decode flow control and link speed from the status word */
		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}

/* Record the PCIe link speed and width in @p, if a PCIe capability exists. */
static void __devinit get_pci_mode(struct adapter *adapter,
				   struct pci_params *p)
{
	u16 val;
	u32 pcie_cap = pci_pcie_cap(adapter->pdev);

	if (pcie_cap) {
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void __devinit init_link_config(struct link_config *lc,
				       unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/* Poll the WHOAMI register (with one retry) to see if the device responds. */
int t4_wait_dev_ready(struct adapter *adap)
{
	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
		return 0;
	msleep(500);
	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}

/* Identify the serial flash part and record its geometry in adap->params. */
static int __devinit get_flash_params(struct adapter *adap)
{
	int ret;
	u32 info;

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
	if (ret)
		return ret;

	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                           /* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	adap->params.sf_fw_start = t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
	return 0;
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
*/
int __devinit t4_prep_adapter(struct adapter *adapter)
{
	int ret;

	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);
	adapter->params.rev = t4_read_reg(adapter, PL_REV);

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	return 0;
}

/*
 * Query the FW for each port in the port vector, allocate a VI per port,
 * and initialize the corresponding port_info and net_device fields.
 */
int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		/* advance j to the next port present in the port vector */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
		adap->port[i]->dev_id = j;

		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		/* read the VI's RSS configuration */
		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}
gpl-2.0
Jason-Choi/EastSea-Kernel
drivers/tty/tty_audit.c
7993
8755
/* * Creating audit events from TTY input. * * Copyright (C) 2007 Red Hat, Inc. All rights reserved. This copyrighted * material is made available to anyone wishing to use, modify, copy, or * redistribute it subject to the terms and conditions of the GNU General * Public License v.2. * * Authors: Miloslav Trmac <mitr@redhat.com> */ #include <linux/audit.h> #include <linux/slab.h> #include <linux/tty.h> struct tty_audit_buf { atomic_t count; struct mutex mutex; /* Protects all data below */ int major, minor; /* The TTY which the data is from */ unsigned icanon:1; size_t valid; unsigned char *data; /* Allocated size N_TTY_BUF_SIZE */ }; static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor, int icanon) { struct tty_audit_buf *buf; buf = kmalloc(sizeof(*buf), GFP_KERNEL); if (!buf) goto err; buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL); if (!buf->data) goto err_buf; atomic_set(&buf->count, 1); mutex_init(&buf->mutex); buf->major = major; buf->minor = minor; buf->icanon = icanon; buf->valid = 0; return buf; err_buf: kfree(buf); err: return NULL; } static void tty_audit_buf_free(struct tty_audit_buf *buf) { WARN_ON(buf->valid != 0); kfree(buf->data); kfree(buf); } static void tty_audit_buf_put(struct tty_audit_buf *buf) { if (atomic_dec_and_test(&buf->count)) tty_audit_buf_free(buf); } static void tty_audit_log(const char *description, struct task_struct *tsk, uid_t loginuid, unsigned sessionid, int major, int minor, unsigned char *data, size_t size) { struct audit_buffer *ab; ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); if (ab) { char name[sizeof(tsk->comm)]; uid_t uid = task_uid(tsk); audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u " "major=%d minor=%d comm=", description, tsk->pid, uid, loginuid, sessionid, major, minor); get_task_comm(name, tsk); audit_log_untrustedstring(ab, name); audit_log_format(ab, " data="); audit_log_n_hex(ab, data, size); audit_log_end(ab); } } /** * tty_audit_buf_push - Push buffered data out * * Generate an 
audit message from the contents of @buf, which is owned by
 * @tsk with @loginuid.  @buf->mutex must be locked.
 */
static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid,
			       unsigned int sessionid,
			       struct tty_audit_buf *buf)
{
	if (buf->valid == 0)
		return;
	if (audit_enabled == 0) {
		/* auditing is off: discard the data rather than log it */
		buf->valid = 0;
		return;
	}
	tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor,
		      buf->data, buf->valid);
	buf->valid = 0;
}

/**
 * tty_audit_buf_push_current - Push buffered data out
 *
 * Generate an audit message from the contents of @buf, which is owned by
 * the current task.  @buf->mutex must be locked.
 */
static void tty_audit_buf_push_current(struct tty_audit_buf *buf)
{
	uid_t auid = audit_get_loginuid(current);
	unsigned int sessionid = audit_get_sessionid(current);

	tty_audit_buf_push(current, auid, sessionid, buf);
}

/**
 * tty_audit_exit - Handle a task exit
 *
 * Make sure all buffered data is written out and deallocate the buffer.
 * Only needs to be called if current->signal->tty_audit_buf != %NULL.
 */
void tty_audit_exit(void)
{
	struct tty_audit_buf *buf;

	/* detach the buffer from the exiting task under the sighand lock */
	spin_lock_irq(&current->sighand->siglock);
	buf = current->signal->tty_audit_buf;
	current->signal->tty_audit_buf = NULL;
	spin_unlock_irq(&current->sighand->siglock);
	if (!buf)
		return;

	mutex_lock(&buf->mutex);
	tty_audit_buf_push_current(buf);
	mutex_unlock(&buf->mutex);

	tty_audit_buf_put(buf);
}

/**
 * tty_audit_fork - Copy TTY audit state for a new task
 *
 * Set up TTY audit state in @sig from current.  @sig needs no locking.
*/
void tty_audit_fork(struct signal_struct *sig)
{
	spin_lock_irq(&current->sighand->siglock);
	sig->audit_tty = current->signal->audit_tty;
	spin_unlock_irq(&current->sighand->siglock);
}

/**
 * tty_audit_tiocsti - Log TIOCSTI
 */
void tty_audit_tiocsti(struct tty_struct *tty, char ch)
{
	struct tty_audit_buf *buf;
	int major, minor, should_audit;

	/* snapshot the audit state and grab a buffer reference under lock */
	spin_lock_irq(&current->sighand->siglock);
	should_audit = current->signal->audit_tty;
	buf = current->signal->tty_audit_buf;
	if (buf)
		atomic_inc(&buf->count);
	spin_unlock_irq(&current->sighand->siglock);

	major = tty->driver->major;
	minor = tty->driver->minor_start + tty->index;
	if (buf) {
		mutex_lock(&buf->mutex);
		/* flush pending input for this TTY before logging the ioctl */
		if (buf->major == major && buf->minor == minor)
			tty_audit_buf_push_current(buf);
		mutex_unlock(&buf->mutex);
		tty_audit_buf_put(buf);
	}

	if (should_audit && audit_enabled) {
		uid_t auid;
		unsigned int sessionid;

		auid = audit_get_loginuid(current);
		sessionid = audit_get_sessionid(current);
		tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major,
			      minor, &ch, 1);
	}
}

/**
 * tty_audit_push_task - Flush task's pending audit data
 * @tsk: task pointer
 * @loginuid: sender login uid
 * @sessionid: sender session id
 *
 * Called with a ref on @tsk held.  Try to lock sighand and get a
 * reference to the tty audit buffer if available.
 * Flush the buffer or return an appropriate error code.
 */
int tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid)
{
	struct tty_audit_buf *buf = ERR_PTR(-EPERM);
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return -ESRCH;

	if (tsk->signal->audit_tty) {
		buf = tsk->signal->tty_audit_buf;
		if (buf)
			atomic_inc(&buf->count);
	}
	unlock_task_sighand(tsk, &flags);

	/*
	 * Return 0 when signal->audit_tty set
	 * but tsk->signal->tty_audit_buf == NULL.
	 * (PTR_ERR(NULL) is 0; PTR_ERR of the -EPERM sentinel is -EPERM.)
	 */
	if (!buf || IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&buf->mutex);
	tty_audit_buf_push(tsk, loginuid, sessionid, buf);
	mutex_unlock(&buf->mutex);

	tty_audit_buf_put(buf);
	return 0;
}

/**
 * tty_audit_buf_get - Get an audit buffer.
*
 * Get an audit buffer for @tty, allocate it if necessary.  Return %NULL
 * if TTY auditing is disabled or out of memory.  Otherwise, return a new
 * reference to the buffer.
 */
static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty)
{
	struct tty_audit_buf *buf, *buf2;

	buf = NULL;
	buf2 = NULL;
	spin_lock_irq(&current->sighand->siglock);
	if (likely(!current->signal->audit_tty))
		goto out;
	buf = current->signal->tty_audit_buf;
	if (buf) {
		atomic_inc(&buf->count);
		goto out;
	}
	spin_unlock_irq(&current->sighand->siglock);

	/* allocate without the lock held, then recheck under the lock */
	buf2 = tty_audit_buf_alloc(tty->driver->major,
				   tty->driver->minor_start + tty->index,
				   tty->icanon);
	if (buf2 == NULL) {
		audit_log_lost("out of memory in TTY auditing");
		return NULL;
	}

	spin_lock_irq(&current->sighand->siglock);
	if (!current->signal->audit_tty)
		goto out;
	buf = current->signal->tty_audit_buf;
	if (!buf) {
		/* we won the race: install our freshly allocated buffer */
		current->signal->tty_audit_buf = buf2;
		buf = buf2;
		buf2 = NULL;
	}
	atomic_inc(&buf->count);
	/* Fall through */
 out:
	spin_unlock_irq(&current->sighand->siglock);
	if (buf2)
		tty_audit_buf_free(buf2);	/* lost the race: discard */
	return buf;
}

/**
 * tty_audit_add_data - Add data for TTY auditing.
 *
 * Audit @data of @size from @tty, if necessary.
*/ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, size_t size) { struct tty_audit_buf *buf; int major, minor; if (unlikely(size == 0)) return; if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) return; buf = tty_audit_buf_get(tty); if (!buf) return; mutex_lock(&buf->mutex); major = tty->driver->major; minor = tty->driver->minor_start + tty->index; if (buf->major != major || buf->minor != minor || buf->icanon != tty->icanon) { tty_audit_buf_push_current(buf); buf->major = major; buf->minor = minor; buf->icanon = tty->icanon; } do { size_t run; run = N_TTY_BUF_SIZE - buf->valid; if (run > size) run = size; memcpy(buf->data + buf->valid, data, run); buf->valid += run; data += run; size -= run; if (buf->valid == N_TTY_BUF_SIZE) tty_audit_buf_push_current(buf); } while (size != 0); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } /** * tty_audit_push - Push buffered data out * * Make sure no audit data is pending for @tty on the current process. */ void tty_audit_push(struct tty_struct *tty) { struct tty_audit_buf *buf; spin_lock_irq(&current->sighand->siglock); if (likely(!current->signal->audit_tty)) { spin_unlock_irq(&current->sighand->siglock); return; } buf = current->signal->tty_audit_buf; if (buf) atomic_inc(&buf->count); spin_unlock_irq(&current->sighand->siglock); if (buf) { int major, minor; major = tty->driver->major; minor = tty->driver->minor_start + tty->index; mutex_lock(&buf->mutex); if (buf->major == major && buf->minor == minor) tty_audit_buf_push_current(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } }
gpl-2.0
onejay09/runnymede-kitkat_3.0.101-wip
drivers/ide/hpt366.c
8249
43126
/* * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc * Portions Copyright (C) 2007 Bartlomiej Zolnierkiewicz * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. * * Thanks to HighPoint Technologies for their assistance, and hardware. * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his * donation of an ABit BP6 mainboard, processor, and memory acellerated * development and support. * * * HighPoint has its own drivers (open source except for the RAID part) * available from http://www.highpoint-tech.com/USA_new/service_support.htm * This may be useful to anyone wanting to work on this driver, however do not * trust them too much since the code tends to become less and less meaningful * as the time passes... :-/ * * Note that final HPT370 support was done by force extraction of GPL. * * - add function for getting/setting power status of drive * - the HPT370's state machine can get confused. reset it before each dma * xfer to prevent that from happening. * - reset state engine whenever we get an error. * - check for busmaster state at end of dma. * - use new highpoint timings. * - detect bus speed using highpoint register. * - use pll if we don't have a clock table. added a 66MHz table that's * just 2x the 33MHz table. * - removed turnaround. NOTE: we never want to switch between pll and * pci clocks as the chip can glitch in those cases. the highpoint * approved workaround slows everything down too much to be useful. in * addition, we would have to serialize access to each chip. * Adrian Sun <a.sun@sun.com> * * add drive timings for 66MHz PCI bus, * fix ATA Cable signal detection, fix incorrect /proc info * add /proc display for per-drive PIO/DMA/UDMA mode and * per-channel ATA-33/66 Cable detect. 
* Duncan Laurie <void@sun.com> * * fixup /proc output for multiple controllers * Tim Hockin <thockin@sun.com> * * On hpt366: * Reset the hpt366 on error, reset on dma * Fix disabling Fast Interrupt hpt366. * Mike Waychison <crlf@sun.com> * * Added support for 372N clocking and clock switching. The 372N needs * different clocks on read/write. This requires overloading rw_disk and * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for * keeping me sane. * Alan Cox <alan@lxorguk.ukuu.org.uk> * * - fix the clock turnaround code: it was writing to the wrong ports when * called for the secondary channel, caching the current clock mode per- * channel caused the cached register value to get out of sync with the * actual one, the channels weren't serialized, the turnaround shouldn't * be done on 66 MHz PCI bus * - disable UltraATA/100 for HPT370 by default as the 33 MHz clock being used * does not allow for this speed anyway * - avoid touching disabled channels (e.g. HPT371/N are single channel chips, * their primary channel is kind of virtual, it isn't tied to any pins) * - fix/remove bad/unused timing tables and use one set of tables for the whole * HPT37x chip family; save space by introducing the separate transfer mode * table in which the mode lookup is done * - use f_CNT value saved by the HighPoint BIOS as reading it directly gives * the wrong PCI frequency since DPLL has already been calibrated by BIOS; * read it only from the function 0 of HPT374 chips * - fix the hotswap code: it caused RESET- to glitch when tristating the bus, * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead * - pass to init_chipset() handlers a copy of the IDE PCI device structure as * they tamper with its fields * - pass to the init_setup handlers a copy of the ide_pci_device_t structure * since they may tamper with its fields * - prefix the driver startup messages with the real chip name * - claim the extra 240 bytes of I/O space for all chips * - 
optimize the UltraDMA filtering and the drive list lookup code * - use pci_get_slot() to get to the function 1 of HPT36x/374 * - cache offset of the channel's misc. control registers (MCRs) being used * throughout the driver * - only touch the relevant MCR when detecting the cable type on HPT374's * function 1 * - rename all the register related variables consistently * - move all the interrupt twiddling code from the speedproc handlers into * init_hwif_hpt366(), also grouping all the DMA related code together there * - merge HPT36x/HPT37x speedproc handlers, fix PIO timing register mask and * separate the UltraDMA and MWDMA masks there to avoid changing PIO timings * when setting an UltraDMA mode * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select * the best possible one * - clean up DMA timeout handling for HPT370 * - switch to using the enumeration type to differ between the numerous chip * variants, matching PCI device/revision ID with the chip type early, at the * init_setup stage * - extend the hpt_info structure to hold the DPLL and PCI clock frequencies, * stop duplicating it for each channel by storing the pointer in the pci_dev * structure: first, at the init_setup stage, point it to a static "template" * with only the chip type and its specific base DPLL frequency, the highest * UltraDMA mode, and the chip settings table pointer filled, then, at the * init_chipset stage, allocate per-chip instance and fill it with the rest * of the necessary information * - get rid of the constant thresholds in the HPT37x PCI clock detection code, * switch to calculating PCI clock frequency based on the chip's base DPLL * frequency * - switch to using the DPLL clock and enable UltraATA/133 mode by default on * anything newer than HPT370/A (except HPT374 that is not capable of this * mode according to the manual) * - fold PCI clock detection and DPLL setup code into init_chipset_hpt366(), * also fixing the interchanged 25/40 MHz PCI clock cases 
for HPT36x chips;
 * unify HPT36x/37x timing setup code and the speedproc handlers by joining
 * the register setting lists into the table indexed by the clock selected
 * - set the correct hwif->ultra_mask for each individual chip
 * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
 * - stop resetting HPT370's state machine before each DMA transfer as that has
 * caused more harm than good
 * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#define DRV_NAME "hpt366"

/* various tuning parameters */
#undef	HPT_RESET_STATE_ENGINE
#undef	HPT_DELAY_INTERRUPT

/*
 * Drive blacklists used by the hpt3xx_udma_filter() mode filter below.
 * Each list is a NULL-terminated array of ATA model strings (compared
 * against the drive's ATA_ID_PROD identify field).
 */

/* Drives that misbehave at UltraATA/100 -- capped to UDMA4 on HPT370. */
static const char *bad_ata100_5[] = {
	"IBM-DTLA-307075",
	"IBM-DTLA-307060",
	"IBM-DTLA-307045",
	"IBM-DTLA-307030",
	"IBM-DTLA-307020",
	"IBM-DTLA-307015",
	"IBM-DTLA-305040",
	"IBM-DTLA-305030",
	"IBM-DTLA-305020",
	"IC35L010AVER07-0",
	"IC35L020AVER07-0",
	"IC35L030AVER07-0",
	"IC35L040AVER07-0",
	"IC35L060AVER07-0",
	"WDC AC310200R",
	NULL
};

/* Drives that misbehave at UltraATA/66 (UDMA4) -- capped to UDMA3. */
static const char *bad_ata66_4[] = {
	"IBM-DTLA-307075",
	"IBM-DTLA-307060",
	"IBM-DTLA-307045",
	"IBM-DTLA-307030",
	"IBM-DTLA-307020",
	"IBM-DTLA-307015",
	"IBM-DTLA-305040",
	"IBM-DTLA-305030",
	"IBM-DTLA-305020",
	"IC35L010AVER07-0",
	"IC35L020AVER07-0",
	"IC35L030AVER07-0",
	"IC35L040AVER07-0",
	"IC35L060AVER07-0",
	"WDC AC310200R",
	"MAXTOR STM3320620A",
	NULL
};

/* Drives additionally capped to UDMA2 on HPT36x. */
static const char *bad_ata66_3[] = {
	"WDC AC310200R",
	NULL
};

/* Drives for which UltraDMA is disabled entirely (mask forced to 0). */
static const char *bad_ata33[] = {
	"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
	"Maxtor 90845U3", "Maxtor 90650U2",
	"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
	"Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
	"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
	"Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4", "Maxtor 90510D4",
	"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
	"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
	"Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
	"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
	"Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
	NULL
};

/*
 * Transfer mode lookup order for the timing tables below: the index of a
 * mode in this array is the row index into every clock table.
 */
static u8 xfer_speeds[] = {
	XFER_UDMA_6,
	XFER_UDMA_5,
	XFER_UDMA_4,
	XFER_UDMA_3,
	XFER_UDMA_2,
	XFER_UDMA_1,
	XFER_UDMA_0,

	XFER_MW_DMA_2,
	XFER_MW_DMA_1,
	XFER_MW_DMA_0,

	XFER_PIO_4,
	XFER_PIO_3,
	XFER_PIO_2,
	XFER_PIO_1,
	XFER_PIO_0
};

/* Key for bus clock timings
 * 36x   37x
 * bits  bits
 * 0:3	 0:3	 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
 *		 cycles = value + 1
 * 4:7	 4:8	 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
 *		 cycles = value + 1
 * 8:11  9:12	 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
 *		 register access.
 * 12:15 13:17	 cmd_low_time. Active time of DIOW_/DIOR_ during task file
 *		 register access.
 * 16:18 18:20	 udma_cycle_time. Clock cycles for UDMA xfer.
 * -	 21	 CLK frequency: 0=ATA clock, 1=dual ATA clock.
 * 19:21 22:24	 pre_high_time. Time to initialize the 1st cycle for PIO and
 *		 MW DMA xfer.
 * 22:24 25:27	 cmd_pre_high_time. Time to initialize the 1st PIO cycle for
 *		 task file register access.
 * 28	 28	 UDMA enable.
 * 29	 29	 DMA enable.
 * 30	 30	 PIO MST enable. If set, the chip is in bus master mode during
 *		 PIO xfer.
 * 31	 31	 FIFO enable.
 */

/*
 * Per-clock timing tables.  Each row corresponds to the mode at the same
 * index in xfer_speeds[]; values are raw 32-bit timing register words
 * decoded per the key comment above.  Do NOT "tune" these by hand.
 */

static u32 forty_base_hpt36x[] = {
	/* XFER_UDMA_6 */	0x900fd943,
	/* XFER_UDMA_5 */	0x900fd943,
	/* XFER_UDMA_4 */	0x900fd943,
	/* XFER_UDMA_3 */	0x900ad943,
	/* XFER_UDMA_2 */	0x900bd943,
	/* XFER_UDMA_1 */	0x9008d943,
	/* XFER_UDMA_0 */	0x9008d943,

	/* XFER_MW_DMA_2 */	0xa008d943,
	/* XFER_MW_DMA_1 */	0xa010d955,
	/* XFER_MW_DMA_0 */	0xa010d9fc,

	/* XFER_PIO_4 */	0xc008d963,
	/* XFER_PIO_3 */	0xc010d974,
	/* XFER_PIO_2 */	0xc010d997,
	/* XFER_PIO_1 */	0xc010d9c7,
	/* XFER_PIO_0 */	0xc018d9d9
};

static u32 thirty_three_base_hpt36x[] = {
	/* XFER_UDMA_6 */	0x90c9a731,
	/* XFER_UDMA_5 */	0x90c9a731,
	/* XFER_UDMA_4 */	0x90c9a731,
	/* XFER_UDMA_3 */	0x90cfa731,
	/* XFER_UDMA_2 */	0x90caa731,
	/* XFER_UDMA_1 */	0x90cba731,
	/* XFER_UDMA_0 */	0x90c8a731,

	/* XFER_MW_DMA_2 */	0xa0c8a731,
	/* XFER_MW_DMA_1 */	0xa0c8a732,	/* 0xa0c8a733 */
	/* XFER_MW_DMA_0 */	0xa0c8a797,

	/* XFER_PIO_4 */	0xc0c8a731,
	/* XFER_PIO_3 */	0xc0c8a742,
	/* XFER_PIO_2 */	0xc0d0a753,
	/* XFER_PIO_1 */	0xc0d0a7a3,	/* 0xc0d0a793 */
	/* XFER_PIO_0 */	0xc0d0a7aa	/* 0xc0d0a7a7 */
};

static u32 twenty_five_base_hpt36x[] = {
	/* XFER_UDMA_6 */	0x90c98521,
	/* XFER_UDMA_5 */	0x90c98521,
	/* XFER_UDMA_4 */	0x90c98521,
	/* XFER_UDMA_3 */	0x90cf8521,
	/* XFER_UDMA_2 */	0x90cf8521,
	/* XFER_UDMA_1 */	0x90cb8521,
	/* XFER_UDMA_0 */	0x90cb8521,

	/* XFER_MW_DMA_2 */	0xa0ca8521,
	/* XFER_MW_DMA_1 */	0xa0ca8532,
	/* XFER_MW_DMA_0 */	0xa0ca8575,

	/* XFER_PIO_4 */	0xc0ca8521,
	/* XFER_PIO_3 */	0xc0ca8532,
	/* XFER_PIO_2 */	0xc0ca8542,
	/* XFER_PIO_1 */	0xc0d08572,
	/* XFER_PIO_0 */	0xc0d08585
};

/*
 * The following are the new timing tables with PIO mode data/taskfile transfer
 * overclocking fixed...
 */

/* This table is taken from the HPT370 data manual rev. 1.02 */
static u32 thirty_three_base_hpt37x[] = {
	/* XFER_UDMA_6 */	0x16455031,	/* 0x16655031 ?? */
	/* XFER_UDMA_5 */	0x16455031,
	/* XFER_UDMA_4 */	0x16455031,
	/* XFER_UDMA_3 */	0x166d5031,
	/* XFER_UDMA_2 */	0x16495031,
	/* XFER_UDMA_1 */	0x164d5033,
	/* XFER_UDMA_0 */	0x16515097,

	/* XFER_MW_DMA_2 */	0x26515031,
	/* XFER_MW_DMA_1 */	0x26515033,
	/* XFER_MW_DMA_0 */	0x26515097,

	/* XFER_PIO_4 */	0x06515021,
	/* XFER_PIO_3 */	0x06515022,
	/* XFER_PIO_2 */	0x06515033,
	/* XFER_PIO_1 */	0x06915065,
	/* XFER_PIO_0 */	0x06d1508a
};

static u32 fifty_base_hpt37x[] = {
	/* XFER_UDMA_6 */	0x1a861842,
	/* XFER_UDMA_5 */	0x1a861842,
	/* XFER_UDMA_4 */	0x1aae1842,
	/* XFER_UDMA_3 */	0x1a8e1842,
	/* XFER_UDMA_2 */	0x1a0e1842,
	/* XFER_UDMA_1 */	0x1a161854,
	/* XFER_UDMA_0 */	0x1a1a18ea,

	/* XFER_MW_DMA_2 */	0x2a821842,
	/* XFER_MW_DMA_1 */	0x2a821854,
	/* XFER_MW_DMA_0 */	0x2a8218ea,

	/* XFER_PIO_4 */	0x0a821842,
	/* XFER_PIO_3 */	0x0a821843,
	/* XFER_PIO_2 */	0x0a821855,
	/* XFER_PIO_1 */	0x0ac218a8,
	/* XFER_PIO_0 */	0x0b02190c
};

static u32 sixty_six_base_hpt37x[] = {
	/* XFER_UDMA_6 */	0x1c86fe62,
	/* XFER_UDMA_5 */	0x1caefe62,	/* 0x1c8afe62 */
	/* XFER_UDMA_4 */	0x1c8afe62,
	/* XFER_UDMA_3 */	0x1c8efe62,
	/* XFER_UDMA_2 */	0x1c92fe62,
	/* XFER_UDMA_1 */	0x1c9afe62,
	/* XFER_UDMA_0 */	0x1c82fe62,

	/* XFER_MW_DMA_2 */	0x2c82fe62,
	/* XFER_MW_DMA_1 */	0x2c82fe66,
	/* XFER_MW_DMA_0 */	0x2c82ff2e,

	/* XFER_PIO_4 */	0x0c82fe62,
	/* XFER_PIO_3 */	0x0c82fe84,
	/* XFER_PIO_2 */	0x0c82fea6,
	/* XFER_PIO_1 */	0x0d02ff26,
	/* XFER_PIO_0 */	0x0d42ff7f
};

/* Per-chip policy switches for the highest UltraDMA mode to advertise. */
#define HPT371_ALLOW_ATA133_6		1
#define HPT302_ALLOW_ATA133_6		1
#define HPT372_ALLOW_ATA133_6		1
#define HPT370_ALLOW_ATA100_5		0
#define HPT366_ALLOW_ATA66_4		1
#define HPT366_ALLOW_ATA66_3		1

/* Supported ATA clock frequencies */
enum ata_clock {
	ATA_CLOCK_25MHZ,
	ATA_CLOCK_33MHZ,
	ATA_CLOCK_40MHZ,
	ATA_CLOCK_50MHZ,
	ATA_CLOCK_66MHZ,
	NUM_ATA_CLOCKS
};

/*
 * Register masks and per-clock timing tables for one chip family.
 * A NULL clock_table entry means that clock is unsupported and the
 * DPLL must be used instead (see init_chipset_hpt366()).
 */
struct hpt_timings {
	u32 pio_mask;
	u32 dma_mask;
	u32 ultra_mask;
	u32 *clock_table[NUM_ATA_CLOCKS];
};

/*
 * Hold all the HighPoint chip information in one place.
 */

struct hpt_info {
	char *chip_name;		/* Chip name */
	u8 chip_type;			/* Chip type */
	u8 udma_mask;			/* Allowed UltraDMA modes mask. */
	u8 dpll_clk;			/* DPLL clock in MHz */
	u8 pci_clk;			/* PCI clock in MHz */
	struct hpt_timings *timings;	/* Chipset timing data */
	u8 clock;			/* ATA clock selected */
};

/* Supported HighPoint chips */
enum {
	HPT36x,
	HPT370,
	HPT370A,
	HPT374,
	HPT372,
	HPT372A,
	HPT302,
	HPT371,
	HPT372N,
	HPT302N,
	HPT371N
};

static struct hpt_timings hpt36x_timings = {
	.pio_mask	= 0xc1f8ffff,
	.dma_mask	= 0x303800ff,
	.ultra_mask	= 0x30070000,
	.clock_table	= {
		[ATA_CLOCK_25MHZ] = twenty_five_base_hpt36x,
		[ATA_CLOCK_33MHZ] = thirty_three_base_hpt36x,
		[ATA_CLOCK_40MHZ] = forty_base_hpt36x,
		[ATA_CLOCK_50MHZ] = NULL,
		[ATA_CLOCK_66MHZ] = NULL
	}
};

static struct hpt_timings hpt37x_timings = {
	.pio_mask	= 0xcfc3ffff,
	.dma_mask	= 0x31c001ff,
	.ultra_mask	= 0x303c0000,
	.clock_table	= {
		[ATA_CLOCK_25MHZ] = NULL,
		[ATA_CLOCK_33MHZ] = thirty_three_base_hpt37x,
		[ATA_CLOCK_40MHZ] = NULL,
		[ATA_CLOCK_50MHZ] = fifty_base_hpt37x,
		[ATA_CLOCK_66MHZ] = sixty_six_base_hpt37x
	}
};

/*
 * Static "templates" matched against PCI device/revision IDs in
 * hpt366_init_one() and copied into a per-chip instance there.
 */

static const struct hpt_info hpt36x __devinitdata = {
	.chip_name	= "HPT36x",
	.chip_type	= HPT36x,
	.udma_mask	= HPT366_ALLOW_ATA66_3 ?
			  (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) :
			  ATA_UDMA2,
	.dpll_clk	= 0,	/* no DPLL */
	.timings	= &hpt36x_timings
};

static const struct hpt_info hpt370 __devinitdata = {
	.chip_name	= "HPT370",
	.chip_type	= HPT370,
	.udma_mask	= HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
	.dpll_clk	= 48,
	.timings	= &hpt37x_timings
};

static const struct hpt_info hpt370a __devinitdata = {
	.chip_name	= "HPT370A",
	.chip_type	= HPT370A,
	.udma_mask	= HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
	.dpll_clk	= 48,
	.timings	= &hpt37x_timings
};

static const struct hpt_info hpt374 __devinitdata = {
	.chip_name	= "HPT374",
	.chip_type	= HPT374,
	.udma_mask	= ATA_UDMA5,
	.dpll_clk	= 48,
	.timings	= &hpt37x_timings
};

static const struct hpt_info hpt372 __devinitdata = {
	.chip_name	= "HPT372",
	.chip_type	= HPT372,
	.udma_mask	= HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
	.dpll_clk	= 55,
	.timings	= &hpt37x_timings
};

static const struct hpt_info hpt372a __devinitdata = {
	.chip_name	= "HPT372A",
	.chip_type	= HPT372A,
	.udma_mask	= HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
	.dpll_clk	= 66,
	.timings	= &hpt37x_timings
};

static const struct hpt_info hpt302 __devinitdata = {
	.chip_name	= "HPT302",
	.chip_type	= HPT302,
	.udma_mask	= HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
	.dpll_clk	= 66,
	.timings	= &hpt37x_timings
};

static const struct hpt_info hpt371 __devinitdata = {
	.chip_name	= "HPT371",
	.chip_type	= HPT371,
	.udma_mask	= HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
	.dpll_clk	= 66,
	.timings	= &hpt37x_timings
};

static const struct hpt_info hpt372n __devinitdata = {
	.chip_name	= "HPT372N",
	.chip_type	= HPT372N,
	.udma_mask	= HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
	.dpll_clk	= 77,
	.timings	= &hpt37x_timings
};

static const struct hpt_info hpt302n __devinitdata = {
	.chip_name	= "HPT302N",
	.chip_type	= HPT302N,
	.udma_mask	= HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
	.dpll_clk	= 77,
	.timings	= &hpt37x_timings
};

static const struct hpt_info hpt371n __devinitdata = {
	.chip_name	= "HPT371N",
	.chip_type	= HPT371N,
	.udma_mask	= HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
	.dpll_clk	= 77,
	.timings	= &hpt37x_timings
};

/* Return 1 if the drive's ATA product string appears in @list, 0 otherwise. */
static int check_in_drive_list(ide_drive_t *drive, const char **list)
{
	char *m = (char *)&drive->id[ATA_ID_PROD];

	while (*list)
		if (!strcmp(*list++, m))
			return 1;
	return 0;
}

/*
 * Fetch the per-chip hpt_info for @dev.  host_priv holds an array of one
 * or two hpt_info instances (see hpt366_init_one()); the second entry
 * belongs to the second PCI function, if any.
 */
static struct hpt_info *hpt3xx_get_info(struct device *dev)
{
	struct ide_host *host	= dev_get_drvdata(dev);
	struct hpt_info *info	= (struct hpt_info *)host->host_priv;

	return dev == host->dev[1] ? info + 1 : info;
}

/*
 * The Marvell bridge chips used on the HighPoint SATA cards do not seem
 * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes...
 */
/* Return the UltraDMA mode mask allowed for @drive on this chip. */
static u8 hpt3xx_udma_filter(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct hpt_info *info = hpt3xx_get_info(hwif->dev);
	u8 mask = hwif->ultra_mask;

	switch (info->chip_type) {
	case HPT36x:
		if (!HPT366_ALLOW_ATA66_4 ||
		    check_in_drive_list(drive, bad_ata66_4))
			mask = ATA_UDMA3;

		if (!HPT366_ALLOW_ATA66_3 ||
		    check_in_drive_list(drive, bad_ata66_3))
			mask = ATA_UDMA2;
		break;
	case HPT370:
		if (!HPT370_ALLOW_ATA100_5 ||
		    check_in_drive_list(drive, bad_ata100_5))
			mask = ATA_UDMA4;
		break;
	case HPT370A:
		if (!HPT370_ALLOW_ATA100_5 ||
		    check_in_drive_list(drive, bad_ata100_5))
			return ATA_UDMA4;
		/* Fall thru */
	case HPT372 :
	case HPT372A:
	case HPT372N:
	case HPT374 :
		if (ata_id_is_sata(drive->id))
			mask &= ~0x0e;
		/* Fall thru */
	default:
		return mask;
	}

	/* HPT36x/HPT370: quirky drives get UltraDMA disabled entirely. */
	return check_in_drive_list(drive, bad_ata33) ? 0x00 : mask;
}

/* Return the MWDMA mode mask allowed for @drive (SATA bridges get none). */
static u8 hpt3xx_mdma_filter(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct hpt_info *info = hpt3xx_get_info(hwif->dev);

	switch (info->chip_type) {
	case HPT372 :
	case HPT372A:
	case HPT372N:
	case HPT374 :
		if (ata_id_is_sata(drive->id))
			return 0x00;
		/* Fall thru */
	default:
		return 0x07;
	}
}

/* Return the raw timing register word for @speed at the selected ATA clock. */
static u32 get_speed_setting(u8 speed, struct hpt_info *info)
{
	int i;

	/*
	 * Lookup the transfer mode table to get the index into
	 * the timing table.
	 *
	 * NOTE: For XFER_PIO_SLOW, PIO mode 0 timings will be used.
	 */
	for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
		if (xfer_speeds[i] == speed)
			break;

	return info->timings->clock_table[info->clock][i];
}

/*
 * Program the per-drive timing register (0x40 + 4 * drive number) with
 * the timing word for drive->dma_mode, touching only the bit-field group
 * (PIO/MWDMA/UDMA) that the mode belongs to.
 */
static void hpt3xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	struct pci_dev *dev	= to_pci_dev(hwif->dev);
	struct hpt_info *info	= hpt3xx_get_info(hwif->dev);
	struct hpt_timings *t	= info->timings;
	u8  itr_addr		= 0x40 + (drive->dn * 4);
	u32 old_itr		= 0;
	const u8 speed		= drive->dma_mode;
	u32 new_itr		= get_speed_setting(speed, info);
	u32 itr_mask		= speed < XFER_MW_DMA_0 ? t->pio_mask :
				 (speed < XFER_UDMA_0   ? t->dma_mask :
							  t->ultra_mask);

	pci_read_config_dword(dev, itr_addr, &old_itr);
	new_itr = (old_itr & ~itr_mask) | (new_itr & itr_mask);

	/*
	 * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well)
	 * to avoid problems handling I/O errors later
	 */
	new_itr &= ~0xc0000000;

	pci_write_config_dword(dev, itr_addr, new_itr);
}

/* PIO path: reuse hpt3xx_set_mode() with the requested PIO mode. */
static void hpt3xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	drive->dma_mode = drive->pio_mode;
	hpt3xx_set_mode(hwif, drive);
}

/*
 * Mask/unmask the drive's IRQ: HPT370+ chips via the SCR1 register
 * (bit 4), older chips by disabling the interrupt line itself.
 * Only applied to drives flagged with the NIEN quirk.
 */
static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
{
	ide_hwif_t *hwif	= drive->hwif;
	struct pci_dev	*dev	= to_pci_dev(hwif->dev);
	struct hpt_info *info	= hpt3xx_get_info(hwif->dev);

	if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
		return;

	if (info->chip_type >= HPT370) {
		u8 scr1 = 0;

		pci_read_config_byte(dev, 0x5a, &scr1);
		if (((scr1 & 0x10) >> 4) != mask) {
			if (mask)
				scr1 |=  0x10;
			else
				scr1 &= ~0x10;
			pci_write_config_byte(dev, 0x5a, scr1);
		}
	} else if (mask)
		disable_irq(hwif->irq);
	else
		enable_irq(hwif->irq);
}

/*
 * This is specific to the HPT366 UDMA chipset
 * by HighPoint|Triones Technologies, Inc.
 */
/*
 * Lost-IRQ handler for HPT36x: dump the channel MCRs and SCR1 for
 * diagnosis, drop the IRQ mask bit if set, then defer to the core.
 */
static void hpt366_dma_lost_irq(ide_drive_t *drive)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	u8 mcr1 = 0, mcr3 = 0, scr1 = 0;

	pci_read_config_byte(dev, 0x50, &mcr1);
	pci_read_config_byte(dev, 0x52, &mcr3);
	pci_read_config_byte(dev, 0x5a, &scr1);
	printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n",
		drive->name, __func__, mcr1, mcr3, scr1);
	if (scr1 & 0x10)
		pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
	ide_dma_lost_irq(drive);
}

/* Reset the channel's DMA state machine (write 0x37 to the MCR). */
static void hpt370_clear_engine(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	pci_write_config_byte(dev, hwif->select_data, 0x37);
	udelay(10);
}

/*
 * DMA timeout handler for HPT370: report what is left in the FIFO,
 * stop the bus-master engine and reset the channel state machine.
 */
static void hpt370_irq_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif	= drive->hwif;
	struct pci_dev *dev	= to_pci_dev(hwif->dev);
	u16 bfifo		= 0;
	u8  dma_cmd;

	pci_read_config_word(dev, hwif->select_data + 2, &bfifo);
	printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);

	/* get DMA command mode */
	dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
	/* stop DMA */
	outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
	hpt370_clear_engine(drive);
}

/* Optionally reset the state engine before each transfer, then start DMA. */
static void hpt370_dma_start(ide_drive_t *drive)
{
#ifdef HPT_RESET_STATE_ENGINE
	hpt370_clear_engine(drive);
#endif
	ide_dma_start(drive);
}

/*
 * End a DMA transfer on HPT370: if the engine still reports activity,
 * give it a short grace period, then force a timeout cleanup before
 * handing off to the generic end handler.
 */
static int hpt370_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif	= drive->hwif;
	u8  dma_stat		= inb(hwif->dma_base + ATA_DMA_STATUS);

	if (dma_stat & ATA_DMA_ACTIVE) {
		/* wait a little */
		udelay(20);
		dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
		if (dma_stat & ATA_DMA_ACTIVE)
			hpt370_irq_timeout(drive);
	}
	return ide_dma_end(drive);
}

/* returns 1 if DMA IRQ issued, 0 otherwise */
static int hpt374_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif	= drive->hwif;
	struct pci_dev *dev	= to_pci_dev(hwif->dev);
	u16 bfifo		= 0;
	u8  dma_stat;

	pci_read_config_word(dev, hwif->select_data + 2, &bfifo);
	if (bfifo & 0x1FF) {
//		printk("%s: %d bytes in FIFO\n", drive->name, bfifo);
		return 0;
	}

	dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
	/* return 1 if INTR asserted */
	if (dma_stat & ATA_DMA_INTR)
		return 1;

	return 0;
}

/*
 * End a DMA transfer on HPT374: if the bus-master wait state register
 * flags this channel, kick the MCR (bits 4-5) to release it first.
 */
static int hpt374_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif	= drive->hwif;
	struct pci_dev *dev	= to_pci_dev(hwif->dev);
	u8 mcr	= 0, mcr_addr	= hwif->select_data;
	u8 bwsr = 0, mask	= hwif->channel ? 0x02 : 0x01;

	pci_read_config_byte(dev, 0x6a, &bwsr);
	pci_read_config_byte(dev, mcr_addr, &mcr);
	if (bwsr & mask)
		pci_write_config_byte(dev, mcr_addr, mcr | 0x30);
	return ide_dma_end(drive);
}

/**
 *	hpt3xxn_set_clock	-	perform clock switching dance
 *	@hwif: hwif to switch
 *	@mode: clocking mode (0x21 for write, 0x23 otherwise)
 *
 *	Switch the DPLL clock on the HPT3xxN devices. This is a	right mess.
 */
static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
{
	unsigned long base = hwif->extra_base;
	u8 scr2 = inb(base + 0x6b);

	/* Already in the requested clock mode?  Nothing to do. */
	if ((scr2 & 0x7f) == mode)
		return;

	/* Tristate the bus */
	outb(0x80, base + 0x63);
	outb(0x80, base + 0x67);

	/* Switch clock and reset channels */
	outb(mode, base + 0x6b);
	outb(0xc0, base + 0x69);

	/*
	 * Reset the state machines.
	 * NOTE: avoid accidentally enabling the disabled channels.
	 */
	outb(inb(base + 0x60) | 0x32, base + 0x60);
	outb(inb(base + 0x64) | 0x32, base + 0x64);

	/* Complete reset */
	outb(0x00, base + 0x69);

	/* Reconnect channels to bus */
	outb(0x00, base + 0x63);
	outb(0x00, base + 0x67);
}

/**
 *	hpt3xxn_rw_disk		-	prepare for I/O
 *	@drive: drive for command
 *	@rq: block request structure
 *
 *	This is called when a disk I/O is issued to HPT3xxN.
 *	We need it because of the clock switching.
 */
static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
{
	hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x21 : 0x23);
}

/**
 *	hpt37x_calibrate_dpll	-	calibrate the DPLL
 *	@dev: PCI device
 *
 *	Perform a calibration cycle on the DPLL.
* Returns 1 if this succeeds */ static int hpt37x_calibrate_dpll(struct pci_dev *dev, u16 f_low, u16 f_high) { u32 dpll = (f_high << 16) | f_low | 0x100; u8 scr2; int i; pci_write_config_dword(dev, 0x5c, dpll); /* Wait for oscillator ready */ for(i = 0; i < 0x5000; ++i) { udelay(50); pci_read_config_byte(dev, 0x5b, &scr2); if (scr2 & 0x80) break; } /* See if it stays ready (we'll just bail out if it's not yet) */ for(i = 0; i < 0x1000; ++i) { pci_read_config_byte(dev, 0x5b, &scr2); /* DPLL destabilized? */ if(!(scr2 & 0x80)) return 0; } /* Turn off tuning, we have the DPLL set */ pci_read_config_dword (dev, 0x5c, &dpll); pci_write_config_dword(dev, 0x5c, (dpll & ~0x100)); return 1; } static void hpt3xx_disable_fast_irq(struct pci_dev *dev, u8 mcr_addr) { struct ide_host *host = pci_get_drvdata(dev); struct hpt_info *info = host->host_priv + (&dev->dev == host->dev[1]); u8 chip_type = info->chip_type; u8 new_mcr, old_mcr = 0; /* * Disable the "fast interrupt" prediction. Don't hold off * on interrupts. 
(== 0x01 despite what the docs say) */ pci_read_config_byte(dev, mcr_addr + 1, &old_mcr); if (chip_type >= HPT374) new_mcr = old_mcr & ~0x07; else if (chip_type >= HPT370) { new_mcr = old_mcr; new_mcr &= ~0x02; #ifdef HPT_DELAY_INTERRUPT new_mcr &= ~0x01; #else new_mcr |= 0x01; #endif } else /* HPT366 and HPT368 */ new_mcr = old_mcr & ~0x80; if (new_mcr != old_mcr) pci_write_config_byte(dev, mcr_addr + 1, new_mcr); } static int init_chipset_hpt366(struct pci_dev *dev) { unsigned long io_base = pci_resource_start(dev, 4); struct hpt_info *info = hpt3xx_get_info(&dev->dev); const char *name = DRV_NAME; u8 pci_clk, dpll_clk = 0; /* PCI and DPLL clock in MHz */ u8 chip_type; enum ata_clock clock; chip_type = info->chip_type; pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); pci_write_config_byte(dev, PCI_MAX_LAT, 0x08); /* * First, try to estimate the PCI clock frequency... */ if (chip_type >= HPT370) { u8 scr1 = 0; u16 f_cnt = 0; u32 temp = 0; /* Interrupt force enable. */ pci_read_config_byte(dev, 0x5a, &scr1); if (scr1 & 0x10) pci_write_config_byte(dev, 0x5a, scr1 & ~0x10); /* * HighPoint does this for HPT372A. * NOTE: This register is only writeable via I/O space. */ if (chip_type == HPT372A) outb(0x0e, io_base + 0x9c); /* * Default to PCI clock. Make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. */ pci_write_config_byte(dev, 0x5b, 0x23); /* * We'll have to read f_CNT value in order to determine * the PCI clock frequency according to the following ratio: * * f_CNT = Fpci * 192 / Fdpll * * First try reading the register in which the HighPoint BIOS * saves f_CNT value before reprogramming the DPLL from its * default setting (which differs for the various chips). 
* * NOTE: This register is only accessible via I/O space; * HPT374 BIOS only saves it for the function 0, so we have to * always read it from there -- no need to check the result of * pci_get_slot() for the function 0 as the whole device has * been already "pinned" (via function 1) in init_setup_hpt374() */ if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) { struct pci_dev *dev1 = pci_get_slot(dev->bus, dev->devfn - 1); unsigned long io_base = pci_resource_start(dev1, 4); temp = inl(io_base + 0x90); pci_dev_put(dev1); } else temp = inl(io_base + 0x90); /* * In case the signature check fails, we'll have to * resort to reading the f_CNT register itself in hopes * that nobody has touched the DPLL yet... */ if ((temp & 0xFFFFF000) != 0xABCDE000) { int i; printk(KERN_WARNING "%s %s: no clock data saved by " "BIOS\n", name, pci_name(dev)); /* Calculate the average value of f_CNT. */ for (temp = i = 0; i < 128; i++) { pci_read_config_word(dev, 0x78, &f_cnt); temp += f_cnt & 0x1ff; mdelay(1); } f_cnt = temp / 128; } else f_cnt = temp & 0x1ff; dpll_clk = info->dpll_clk; pci_clk = (f_cnt * dpll_clk) / 192; /* Clamp PCI clock to bands. */ if (pci_clk < 40) pci_clk = 33; else if(pci_clk < 45) pci_clk = 40; else if(pci_clk < 55) pci_clk = 50; else pci_clk = 66; printk(KERN_INFO "%s %s: DPLL base: %d MHz, f_CNT: %d, " "assuming %d MHz PCI\n", name, pci_name(dev), dpll_clk, f_cnt, pci_clk); } else { u32 itr1 = 0; pci_read_config_dword(dev, 0x40, &itr1); /* Detect PCI clock by looking at cmd_high_time. */ switch((itr1 >> 8) & 0x07) { case 0x09: pci_clk = 40; break; case 0x05: pci_clk = 25; break; case 0x07: default: pci_clk = 33; break; } } /* Let's assume we'll use PCI clock for the ATA clock... 
*/ switch (pci_clk) { case 25: clock = ATA_CLOCK_25MHZ; break; case 33: default: clock = ATA_CLOCK_33MHZ; break; case 40: clock = ATA_CLOCK_40MHZ; break; case 50: clock = ATA_CLOCK_50MHZ; break; case 66: clock = ATA_CLOCK_66MHZ; break; } /* * Only try the DPLL if we don't have a table for the PCI clock that * we are running at for HPT370/A, always use it for anything newer... * * NOTE: Using the internal DPLL results in slow reads on 33 MHz PCI. * We also don't like using the DPLL because this causes glitches * on PRST-/SRST- when the state engine gets reset... */ if (chip_type >= HPT374 || info->timings->clock_table[clock] == NULL) { u16 f_low, delta = pci_clk < 50 ? 2 : 4; int adjust; /* * Select 66 MHz DPLL clock only if UltraATA/133 mode is * supported/enabled, use 50 MHz DPLL clock otherwise... */ if (info->udma_mask == ATA_UDMA6) { dpll_clk = 66; clock = ATA_CLOCK_66MHZ; } else if (dpll_clk) { /* HPT36x chips don't have DPLL */ dpll_clk = 50; clock = ATA_CLOCK_50MHZ; } if (info->timings->clock_table[clock] == NULL) { printk(KERN_ERR "%s %s: unknown bus timing!\n", name, pci_name(dev)); return -EIO; } /* Select the DPLL clock. */ pci_write_config_byte(dev, 0x5b, 0x21); /* * Adjust the DPLL based upon PCI clock, enable it, * and wait for stabilization... */ f_low = (pci_clk * 48) / dpll_clk; for (adjust = 0; adjust < 8; adjust++) { if(hpt37x_calibrate_dpll(dev, f_low, f_low + delta)) break; /* * See if it'll settle at a fractionally different clock */ if (adjust & 1) f_low -= adjust >> 1; else f_low += adjust >> 1; } if (adjust == 8) { printk(KERN_ERR "%s %s: DPLL did not stabilize!\n", name, pci_name(dev)); return -EIO; } printk(KERN_INFO "%s %s: using %d MHz DPLL clock\n", name, pci_name(dev), dpll_clk); } else { /* Mark the fact that we're not using the DPLL. */ dpll_clk = 0; printk(KERN_INFO "%s %s: using %d MHz PCI clock\n", name, pci_name(dev), pci_clk); } /* Store the clock frequencies. 
*/ info->dpll_clk = dpll_clk; info->pci_clk = pci_clk; info->clock = clock; if (chip_type >= HPT370) { u8 mcr1, mcr4; /* * Reset the state engines. * NOTE: Avoid accidentally enabling the disabled channels. */ pci_read_config_byte (dev, 0x50, &mcr1); pci_read_config_byte (dev, 0x54, &mcr4); pci_write_config_byte(dev, 0x50, (mcr1 | 0x32)); pci_write_config_byte(dev, 0x54, (mcr4 | 0x32)); udelay(100); } /* * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in * the MISC. register to stretch the UltraDMA Tss timing. * NOTE: This register is only writeable via I/O space. */ if (chip_type == HPT371N && clock == ATA_CLOCK_66MHZ) outb(inb(io_base + 0x9c) | 0x04, io_base + 0x9c); hpt3xx_disable_fast_irq(dev, 0x50); hpt3xx_disable_fast_irq(dev, 0x54); return 0; } static u8 hpt3xx_cable_detect(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); u8 chip_type = info->chip_type; u8 scr1 = 0, ata66 = hwif->channel ? 0x01 : 0x02; /* * The HPT37x uses the CBLID pins as outputs for MA15/MA16 * address lines to access an external EEPROM. To read valid * cable detect state the pins must be enabled as inputs. 
*/ if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) { /* * HPT374 PCI function 1 * - set bit 15 of reg 0x52 to enable TCBLID as input * - set bit 15 of reg 0x56 to enable FCBLID as input */ u8 mcr_addr = hwif->select_data + 2; u16 mcr; pci_read_config_word(dev, mcr_addr, &mcr); pci_write_config_word(dev, mcr_addr, mcr | 0x8000); /* Debounce, then read cable ID register */ udelay(10); pci_read_config_byte(dev, 0x5a, &scr1); pci_write_config_word(dev, mcr_addr, mcr); } else if (chip_type >= HPT370) { /* * HPT370/372 and 374 pcifn 0 * - clear bit 0 of reg 0x5b to enable P/SCBLID as inputs */ u8 scr2 = 0; pci_read_config_byte(dev, 0x5b, &scr2); pci_write_config_byte(dev, 0x5b, scr2 & ~1); /* Debounce, then read cable ID register */ udelay(10); pci_read_config_byte(dev, 0x5a, &scr1); pci_write_config_byte(dev, 0x5b, scr2); } else pci_read_config_byte(dev, 0x5a, &scr1); return (scr1 & ata66) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; } static void __devinit init_hwif_hpt366(ide_hwif_t *hwif) { struct hpt_info *info = hpt3xx_get_info(hwif->dev); u8 chip_type = info->chip_type; /* Cache the channel's MISC. control registers' offset */ hwif->select_data = hwif->channel ? 0x54 : 0x50; /* * HPT3xxN chips have some complications: * * - on 33 MHz PCI we must clock switch * - on 66 MHz PCI we must NOT use the PCI clock */ if (chip_type >= HPT372N && info->dpll_clk && info->pci_clk < 66) { /* * Clock is shared between the channels, * so we'll have to serialize them... 
:-( */ hwif->host->host_flags |= IDE_HFLAG_SERIALIZE; hwif->rw_disk = &hpt3xxn_rw_disk; } } static int __devinit init_dma_hpt366(ide_hwif_t *hwif, const struct ide_port_info *d) { struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long flags, base = ide_pci_dma_base(hwif, d); u8 dma_old, dma_new, masterdma = 0, slavedma = 0; if (base == 0) return -1; hwif->dma_base = base; if (ide_pci_check_simplex(hwif, d) < 0) return -1; if (ide_pci_set_master(dev, d->name) < 0) return -1; dma_old = inb(base + 2); local_irq_save(flags); dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); pci_read_config_byte(dev, hwif->channel ? 0x4f : 0x47, &slavedma); if (masterdma & 0x30) dma_new |= 0x20; if ( slavedma & 0x30) dma_new |= 0x40; if (dma_new != dma_old) outb(dma_new, base + 2); local_irq_restore(flags); printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); hwif->extra_base = base + (hwif->channel ? 8 : 16); if (ide_allocate_dma_engine(hwif)) return -1; return 0; } static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2) { if (dev2->irq != dev->irq) { /* FIXME: we need a core pci_set_interrupt() */ dev2->irq = dev->irq; printk(KERN_INFO DRV_NAME " %s: PCI config space interrupt " "fixed\n", pci_name(dev2)); } } static void __devinit hpt371_init(struct pci_dev *dev) { u8 mcr1 = 0; /* * HPT371 chips physically have only one channel, the secondary one, * but the primary channel registers do exist! Go figure... * So, we manually disable the non-existing channel here * (if the BIOS hasn't done this already). */ pci_read_config_byte(dev, 0x50, &mcr1); if (mcr1 & 0x04) pci_write_config_byte(dev, 0x50, mcr1 & ~0x04); } static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2) { u8 mcr1 = 0, pin1 = 0, pin2 = 0; /* * Now we'll have to force both channels enabled if * at least one of them has been enabled by BIOS... 
*/ pci_read_config_byte(dev, 0x50, &mcr1); if (mcr1 & 0x30) pci_write_config_byte(dev, 0x50, mcr1 | 0x30); pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1); pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2); if (pin1 != pin2 && dev->irq == dev2->irq) { printk(KERN_INFO DRV_NAME " %s: onboard version of chipset, " "pin1=%d pin2=%d\n", pci_name(dev), pin1, pin2); return 1; } return 0; } #define IDE_HFLAGS_HPT3XX \ (IDE_HFLAG_NO_ATAPI_DMA | \ IDE_HFLAG_OFF_BOARD) static const struct ide_port_ops hpt3xx_port_ops = { .set_pio_mode = hpt3xx_set_pio_mode, .set_dma_mode = hpt3xx_set_mode, .maskproc = hpt3xx_maskproc, .mdma_filter = hpt3xx_mdma_filter, .udma_filter = hpt3xx_udma_filter, .cable_detect = hpt3xx_cable_detect, }; static const struct ide_dma_ops hpt37x_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ide_dma_start, .dma_end = hpt374_dma_end, .dma_test_irq = hpt374_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_dma_ops hpt370_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = hpt370_dma_start, .dma_end = hpt370_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_clear = hpt370_irq_timeout, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_dma_ops hpt36x_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ide_dma_start, .dma_end = ide_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = hpt366_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_port_info hpt366_chipsets[] __devinitdata = { { /* 0: HPT36x */ .name = DRV_NAME, .init_chipset = init_chipset_hpt366, .init_hwif = init_hwif_hpt366, .init_dma = init_dma_hpt366, /* * HPT36x chips have 
one channel per function and have * both channel enable bits located differently and visible * to both functions -- really stupid design decision... :-( * Bit 4 is for the primary channel, bit 5 for the secondary. */ .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}}, .port_ops = &hpt3xx_port_ops, .dma_ops = &hpt36x_dma_ops, .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, }, { /* 1: HPT3xx */ .name = DRV_NAME, .init_chipset = init_chipset_hpt366, .init_hwif = init_hwif_hpt366, .init_dma = init_dma_hpt366, .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, .port_ops = &hpt3xx_port_ops, .dma_ops = &hpt37x_dma_ops, .host_flags = IDE_HFLAGS_HPT3XX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, } }; /** * hpt366_init_one - called when an HPT366 is found * @dev: the hpt366 device * @id: the matching pci id * * Called when the PCI registration layer (or the IDE initialization) * finds a device matching our IDE device tables. */ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id) { const struct hpt_info *info = NULL; struct hpt_info *dyn_info; struct pci_dev *dev2 = NULL; struct ide_port_info d; u8 idx = id->driver_data; u8 rev = dev->revision; int ret; if ((idx == 0 || idx == 4) && (PCI_FUNC(dev->devfn) & 1)) return -ENODEV; switch (idx) { case 0: if (rev < 3) info = &hpt36x; else { switch (min_t(u8, rev, 6)) { case 3: info = &hpt370; break; case 4: info = &hpt370a; break; case 5: info = &hpt372; break; case 6: info = &hpt372n; break; } idx++; } break; case 1: info = (rev > 1) ? &hpt372n : &hpt372a; break; case 2: info = (rev > 1) ? &hpt302n : &hpt302; break; case 3: hpt371_init(dev); info = (rev > 1) ? 
&hpt371n : &hpt371; break; case 4: info = &hpt374; break; case 5: info = &hpt372n; break; } printk(KERN_INFO DRV_NAME ": %s chipset detected\n", info->chip_name); d = hpt366_chipsets[min_t(u8, idx, 1)]; d.udma_mask = info->udma_mask; /* fixup ->dma_ops for HPT370/HPT370A */ if (info == &hpt370 || info == &hpt370a) d.dma_ops = &hpt370_dma_ops; if (info == &hpt36x || info == &hpt374) dev2 = pci_get_slot(dev->bus, dev->devfn + 1); dyn_info = kzalloc(sizeof(*dyn_info) * (dev2 ? 2 : 1), GFP_KERNEL); if (dyn_info == NULL) { printk(KERN_ERR "%s %s: out of memory!\n", d.name, pci_name(dev)); pci_dev_put(dev2); return -ENOMEM; } /* * Copy everything from a static "template" structure * to just allocated per-chip hpt_info structure. */ memcpy(dyn_info, info, sizeof(*dyn_info)); if (dev2) { memcpy(dyn_info + 1, info, sizeof(*dyn_info)); if (info == &hpt374) hpt374_init(dev, dev2); else { if (hpt36x_init(dev, dev2)) d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE; } ret = ide_pci_init_two(dev, dev2, &d, dyn_info); if (ret < 0) { pci_dev_put(dev2); kfree(dyn_info); } return ret; } ret = ide_pci_init_one(dev, &d, dyn_info); if (ret < 0) kfree(dyn_info); return ret; } static void __devexit hpt366_remove(struct pci_dev *dev) { struct ide_host *host = pci_get_drvdata(dev); struct ide_info *info = host->host_priv; struct pci_dev *dev2 = host->dev[1] ? 
to_pci_dev(host->dev[1]) : NULL; ide_pci_remove(dev); pci_dev_put(dev2); kfree(info); } static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = { { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), 3 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), 4 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), 5 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, hpt366_pci_tbl); static struct pci_driver hpt366_pci_driver = { .name = "HPT366_IDE", .id_table = hpt366_pci_tbl, .probe = hpt366_init_one, .remove = __devexit_p(hpt366_remove), .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init hpt366_ide_init(void) { return ide_pci_register_driver(&hpt366_pci_driver); } static void __exit hpt366_ide_exit(void) { pci_unregister_driver(&hpt366_pci_driver); } module_init(hpt366_ide_init); module_exit(hpt366_ide_exit); MODULE_AUTHOR("Andre Hedrick"); MODULE_DESCRIPTION("PCI driver module for Highpoint HPT366 IDE"); MODULE_LICENSE("GPL");
gpl-2.0
TheNotOnly/linux-3.5
drivers/ide/hpt366.c
8249
43126
/* * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc * Portions Copyright (C) 2007 Bartlomiej Zolnierkiewicz * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. * * Thanks to HighPoint Technologies for their assistance, and hardware. * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his * donation of an ABit BP6 mainboard, processor, and memory acellerated * development and support. * * * HighPoint has its own drivers (open source except for the RAID part) * available from http://www.highpoint-tech.com/USA_new/service_support.htm * This may be useful to anyone wanting to work on this driver, however do not * trust them too much since the code tends to become less and less meaningful * as the time passes... :-/ * * Note that final HPT370 support was done by force extraction of GPL. * * - add function for getting/setting power status of drive * - the HPT370's state machine can get confused. reset it before each dma * xfer to prevent that from happening. * - reset state engine whenever we get an error. * - check for busmaster state at end of dma. * - use new highpoint timings. * - detect bus speed using highpoint register. * - use pll if we don't have a clock table. added a 66MHz table that's * just 2x the 33MHz table. * - removed turnaround. NOTE: we never want to switch between pll and * pci clocks as the chip can glitch in those cases. the highpoint * approved workaround slows everything down too much to be useful. in * addition, we would have to serialize access to each chip. * Adrian Sun <a.sun@sun.com> * * add drive timings for 66MHz PCI bus, * fix ATA Cable signal detection, fix incorrect /proc info * add /proc display for per-drive PIO/DMA/UDMA mode and * per-channel ATA-33/66 Cable detect. 
* Duncan Laurie <void@sun.com> * * fixup /proc output for multiple controllers * Tim Hockin <thockin@sun.com> * * On hpt366: * Reset the hpt366 on error, reset on dma * Fix disabling Fast Interrupt hpt366. * Mike Waychison <crlf@sun.com> * * Added support for 372N clocking and clock switching. The 372N needs * different clocks on read/write. This requires overloading rw_disk and * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for * keeping me sane. * Alan Cox <alan@lxorguk.ukuu.org.uk> * * - fix the clock turnaround code: it was writing to the wrong ports when * called for the secondary channel, caching the current clock mode per- * channel caused the cached register value to get out of sync with the * actual one, the channels weren't serialized, the turnaround shouldn't * be done on 66 MHz PCI bus * - disable UltraATA/100 for HPT370 by default as the 33 MHz clock being used * does not allow for this speed anyway * - avoid touching disabled channels (e.g. HPT371/N are single channel chips, * their primary channel is kind of virtual, it isn't tied to any pins) * - fix/remove bad/unused timing tables and use one set of tables for the whole * HPT37x chip family; save space by introducing the separate transfer mode * table in which the mode lookup is done * - use f_CNT value saved by the HighPoint BIOS as reading it directly gives * the wrong PCI frequency since DPLL has already been calibrated by BIOS; * read it only from the function 0 of HPT374 chips * - fix the hotswap code: it caused RESET- to glitch when tristating the bus, * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead * - pass to init_chipset() handlers a copy of the IDE PCI device structure as * they tamper with its fields * - pass to the init_setup handlers a copy of the ide_pci_device_t structure * since they may tamper with its fields * - prefix the driver startup messages with the real chip name * - claim the extra 240 bytes of I/O space for all chips * - 
optimize the UltraDMA filtering and the drive list lookup code * - use pci_get_slot() to get to the function 1 of HPT36x/374 * - cache offset of the channel's misc. control registers (MCRs) being used * throughout the driver * - only touch the relevant MCR when detecting the cable type on HPT374's * function 1 * - rename all the register related variables consistently * - move all the interrupt twiddling code from the speedproc handlers into * init_hwif_hpt366(), also grouping all the DMA related code together there * - merge HPT36x/HPT37x speedproc handlers, fix PIO timing register mask and * separate the UltraDMA and MWDMA masks there to avoid changing PIO timings * when setting an UltraDMA mode * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select * the best possible one * - clean up DMA timeout handling for HPT370 * - switch to using the enumeration type to differ between the numerous chip * variants, matching PCI device/revision ID with the chip type early, at the * init_setup stage * - extend the hpt_info structure to hold the DPLL and PCI clock frequencies, * stop duplicating it for each channel by storing the pointer in the pci_dev * structure: first, at the init_setup stage, point it to a static "template" * with only the chip type and its specific base DPLL frequency, the highest * UltraDMA mode, and the chip settings table pointer filled, then, at the * init_chipset stage, allocate per-chip instance and fill it with the rest * of the necessary information * - get rid of the constant thresholds in the HPT37x PCI clock detection code, * switch to calculating PCI clock frequency based on the chip's base DPLL * frequency * - switch to using the DPLL clock and enable UltraATA/133 mode by default on * anything newer than HPT370/A (except HPT374 that is not capable of this * mode according to the manual) * - fold PCI clock detection and DPLL setup code into init_chipset_hpt366(), * also fixing the interchanged 25/40 MHz PCI clock cases 
for HPT36x chips; * unify HPT36x/37x timing setup code and the speedproc handlers by joining * the register setting lists into the table indexed by the clock selected * - set the correct hwif->ultra_mask for each individual chip * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards * - stop resetting HPT370's state machine before each DMA transfer as that has * caused more harm than good * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com> */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ide.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/io.h> #define DRV_NAME "hpt366" /* various tuning parameters */ #undef HPT_RESET_STATE_ENGINE #undef HPT_DELAY_INTERRUPT static const char *bad_ata100_5[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", "IBM-DTLA-307045", "IBM-DTLA-307030", "IBM-DTLA-307020", "IBM-DTLA-307015", "IBM-DTLA-305040", "IBM-DTLA-305030", "IBM-DTLA-305020", "IC35L010AVER07-0", "IC35L020AVER07-0", "IC35L030AVER07-0", "IC35L040AVER07-0", "IC35L060AVER07-0", "WDC AC310200R", NULL }; static const char *bad_ata66_4[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", "IBM-DTLA-307045", "IBM-DTLA-307030", "IBM-DTLA-307020", "IBM-DTLA-307015", "IBM-DTLA-305040", "IBM-DTLA-305030", "IBM-DTLA-305020", "IC35L010AVER07-0", "IC35L020AVER07-0", "IC35L030AVER07-0", "IC35L040AVER07-0", "IC35L060AVER07-0", "WDC AC310200R", "MAXTOR STM3320620A", NULL }; static const char *bad_ata66_3[] = { "WDC AC310200R", NULL }; static const char *bad_ata33[] = { "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2", "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 
90720D5", "Maxtor 90648D5", "Maxtor 90576D4", "Maxtor 90510D4", "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2", "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", NULL }; static u8 xfer_speeds[] = { XFER_UDMA_6, XFER_UDMA_5, XFER_UDMA_4, XFER_UDMA_3, XFER_UDMA_2, XFER_UDMA_1, XFER_UDMA_0, XFER_MW_DMA_2, XFER_MW_DMA_1, XFER_MW_DMA_0, XFER_PIO_4, XFER_PIO_3, XFER_PIO_2, XFER_PIO_1, XFER_PIO_0 }; /* Key for bus clock timings * 36x 37x * bits bits * 0:3 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 4:7 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 8:11 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file * register access. * 12:15 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file * register access. * 16:18 18:20 udma_cycle_time. Clock cycles for UDMA xfer. * - 21 CLK frequency: 0=ATA clock, 1=dual ATA clock. * 19:21 22:24 pre_high_time. Time to initialize the 1st cycle for PIO and * MW DMA xfer. * 22:24 25:27 cmd_pre_high_time. Time to initialize the 1st PIO cycle for * task file register access. * 28 28 UDMA enable. * 29 29 DMA enable. * 30 30 PIO MST enable. If set, the chip is in bus master mode during * PIO xfer. * 31 31 FIFO enable. 
*/ static u32 forty_base_hpt36x[] = { /* XFER_UDMA_6 */ 0x900fd943, /* XFER_UDMA_5 */ 0x900fd943, /* XFER_UDMA_4 */ 0x900fd943, /* XFER_UDMA_3 */ 0x900ad943, /* XFER_UDMA_2 */ 0x900bd943, /* XFER_UDMA_1 */ 0x9008d943, /* XFER_UDMA_0 */ 0x9008d943, /* XFER_MW_DMA_2 */ 0xa008d943, /* XFER_MW_DMA_1 */ 0xa010d955, /* XFER_MW_DMA_0 */ 0xa010d9fc, /* XFER_PIO_4 */ 0xc008d963, /* XFER_PIO_3 */ 0xc010d974, /* XFER_PIO_2 */ 0xc010d997, /* XFER_PIO_1 */ 0xc010d9c7, /* XFER_PIO_0 */ 0xc018d9d9 }; static u32 thirty_three_base_hpt36x[] = { /* XFER_UDMA_6 */ 0x90c9a731, /* XFER_UDMA_5 */ 0x90c9a731, /* XFER_UDMA_4 */ 0x90c9a731, /* XFER_UDMA_3 */ 0x90cfa731, /* XFER_UDMA_2 */ 0x90caa731, /* XFER_UDMA_1 */ 0x90cba731, /* XFER_UDMA_0 */ 0x90c8a731, /* XFER_MW_DMA_2 */ 0xa0c8a731, /* XFER_MW_DMA_1 */ 0xa0c8a732, /* 0xa0c8a733 */ /* XFER_MW_DMA_0 */ 0xa0c8a797, /* XFER_PIO_4 */ 0xc0c8a731, /* XFER_PIO_3 */ 0xc0c8a742, /* XFER_PIO_2 */ 0xc0d0a753, /* XFER_PIO_1 */ 0xc0d0a7a3, /* 0xc0d0a793 */ /* XFER_PIO_0 */ 0xc0d0a7aa /* 0xc0d0a7a7 */ }; static u32 twenty_five_base_hpt36x[] = { /* XFER_UDMA_6 */ 0x90c98521, /* XFER_UDMA_5 */ 0x90c98521, /* XFER_UDMA_4 */ 0x90c98521, /* XFER_UDMA_3 */ 0x90cf8521, /* XFER_UDMA_2 */ 0x90cf8521, /* XFER_UDMA_1 */ 0x90cb8521, /* XFER_UDMA_0 */ 0x90cb8521, /* XFER_MW_DMA_2 */ 0xa0ca8521, /* XFER_MW_DMA_1 */ 0xa0ca8532, /* XFER_MW_DMA_0 */ 0xa0ca8575, /* XFER_PIO_4 */ 0xc0ca8521, /* XFER_PIO_3 */ 0xc0ca8532, /* XFER_PIO_2 */ 0xc0ca8542, /* XFER_PIO_1 */ 0xc0d08572, /* XFER_PIO_0 */ 0xc0d08585 }; /* * The following are the new timing tables with PIO mode data/taskfile transfer * overclocking fixed... */ /* This table is taken from the HPT370 data manual rev. 1.02 */ static u32 thirty_three_base_hpt37x[] = { /* XFER_UDMA_6 */ 0x16455031, /* 0x16655031 ?? 
*/ /* XFER_UDMA_5 */ 0x16455031, /* XFER_UDMA_4 */ 0x16455031, /* XFER_UDMA_3 */ 0x166d5031, /* XFER_UDMA_2 */ 0x16495031, /* XFER_UDMA_1 */ 0x164d5033, /* XFER_UDMA_0 */ 0x16515097, /* XFER_MW_DMA_2 */ 0x26515031, /* XFER_MW_DMA_1 */ 0x26515033, /* XFER_MW_DMA_0 */ 0x26515097, /* XFER_PIO_4 */ 0x06515021, /* XFER_PIO_3 */ 0x06515022, /* XFER_PIO_2 */ 0x06515033, /* XFER_PIO_1 */ 0x06915065, /* XFER_PIO_0 */ 0x06d1508a }; static u32 fifty_base_hpt37x[] = { /* XFER_UDMA_6 */ 0x1a861842, /* XFER_UDMA_5 */ 0x1a861842, /* XFER_UDMA_4 */ 0x1aae1842, /* XFER_UDMA_3 */ 0x1a8e1842, /* XFER_UDMA_2 */ 0x1a0e1842, /* XFER_UDMA_1 */ 0x1a161854, /* XFER_UDMA_0 */ 0x1a1a18ea, /* XFER_MW_DMA_2 */ 0x2a821842, /* XFER_MW_DMA_1 */ 0x2a821854, /* XFER_MW_DMA_0 */ 0x2a8218ea, /* XFER_PIO_4 */ 0x0a821842, /* XFER_PIO_3 */ 0x0a821843, /* XFER_PIO_2 */ 0x0a821855, /* XFER_PIO_1 */ 0x0ac218a8, /* XFER_PIO_0 */ 0x0b02190c }; static u32 sixty_six_base_hpt37x[] = { /* XFER_UDMA_6 */ 0x1c86fe62, /* XFER_UDMA_5 */ 0x1caefe62, /* 0x1c8afe62 */ /* XFER_UDMA_4 */ 0x1c8afe62, /* XFER_UDMA_3 */ 0x1c8efe62, /* XFER_UDMA_2 */ 0x1c92fe62, /* XFER_UDMA_1 */ 0x1c9afe62, /* XFER_UDMA_0 */ 0x1c82fe62, /* XFER_MW_DMA_2 */ 0x2c82fe62, /* XFER_MW_DMA_1 */ 0x2c82fe66, /* XFER_MW_DMA_0 */ 0x2c82ff2e, /* XFER_PIO_4 */ 0x0c82fe62, /* XFER_PIO_3 */ 0x0c82fe84, /* XFER_PIO_2 */ 0x0c82fea6, /* XFER_PIO_1 */ 0x0d02ff26, /* XFER_PIO_0 */ 0x0d42ff7f }; #define HPT371_ALLOW_ATA133_6 1 #define HPT302_ALLOW_ATA133_6 1 #define HPT372_ALLOW_ATA133_6 1 #define HPT370_ALLOW_ATA100_5 0 #define HPT366_ALLOW_ATA66_4 1 #define HPT366_ALLOW_ATA66_3 1 /* Supported ATA clock frequencies */ enum ata_clock { ATA_CLOCK_25MHZ, ATA_CLOCK_33MHZ, ATA_CLOCK_40MHZ, ATA_CLOCK_50MHZ, ATA_CLOCK_66MHZ, NUM_ATA_CLOCKS }; struct hpt_timings { u32 pio_mask; u32 dma_mask; u32 ultra_mask; u32 *clock_table[NUM_ATA_CLOCKS]; }; /* * Hold all the HighPoint chip information in one place. 
*/ struct hpt_info { char *chip_name; /* Chip name */ u8 chip_type; /* Chip type */ u8 udma_mask; /* Allowed UltraDMA modes mask. */ u8 dpll_clk; /* DPLL clock in MHz */ u8 pci_clk; /* PCI clock in MHz */ struct hpt_timings *timings; /* Chipset timing data */ u8 clock; /* ATA clock selected */ }; /* Supported HighPoint chips */ enum { HPT36x, HPT370, HPT370A, HPT374, HPT372, HPT372A, HPT302, HPT371, HPT372N, HPT302N, HPT371N }; static struct hpt_timings hpt36x_timings = { .pio_mask = 0xc1f8ffff, .dma_mask = 0x303800ff, .ultra_mask = 0x30070000, .clock_table = { [ATA_CLOCK_25MHZ] = twenty_five_base_hpt36x, [ATA_CLOCK_33MHZ] = thirty_three_base_hpt36x, [ATA_CLOCK_40MHZ] = forty_base_hpt36x, [ATA_CLOCK_50MHZ] = NULL, [ATA_CLOCK_66MHZ] = NULL } }; static struct hpt_timings hpt37x_timings = { .pio_mask = 0xcfc3ffff, .dma_mask = 0x31c001ff, .ultra_mask = 0x303c0000, .clock_table = { [ATA_CLOCK_25MHZ] = NULL, [ATA_CLOCK_33MHZ] = thirty_three_base_hpt37x, [ATA_CLOCK_40MHZ] = NULL, [ATA_CLOCK_50MHZ] = fifty_base_hpt37x, [ATA_CLOCK_66MHZ] = sixty_six_base_hpt37x } }; static const struct hpt_info hpt36x __devinitdata = { .chip_name = "HPT36x", .chip_type = HPT36x, .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2, .dpll_clk = 0, /* no DPLL */ .timings = &hpt36x_timings }; static const struct hpt_info hpt370 __devinitdata = { .chip_name = "HPT370", .chip_type = HPT370, .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, .dpll_clk = 48, .timings = &hpt37x_timings }; static const struct hpt_info hpt370a __devinitdata = { .chip_name = "HPT370A", .chip_type = HPT370A, .udma_mask = HPT370_ALLOW_ATA100_5 ? 
ATA_UDMA5 : ATA_UDMA4, .dpll_clk = 48, .timings = &hpt37x_timings }; static const struct hpt_info hpt374 __devinitdata = { .chip_name = "HPT374", .chip_type = HPT374, .udma_mask = ATA_UDMA5, .dpll_clk = 48, .timings = &hpt37x_timings }; static const struct hpt_info hpt372 __devinitdata = { .chip_name = "HPT372", .chip_type = HPT372, .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 55, .timings = &hpt37x_timings }; static const struct hpt_info hpt372a __devinitdata = { .chip_name = "HPT372A", .chip_type = HPT372A, .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 66, .timings = &hpt37x_timings }; static const struct hpt_info hpt302 __devinitdata = { .chip_name = "HPT302", .chip_type = HPT302, .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 66, .timings = &hpt37x_timings }; static const struct hpt_info hpt371 __devinitdata = { .chip_name = "HPT371", .chip_type = HPT371, .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 66, .timings = &hpt37x_timings }; static const struct hpt_info hpt372n __devinitdata = { .chip_name = "HPT372N", .chip_type = HPT372N, .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 77, .timings = &hpt37x_timings }; static const struct hpt_info hpt302n __devinitdata = { .chip_name = "HPT302N", .chip_type = HPT302N, .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 77, .timings = &hpt37x_timings }; static const struct hpt_info hpt371n __devinitdata = { .chip_name = "HPT371N", .chip_type = HPT371N, .udma_mask = HPT371_ALLOW_ATA133_6 ? 
ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 77, .timings = &hpt37x_timings }; static int check_in_drive_list(ide_drive_t *drive, const char **list) { char *m = (char *)&drive->id[ATA_ID_PROD]; while (*list) if (!strcmp(*list++, m)) return 1; return 0; } static struct hpt_info *hpt3xx_get_info(struct device *dev) { struct ide_host *host = dev_get_drvdata(dev); struct hpt_info *info = (struct hpt_info *)host->host_priv; return dev == host->dev[1] ? info + 1 : info; } /* * The Marvell bridge chips used on the HighPoint SATA cards do not seem * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... */ static u8 hpt3xx_udma_filter(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct hpt_info *info = hpt3xx_get_info(hwif->dev); u8 mask = hwif->ultra_mask; switch (info->chip_type) { case HPT36x: if (!HPT366_ALLOW_ATA66_4 || check_in_drive_list(drive, bad_ata66_4)) mask = ATA_UDMA3; if (!HPT366_ALLOW_ATA66_3 || check_in_drive_list(drive, bad_ata66_3)) mask = ATA_UDMA2; break; case HPT370: if (!HPT370_ALLOW_ATA100_5 || check_in_drive_list(drive, bad_ata100_5)) mask = ATA_UDMA4; break; case HPT370A: if (!HPT370_ALLOW_ATA100_5 || check_in_drive_list(drive, bad_ata100_5)) return ATA_UDMA4; case HPT372 : case HPT372A: case HPT372N: case HPT374 : if (ata_id_is_sata(drive->id)) mask &= ~0x0e; /* Fall thru */ default: return mask; } return check_in_drive_list(drive, bad_ata33) ? 0x00 : mask; } static u8 hpt3xx_mdma_filter(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct hpt_info *info = hpt3xx_get_info(hwif->dev); switch (info->chip_type) { case HPT372 : case HPT372A: case HPT372N: case HPT374 : if (ata_id_is_sata(drive->id)) return 0x00; /* Fall thru */ default: return 0x07; } } static u32 get_speed_setting(u8 speed, struct hpt_info *info) { int i; /* * Lookup the transfer mode table to get the index into * the timing table. * * NOTE: For XFER_PIO_SLOW, PIO mode 0 timings will be used. 
*/ for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++) if (xfer_speeds[i] == speed) break; return info->timings->clock_table[info->clock][i]; } static void hpt3xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); struct hpt_timings *t = info->timings; u8 itr_addr = 0x40 + (drive->dn * 4); u32 old_itr = 0; const u8 speed = drive->dma_mode; u32 new_itr = get_speed_setting(speed, info); u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask : (speed < XFER_UDMA_0 ? t->dma_mask : t->ultra_mask); pci_read_config_dword(dev, itr_addr, &old_itr); new_itr = (old_itr & ~itr_mask) | (new_itr & itr_mask); /* * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well) * to avoid problems handling I/O errors later */ new_itr &= ~0xc0000000; pci_write_config_dword(dev, itr_addr, new_itr); } static void hpt3xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { drive->dma_mode = drive->pio_mode; hpt3xx_set_mode(hwif, drive); } static void hpt3xx_maskproc(ide_drive_t *drive, int mask) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0) return; if (info->chip_type >= HPT370) { u8 scr1 = 0; pci_read_config_byte(dev, 0x5a, &scr1); if (((scr1 & 0x10) >> 4) != mask) { if (mask) scr1 |= 0x10; else scr1 &= ~0x10; pci_write_config_byte(dev, 0x5a, scr1); } } else if (mask) disable_irq(hwif->irq); else enable_irq(hwif->irq); } /* * This is specific to the HPT366 UDMA chipset * by HighPoint|Triones Technologies, Inc. 
*/ static void hpt366_dma_lost_irq(ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u8 mcr1 = 0, mcr3 = 0, scr1 = 0; pci_read_config_byte(dev, 0x50, &mcr1); pci_read_config_byte(dev, 0x52, &mcr3); pci_read_config_byte(dev, 0x5a, &scr1); printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n", drive->name, __func__, mcr1, mcr3, scr1); if (scr1 & 0x10) pci_write_config_byte(dev, 0x5a, scr1 & ~0x10); ide_dma_lost_irq(drive); } static void hpt370_clear_engine(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); pci_write_config_byte(dev, hwif->select_data, 0x37); udelay(10); } static void hpt370_irq_timeout(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u16 bfifo = 0; u8 dma_cmd; pci_read_config_word(dev, hwif->select_data + 2, &bfifo); printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff); /* get DMA command mode */ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); /* stop DMA */ outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); hpt370_clear_engine(drive); } static void hpt370_dma_start(ide_drive_t *drive) { #ifdef HPT_RESET_STATE_ENGINE hpt370_clear_engine(drive); #endif ide_dma_start(drive); } static int hpt370_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); if (dma_stat & ATA_DMA_ACTIVE) { /* wait a little */ udelay(20); dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); if (dma_stat & ATA_DMA_ACTIVE) hpt370_irq_timeout(drive); } return ide_dma_end(drive); } /* returns 1 if DMA IRQ issued, 0 otherwise */ static int hpt374_dma_test_irq(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u16 bfifo = 0; u8 dma_stat; pci_read_config_word(dev, hwif->select_data + 2, &bfifo); if (bfifo & 0x1FF) { // printk("%s: %d bytes in FIFO\n", drive->name, bfifo); return 0; } dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); /* 
return 1 if INTR asserted */ if (dma_stat & ATA_DMA_INTR) return 1; return 0; } static int hpt374_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 mcr = 0, mcr_addr = hwif->select_data; u8 bwsr = 0, mask = hwif->channel ? 0x02 : 0x01; pci_read_config_byte(dev, 0x6a, &bwsr); pci_read_config_byte(dev, mcr_addr, &mcr); if (bwsr & mask) pci_write_config_byte(dev, mcr_addr, mcr | 0x30); return ide_dma_end(drive); } /** * hpt3xxn_set_clock - perform clock switching dance * @hwif: hwif to switch * @mode: clocking mode (0x21 for write, 0x23 otherwise) * * Switch the DPLL clock on the HPT3xxN devices. This is a right mess. */ static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode) { unsigned long base = hwif->extra_base; u8 scr2 = inb(base + 0x6b); if ((scr2 & 0x7f) == mode) return; /* Tristate the bus */ outb(0x80, base + 0x63); outb(0x80, base + 0x67); /* Switch clock and reset channels */ outb(mode, base + 0x6b); outb(0xc0, base + 0x69); /* * Reset the state machines. * NOTE: avoid accidentally enabling the disabled channels. */ outb(inb(base + 0x60) | 0x32, base + 0x60); outb(inb(base + 0x64) | 0x32, base + 0x64); /* Complete reset */ outb(0x00, base + 0x69); /* Reconnect channels to bus */ outb(0x00, base + 0x63); outb(0x00, base + 0x67); } /** * hpt3xxn_rw_disk - prepare for I/O * @drive: drive for command * @rq: block request structure * * This is called when a disk I/O is issued to HPT3xxN. * We need it because of the clock switching. */ static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq) { hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x21 : 0x23); } /** * hpt37x_calibrate_dpll - calibrate the DPLL * @dev: PCI device * * Perform a calibration cycle on the DPLL. 
* Returns 1 if this succeeds */ static int hpt37x_calibrate_dpll(struct pci_dev *dev, u16 f_low, u16 f_high) { u32 dpll = (f_high << 16) | f_low | 0x100; u8 scr2; int i; pci_write_config_dword(dev, 0x5c, dpll); /* Wait for oscillator ready */ for(i = 0; i < 0x5000; ++i) { udelay(50); pci_read_config_byte(dev, 0x5b, &scr2); if (scr2 & 0x80) break; } /* See if it stays ready (we'll just bail out if it's not yet) */ for(i = 0; i < 0x1000; ++i) { pci_read_config_byte(dev, 0x5b, &scr2); /* DPLL destabilized? */ if(!(scr2 & 0x80)) return 0; } /* Turn off tuning, we have the DPLL set */ pci_read_config_dword (dev, 0x5c, &dpll); pci_write_config_dword(dev, 0x5c, (dpll & ~0x100)); return 1; } static void hpt3xx_disable_fast_irq(struct pci_dev *dev, u8 mcr_addr) { struct ide_host *host = pci_get_drvdata(dev); struct hpt_info *info = host->host_priv + (&dev->dev == host->dev[1]); u8 chip_type = info->chip_type; u8 new_mcr, old_mcr = 0; /* * Disable the "fast interrupt" prediction. Don't hold off * on interrupts. 
(== 0x01 despite what the docs say) */ pci_read_config_byte(dev, mcr_addr + 1, &old_mcr); if (chip_type >= HPT374) new_mcr = old_mcr & ~0x07; else if (chip_type >= HPT370) { new_mcr = old_mcr; new_mcr &= ~0x02; #ifdef HPT_DELAY_INTERRUPT new_mcr &= ~0x01; #else new_mcr |= 0x01; #endif } else /* HPT366 and HPT368 */ new_mcr = old_mcr & ~0x80; if (new_mcr != old_mcr) pci_write_config_byte(dev, mcr_addr + 1, new_mcr); } static int init_chipset_hpt366(struct pci_dev *dev) { unsigned long io_base = pci_resource_start(dev, 4); struct hpt_info *info = hpt3xx_get_info(&dev->dev); const char *name = DRV_NAME; u8 pci_clk, dpll_clk = 0; /* PCI and DPLL clock in MHz */ u8 chip_type; enum ata_clock clock; chip_type = info->chip_type; pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); pci_write_config_byte(dev, PCI_MAX_LAT, 0x08); /* * First, try to estimate the PCI clock frequency... */ if (chip_type >= HPT370) { u8 scr1 = 0; u16 f_cnt = 0; u32 temp = 0; /* Interrupt force enable. */ pci_read_config_byte(dev, 0x5a, &scr1); if (scr1 & 0x10) pci_write_config_byte(dev, 0x5a, scr1 & ~0x10); /* * HighPoint does this for HPT372A. * NOTE: This register is only writeable via I/O space. */ if (chip_type == HPT372A) outb(0x0e, io_base + 0x9c); /* * Default to PCI clock. Make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. */ pci_write_config_byte(dev, 0x5b, 0x23); /* * We'll have to read f_CNT value in order to determine * the PCI clock frequency according to the following ratio: * * f_CNT = Fpci * 192 / Fdpll * * First try reading the register in which the HighPoint BIOS * saves f_CNT value before reprogramming the DPLL from its * default setting (which differs for the various chips). 
* * NOTE: This register is only accessible via I/O space; * HPT374 BIOS only saves it for the function 0, so we have to * always read it from there -- no need to check the result of * pci_get_slot() for the function 0 as the whole device has * been already "pinned" (via function 1) in init_setup_hpt374() */ if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) { struct pci_dev *dev1 = pci_get_slot(dev->bus, dev->devfn - 1); unsigned long io_base = pci_resource_start(dev1, 4); temp = inl(io_base + 0x90); pci_dev_put(dev1); } else temp = inl(io_base + 0x90); /* * In case the signature check fails, we'll have to * resort to reading the f_CNT register itself in hopes * that nobody has touched the DPLL yet... */ if ((temp & 0xFFFFF000) != 0xABCDE000) { int i; printk(KERN_WARNING "%s %s: no clock data saved by " "BIOS\n", name, pci_name(dev)); /* Calculate the average value of f_CNT. */ for (temp = i = 0; i < 128; i++) { pci_read_config_word(dev, 0x78, &f_cnt); temp += f_cnt & 0x1ff; mdelay(1); } f_cnt = temp / 128; } else f_cnt = temp & 0x1ff; dpll_clk = info->dpll_clk; pci_clk = (f_cnt * dpll_clk) / 192; /* Clamp PCI clock to bands. */ if (pci_clk < 40) pci_clk = 33; else if(pci_clk < 45) pci_clk = 40; else if(pci_clk < 55) pci_clk = 50; else pci_clk = 66; printk(KERN_INFO "%s %s: DPLL base: %d MHz, f_CNT: %d, " "assuming %d MHz PCI\n", name, pci_name(dev), dpll_clk, f_cnt, pci_clk); } else { u32 itr1 = 0; pci_read_config_dword(dev, 0x40, &itr1); /* Detect PCI clock by looking at cmd_high_time. */ switch((itr1 >> 8) & 0x07) { case 0x09: pci_clk = 40; break; case 0x05: pci_clk = 25; break; case 0x07: default: pci_clk = 33; break; } } /* Let's assume we'll use PCI clock for the ATA clock... 
*/ switch (pci_clk) { case 25: clock = ATA_CLOCK_25MHZ; break; case 33: default: clock = ATA_CLOCK_33MHZ; break; case 40: clock = ATA_CLOCK_40MHZ; break; case 50: clock = ATA_CLOCK_50MHZ; break; case 66: clock = ATA_CLOCK_66MHZ; break; } /* * Only try the DPLL if we don't have a table for the PCI clock that * we are running at for HPT370/A, always use it for anything newer... * * NOTE: Using the internal DPLL results in slow reads on 33 MHz PCI. * We also don't like using the DPLL because this causes glitches * on PRST-/SRST- when the state engine gets reset... */ if (chip_type >= HPT374 || info->timings->clock_table[clock] == NULL) { u16 f_low, delta = pci_clk < 50 ? 2 : 4; int adjust; /* * Select 66 MHz DPLL clock only if UltraATA/133 mode is * supported/enabled, use 50 MHz DPLL clock otherwise... */ if (info->udma_mask == ATA_UDMA6) { dpll_clk = 66; clock = ATA_CLOCK_66MHZ; } else if (dpll_clk) { /* HPT36x chips don't have DPLL */ dpll_clk = 50; clock = ATA_CLOCK_50MHZ; } if (info->timings->clock_table[clock] == NULL) { printk(KERN_ERR "%s %s: unknown bus timing!\n", name, pci_name(dev)); return -EIO; } /* Select the DPLL clock. */ pci_write_config_byte(dev, 0x5b, 0x21); /* * Adjust the DPLL based upon PCI clock, enable it, * and wait for stabilization... */ f_low = (pci_clk * 48) / dpll_clk; for (adjust = 0; adjust < 8; adjust++) { if(hpt37x_calibrate_dpll(dev, f_low, f_low + delta)) break; /* * See if it'll settle at a fractionally different clock */ if (adjust & 1) f_low -= adjust >> 1; else f_low += adjust >> 1; } if (adjust == 8) { printk(KERN_ERR "%s %s: DPLL did not stabilize!\n", name, pci_name(dev)); return -EIO; } printk(KERN_INFO "%s %s: using %d MHz DPLL clock\n", name, pci_name(dev), dpll_clk); } else { /* Mark the fact that we're not using the DPLL. */ dpll_clk = 0; printk(KERN_INFO "%s %s: using %d MHz PCI clock\n", name, pci_name(dev), pci_clk); } /* Store the clock frequencies. 
*/ info->dpll_clk = dpll_clk; info->pci_clk = pci_clk; info->clock = clock; if (chip_type >= HPT370) { u8 mcr1, mcr4; /* * Reset the state engines. * NOTE: Avoid accidentally enabling the disabled channels. */ pci_read_config_byte (dev, 0x50, &mcr1); pci_read_config_byte (dev, 0x54, &mcr4); pci_write_config_byte(dev, 0x50, (mcr1 | 0x32)); pci_write_config_byte(dev, 0x54, (mcr4 | 0x32)); udelay(100); } /* * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in * the MISC. register to stretch the UltraDMA Tss timing. * NOTE: This register is only writeable via I/O space. */ if (chip_type == HPT371N && clock == ATA_CLOCK_66MHZ) outb(inb(io_base + 0x9c) | 0x04, io_base + 0x9c); hpt3xx_disable_fast_irq(dev, 0x50); hpt3xx_disable_fast_irq(dev, 0x54); return 0; } static u8 hpt3xx_cable_detect(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); u8 chip_type = info->chip_type; u8 scr1 = 0, ata66 = hwif->channel ? 0x01 : 0x02; /* * The HPT37x uses the CBLID pins as outputs for MA15/MA16 * address lines to access an external EEPROM. To read valid * cable detect state the pins must be enabled as inputs. 
*/ if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) { /* * HPT374 PCI function 1 * - set bit 15 of reg 0x52 to enable TCBLID as input * - set bit 15 of reg 0x56 to enable FCBLID as input */ u8 mcr_addr = hwif->select_data + 2; u16 mcr; pci_read_config_word(dev, mcr_addr, &mcr); pci_write_config_word(dev, mcr_addr, mcr | 0x8000); /* Debounce, then read cable ID register */ udelay(10); pci_read_config_byte(dev, 0x5a, &scr1); pci_write_config_word(dev, mcr_addr, mcr); } else if (chip_type >= HPT370) { /* * HPT370/372 and 374 pcifn 0 * - clear bit 0 of reg 0x5b to enable P/SCBLID as inputs */ u8 scr2 = 0; pci_read_config_byte(dev, 0x5b, &scr2); pci_write_config_byte(dev, 0x5b, scr2 & ~1); /* Debounce, then read cable ID register */ udelay(10); pci_read_config_byte(dev, 0x5a, &scr1); pci_write_config_byte(dev, 0x5b, scr2); } else pci_read_config_byte(dev, 0x5a, &scr1); return (scr1 & ata66) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; } static void __devinit init_hwif_hpt366(ide_hwif_t *hwif) { struct hpt_info *info = hpt3xx_get_info(hwif->dev); u8 chip_type = info->chip_type; /* Cache the channel's MISC. control registers' offset */ hwif->select_data = hwif->channel ? 0x54 : 0x50; /* * HPT3xxN chips have some complications: * * - on 33 MHz PCI we must clock switch * - on 66 MHz PCI we must NOT use the PCI clock */ if (chip_type >= HPT372N && info->dpll_clk && info->pci_clk < 66) { /* * Clock is shared between the channels, * so we'll have to serialize them... 
:-( */ hwif->host->host_flags |= IDE_HFLAG_SERIALIZE; hwif->rw_disk = &hpt3xxn_rw_disk; } } static int __devinit init_dma_hpt366(ide_hwif_t *hwif, const struct ide_port_info *d) { struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long flags, base = ide_pci_dma_base(hwif, d); u8 dma_old, dma_new, masterdma = 0, slavedma = 0; if (base == 0) return -1; hwif->dma_base = base; if (ide_pci_check_simplex(hwif, d) < 0) return -1; if (ide_pci_set_master(dev, d->name) < 0) return -1; dma_old = inb(base + 2); local_irq_save(flags); dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); pci_read_config_byte(dev, hwif->channel ? 0x4f : 0x47, &slavedma); if (masterdma & 0x30) dma_new |= 0x20; if ( slavedma & 0x30) dma_new |= 0x40; if (dma_new != dma_old) outb(dma_new, base + 2); local_irq_restore(flags); printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); hwif->extra_base = base + (hwif->channel ? 8 : 16); if (ide_allocate_dma_engine(hwif)) return -1; return 0; } static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2) { if (dev2->irq != dev->irq) { /* FIXME: we need a core pci_set_interrupt() */ dev2->irq = dev->irq; printk(KERN_INFO DRV_NAME " %s: PCI config space interrupt " "fixed\n", pci_name(dev2)); } } static void __devinit hpt371_init(struct pci_dev *dev) { u8 mcr1 = 0; /* * HPT371 chips physically have only one channel, the secondary one, * but the primary channel registers do exist! Go figure... * So, we manually disable the non-existing channel here * (if the BIOS hasn't done this already). */ pci_read_config_byte(dev, 0x50, &mcr1); if (mcr1 & 0x04) pci_write_config_byte(dev, 0x50, mcr1 & ~0x04); } static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2) { u8 mcr1 = 0, pin1 = 0, pin2 = 0; /* * Now we'll have to force both channels enabled if * at least one of them has been enabled by BIOS... 
*/ pci_read_config_byte(dev, 0x50, &mcr1); if (mcr1 & 0x30) pci_write_config_byte(dev, 0x50, mcr1 | 0x30); pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1); pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2); if (pin1 != pin2 && dev->irq == dev2->irq) { printk(KERN_INFO DRV_NAME " %s: onboard version of chipset, " "pin1=%d pin2=%d\n", pci_name(dev), pin1, pin2); return 1; } return 0; } #define IDE_HFLAGS_HPT3XX \ (IDE_HFLAG_NO_ATAPI_DMA | \ IDE_HFLAG_OFF_BOARD) static const struct ide_port_ops hpt3xx_port_ops = { .set_pio_mode = hpt3xx_set_pio_mode, .set_dma_mode = hpt3xx_set_mode, .maskproc = hpt3xx_maskproc, .mdma_filter = hpt3xx_mdma_filter, .udma_filter = hpt3xx_udma_filter, .cable_detect = hpt3xx_cable_detect, }; static const struct ide_dma_ops hpt37x_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ide_dma_start, .dma_end = hpt374_dma_end, .dma_test_irq = hpt374_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_dma_ops hpt370_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = hpt370_dma_start, .dma_end = hpt370_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_clear = hpt370_irq_timeout, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_dma_ops hpt36x_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ide_dma_start, .dma_end = ide_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = hpt366_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_port_info hpt366_chipsets[] __devinitdata = { { /* 0: HPT36x */ .name = DRV_NAME, .init_chipset = init_chipset_hpt366, .init_hwif = init_hwif_hpt366, .init_dma = init_dma_hpt366, /* * HPT36x chips have 
one channel per function and have * both channel enable bits located differently and visible * to both functions -- really stupid design decision... :-( * Bit 4 is for the primary channel, bit 5 for the secondary. */ .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}}, .port_ops = &hpt3xx_port_ops, .dma_ops = &hpt36x_dma_ops, .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, }, { /* 1: HPT3xx */ .name = DRV_NAME, .init_chipset = init_chipset_hpt366, .init_hwif = init_hwif_hpt366, .init_dma = init_dma_hpt366, .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, .port_ops = &hpt3xx_port_ops, .dma_ops = &hpt37x_dma_ops, .host_flags = IDE_HFLAGS_HPT3XX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, } }; /** * hpt366_init_one - called when an HPT366 is found * @dev: the hpt366 device * @id: the matching pci id * * Called when the PCI registration layer (or the IDE initialization) * finds a device matching our IDE device tables. */ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id) { const struct hpt_info *info = NULL; struct hpt_info *dyn_info; struct pci_dev *dev2 = NULL; struct ide_port_info d; u8 idx = id->driver_data; u8 rev = dev->revision; int ret; if ((idx == 0 || idx == 4) && (PCI_FUNC(dev->devfn) & 1)) return -ENODEV; switch (idx) { case 0: if (rev < 3) info = &hpt36x; else { switch (min_t(u8, rev, 6)) { case 3: info = &hpt370; break; case 4: info = &hpt370a; break; case 5: info = &hpt372; break; case 6: info = &hpt372n; break; } idx++; } break; case 1: info = (rev > 1) ? &hpt372n : &hpt372a; break; case 2: info = (rev > 1) ? &hpt302n : &hpt302; break; case 3: hpt371_init(dev); info = (rev > 1) ? 
&hpt371n : &hpt371; break; case 4: info = &hpt374; break; case 5: info = &hpt372n; break; } printk(KERN_INFO DRV_NAME ": %s chipset detected\n", info->chip_name); d = hpt366_chipsets[min_t(u8, idx, 1)]; d.udma_mask = info->udma_mask; /* fixup ->dma_ops for HPT370/HPT370A */ if (info == &hpt370 || info == &hpt370a) d.dma_ops = &hpt370_dma_ops; if (info == &hpt36x || info == &hpt374) dev2 = pci_get_slot(dev->bus, dev->devfn + 1); dyn_info = kzalloc(sizeof(*dyn_info) * (dev2 ? 2 : 1), GFP_KERNEL); if (dyn_info == NULL) { printk(KERN_ERR "%s %s: out of memory!\n", d.name, pci_name(dev)); pci_dev_put(dev2); return -ENOMEM; } /* * Copy everything from a static "template" structure * to just allocated per-chip hpt_info structure. */ memcpy(dyn_info, info, sizeof(*dyn_info)); if (dev2) { memcpy(dyn_info + 1, info, sizeof(*dyn_info)); if (info == &hpt374) hpt374_init(dev, dev2); else { if (hpt36x_init(dev, dev2)) d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE; } ret = ide_pci_init_two(dev, dev2, &d, dyn_info); if (ret < 0) { pci_dev_put(dev2); kfree(dyn_info); } return ret; } ret = ide_pci_init_one(dev, &d, dyn_info); if (ret < 0) kfree(dyn_info); return ret; } static void __devexit hpt366_remove(struct pci_dev *dev) { struct ide_host *host = pci_get_drvdata(dev); struct ide_info *info = host->host_priv; struct pci_dev *dev2 = host->dev[1] ? 
to_pci_dev(host->dev[1]) : NULL; ide_pci_remove(dev); pci_dev_put(dev2); kfree(info); } static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = { { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), 3 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), 4 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), 5 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, hpt366_pci_tbl); static struct pci_driver hpt366_pci_driver = { .name = "HPT366_IDE", .id_table = hpt366_pci_tbl, .probe = hpt366_init_one, .remove = __devexit_p(hpt366_remove), .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init hpt366_ide_init(void) { return ide_pci_register_driver(&hpt366_pci_driver); } static void __exit hpt366_ide_exit(void) { pci_unregister_driver(&hpt366_pci_driver); } module_init(hpt366_ide_init); module_exit(hpt366_ide_exit); MODULE_AUTHOR("Andre Hedrick"); MODULE_DESCRIPTION("PCI driver module for Highpoint HPT366 IDE"); MODULE_LICENSE("GPL");
gpl-2.0
Android-L-Porting-Team/android_kernel_mako
drivers/hwmon/pmbus/max16064.c
9785
3385
/* * Hardware monitoring driver for Maxim MAX16064 * * Copyright (c) 2011 Ericsson AB. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/i2c.h> #include "pmbus.h" #define MAX16064_MFR_VOUT_PEAK 0xd4 #define MAX16064_MFR_TEMPERATURE_PEAK 0xd6 static int max16064_read_word_data(struct i2c_client *client, int page, int reg) { int ret; switch (reg) { case PMBUS_VIRT_READ_VOUT_MAX: ret = pmbus_read_word_data(client, page, MAX16064_MFR_VOUT_PEAK); break; case PMBUS_VIRT_READ_TEMP_MAX: ret = pmbus_read_word_data(client, page, MAX16064_MFR_TEMPERATURE_PEAK); break; case PMBUS_VIRT_RESET_VOUT_HISTORY: case PMBUS_VIRT_RESET_TEMP_HISTORY: ret = 0; break; default: ret = -ENODATA; break; } return ret; } static int max16064_write_word_data(struct i2c_client *client, int page, int reg, u16 word) { int ret; switch (reg) { case PMBUS_VIRT_RESET_VOUT_HISTORY: ret = pmbus_write_word_data(client, page, MAX16064_MFR_VOUT_PEAK, 0); break; case PMBUS_VIRT_RESET_TEMP_HISTORY: ret = pmbus_write_word_data(client, page, MAX16064_MFR_TEMPERATURE_PEAK, 0xffff); break; default: ret = -ENODATA; break; } return ret; } static struct pmbus_driver_info max16064_info = { .pages = 4, .format[PSC_VOLTAGE_IN] = direct, .format[PSC_VOLTAGE_OUT] = direct, 
.format[PSC_TEMPERATURE] = direct, .m[PSC_VOLTAGE_IN] = 19995, .b[PSC_VOLTAGE_IN] = 0, .R[PSC_VOLTAGE_IN] = -1, .m[PSC_VOLTAGE_OUT] = 19995, .b[PSC_VOLTAGE_OUT] = 0, .R[PSC_VOLTAGE_OUT] = -1, .m[PSC_TEMPERATURE] = -7612, .b[PSC_TEMPERATURE] = 335, .R[PSC_TEMPERATURE] = -3, .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_TEMP, .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, .read_word_data = max16064_read_word_data, .write_word_data = max16064_write_word_data, }; static int max16064_probe(struct i2c_client *client, const struct i2c_device_id *id) { return pmbus_do_probe(client, id, &max16064_info); } static const struct i2c_device_id max16064_id[] = { {"max16064", 0}, {} }; MODULE_DEVICE_TABLE(i2c, max16064_id); /* This is the driver that will be inserted */ static struct i2c_driver max16064_driver = { .driver = { .name = "max16064", }, .probe = max16064_probe, .remove = pmbus_do_remove, .id_table = max16064_id, }; module_i2c_driver(max16064_driver); MODULE_AUTHOR("Guenter Roeck"); MODULE_DESCRIPTION("PMBus driver for Maxim MAX16064"); MODULE_LICENSE("GPL");
gpl-2.0
sh95119/linux
drivers/net/geneve.c
58
25744
/* * GENEVE: Generic Network Virtualization Encapsulation * * Copyright (c) 2015 Red Hat, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/hash.h> #include <net/dst_metadata.h> #include <net/gro_cells.h> #include <net/rtnetlink.h> #include <net/geneve.h> #include <net/protocol.h> #define GENEVE_NETDEV_VER "0.6" #define GENEVE_UDP_PORT 6081 #define GENEVE_N_VID (1u << 24) #define GENEVE_VID_MASK (GENEVE_N_VID - 1) #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); #define GENEVE_VER 0 #define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr)) /* per-network namespace private data for this module */ struct geneve_net { struct list_head geneve_list; struct list_head sock_list; }; static int geneve_net_id; /* Pseudo network device */ struct geneve_dev { struct hlist_node hlist; /* vni hash table */ struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for geneve tunnel */ struct geneve_sock *sock; /* socket used for geneve tunnel */ u8 vni[3]; /* virtual network ID for tunnel */ u8 ttl; /* TTL override */ u8 tos; /* TOS override */ struct sockaddr_in remote; /* IPv4 address for link partner */ struct list_head next; /* geneve's per namespace list */ __be16 dst_port; bool collect_md; struct gro_cells gro_cells; }; struct geneve_sock { bool collect_md; struct list_head list; struct socket *sock; struct rcu_head rcu; int refcnt; struct udp_offload udp_offloads; struct hlist_head vni_list[VNI_HASH_SIZE]; }; static inline __u32 geneve_net_vni_hash(u8 vni[3]) { 
__u32 vnid; vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2]; return hash_32(vnid, VNI_HASH_BITS); } static __be64 vni_to_tunnel_id(const __u8 *vni) { #ifdef __BIG_ENDIAN return (vni[0] << 16) | (vni[1] << 8) | vni[2]; #else return (__force __be64)(((__force u64)vni[0] << 40) | ((__force u64)vni[1] << 48) | ((__force u64)vni[2] << 56)); #endif } static struct geneve_dev *geneve_lookup(struct geneve_sock *gs, __be32 addr, u8 vni[]) { struct hlist_head *vni_list_head; struct geneve_dev *geneve; __u32 hash; /* Find the device for this VNI */ hash = geneve_net_vni_hash(vni); vni_list_head = &gs->vni_list[hash]; hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) { if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) && addr == geneve->remote.sin_addr.s_addr) return geneve; } return NULL; } static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb) { return (struct genevehdr *)(udp_hdr(skb) + 1); } /* geneve receive/decap routine */ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) { struct genevehdr *gnvh = geneve_hdr(skb); struct metadata_dst *tun_dst = NULL; struct geneve_dev *geneve = NULL; struct pcpu_sw_netstats *stats; struct iphdr *iph; u8 *vni; __be32 addr; int err; iph = ip_hdr(skb); /* outer IP header... */ if (gs->collect_md) { static u8 zero_vni[3]; vni = zero_vni; addr = 0; } else { vni = gnvh->vni; addr = iph->saddr; } geneve = geneve_lookup(gs, addr, vni); if (!geneve) goto drop; if (ip_tunnel_collect_metadata() || gs->collect_md) { __be16 flags; flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT | (gnvh->oam ? TUNNEL_OAM : 0) | (gnvh->critical ? TUNNEL_CRIT_OPT : 0); tun_dst = udp_tun_rx_dst(skb, AF_INET, flags, vni_to_tunnel_id(gnvh->vni), gnvh->opt_len * 4); if (!tun_dst) goto drop; /* Update tunnel dst according to Geneve options. */ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, gnvh->options, gnvh->opt_len * 4); } else { /* Drop packets w/ critical options, * since we don't support any... 
*/ if (gnvh->critical) goto drop; } skb_reset_mac_header(skb); skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev))); skb->protocol = eth_type_trans(skb, geneve->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); if (tun_dst) skb_dst_set(skb, &tun_dst->dst); /* Ignore packet loops (and multicast echo) */ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) goto drop; skb_reset_network_header(skb); err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { if (log_ecn_error) net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", &iph->saddr, iph->tos); if (err > 1) { ++geneve->dev->stats.rx_frame_errors; ++geneve->dev->stats.rx_errors; goto drop; } } stats = this_cpu_ptr(geneve->dev->tstats); u64_stats_update_begin(&stats->syncp); stats->rx_packets++; stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); gro_cells_receive(&geneve->gro_cells, skb); return; drop: /* Consume bad packet */ kfree_skb(skb); } /* Setup stats when device is created */ static int geneve_init(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); int err; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; err = gro_cells_init(&geneve->gro_cells, dev); if (err) { free_percpu(dev->tstats); return err; } return 0; } static void geneve_uninit(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); gro_cells_destroy(&geneve->gro_cells); free_percpu(dev->tstats); } /* Callback from net/ipv4/udp.c to receive packets */ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct genevehdr *geneveh; struct geneve_sock *gs; int opts_len; /* Need Geneve and inner Ethernet header to be present */ if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN))) goto error; /* Return packets with reserved bits set */ geneveh = geneve_hdr(skb); if (unlikely(geneveh->ver != GENEVE_VER)) goto error; if (unlikely(geneveh->proto_type != htons(ETH_P_TEB))) goto error; opts_len = 
geneveh->opt_len * 4; if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, htons(ETH_P_TEB))) goto drop; gs = rcu_dereference_sk_user_data(sk); if (!gs) goto drop; geneve_rx(gs, skb); return 0; drop: /* Consume bad packet */ kfree_skb(skb); return 0; error: /* Let the UDP layer deal with the skb */ return 1; } static struct socket *geneve_create_sock(struct net *net, bool ipv6, __be16 port) { struct socket *sock; struct udp_port_cfg udp_conf; int err; memset(&udp_conf, 0, sizeof(udp_conf)); if (ipv6) { udp_conf.family = AF_INET6; } else { udp_conf.family = AF_INET; udp_conf.local_ip.s_addr = htonl(INADDR_ANY); } udp_conf.local_udp_port = port; /* Open UDP socket */ err = udp_sock_create(net, &udp_conf, &sock); if (err < 0) return ERR_PTR(err); return sock; } static void geneve_notify_add_rx_port(struct geneve_sock *gs) { struct sock *sk = gs->sock->sk; sa_family_t sa_family = sk->sk_family; int err; if (sa_family == AF_INET) { err = udp_add_offload(&gs->udp_offloads); if (err) pr_warn("geneve: udp_add_offload failed with status %d\n", err); } } static int geneve_hlen(struct genevehdr *gh) { return sizeof(*gh) + gh->opt_len * 4; } static struct sk_buff **geneve_gro_receive(struct sk_buff **head, struct sk_buff *skb, struct udp_offload *uoff) { struct sk_buff *p, **pp = NULL; struct genevehdr *gh, *gh2; unsigned int hlen, gh_len, off_gnv; const struct packet_offload *ptype; __be16 type; int flush = 1; off_gnv = skb_gro_offset(skb); hlen = off_gnv + sizeof(*gh); gh = skb_gro_header_fast(skb, off_gnv); if (skb_gro_header_hard(skb, hlen)) { gh = skb_gro_header_slow(skb, hlen, off_gnv); if (unlikely(!gh)) goto out; } if (gh->ver != GENEVE_VER || gh->oam) goto out; gh_len = geneve_hlen(gh); hlen = off_gnv + gh_len; if (skb_gro_header_hard(skb, hlen)) { gh = skb_gro_header_slow(skb, hlen, off_gnv); if (unlikely(!gh)) goto out; } flush = 0; for (p = *head; p; p = p->next) { if (!NAPI_GRO_CB(p)->same_flow) continue; gh2 = (struct genevehdr *)(p->data + off_gnv); if 
(gh->opt_len != gh2->opt_len || memcmp(gh, gh2, gh_len)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } type = gh->proto_type; rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (!ptype) { flush = 1; goto out_unlock; } skb_gro_pull(skb, gh_len); skb_gro_postpull_rcsum(skb, gh, gh_len); pp = ptype->callbacks.gro_receive(head, skb); out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } static int geneve_gro_complete(struct sk_buff *skb, int nhoff, struct udp_offload *uoff) { struct genevehdr *gh; struct packet_offload *ptype; __be16 type; int gh_len; int err = -ENOSYS; udp_tunnel_gro_complete(skb, nhoff); gh = (struct genevehdr *)(skb->data + nhoff); gh_len = geneve_hlen(gh); type = gh->proto_type; rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); rcu_read_unlock(); return err; } /* Create new listen socket if needed */ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, bool ipv6) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; struct socket *sock; struct udp_tunnel_sock_cfg tunnel_cfg; int h; gs = kzalloc(sizeof(*gs), GFP_KERNEL); if (!gs) return ERR_PTR(-ENOMEM); sock = geneve_create_sock(net, ipv6, port); if (IS_ERR(sock)) { kfree(gs); return ERR_CAST(sock); } gs->sock = sock; gs->refcnt = 1; for (h = 0; h < VNI_HASH_SIZE; ++h) INIT_HLIST_HEAD(&gs->vni_list[h]); /* Initialize the geneve udp offloads structure */ gs->udp_offloads.port = port; gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive; gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete; geneve_notify_add_rx_port(gs); /* Mark socket as an encapsulation socket */ tunnel_cfg.sk_user_data = gs; tunnel_cfg.encap_type = 1; tunnel_cfg.encap_rcv = geneve_udp_encap_recv; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, sock, &tunnel_cfg); list_add(&gs->list, &gn->sock_list); return gs; } static void 
geneve_notify_del_rx_port(struct geneve_sock *gs) { struct sock *sk = gs->sock->sk; sa_family_t sa_family = sk->sk_family; if (sa_family == AF_INET) udp_del_offload(&gs->udp_offloads); } static void geneve_sock_release(struct geneve_sock *gs) { if (--gs->refcnt) return; list_del(&gs->list); geneve_notify_del_rx_port(gs); udp_tunnel_sock_release(gs->sock); kfree_rcu(gs, rcu); } static struct geneve_sock *geneve_find_sock(struct geneve_net *gn, __be16 dst_port) { struct geneve_sock *gs; list_for_each_entry(gs, &gn->sock_list, list) { if (inet_sk(gs->sock->sk)->inet_sport == dst_port && inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) { return gs; } } return NULL; } static int geneve_open(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct net *net = geneve->net; struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; __u32 hash; gs = geneve_find_sock(gn, geneve->dst_port); if (gs) { gs->refcnt++; goto out; } gs = geneve_socket_create(net, geneve->dst_port, false); if (IS_ERR(gs)) return PTR_ERR(gs); out: gs->collect_md = geneve->collect_md; geneve->sock = gs; hash = geneve_net_vni_hash(geneve->vni); hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]); return 0; } static int geneve_stop(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs = geneve->sock; if (!hlist_unhashed(&geneve->hlist)) hlist_del_rcu(&geneve->hlist); geneve_sock_release(gs); return 0; } static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb, __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt, bool csum) { struct genevehdr *gnvh; int min_headroom; int err; min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr); err = skb_cow_head(skb, min_headroom); if (unlikely(err)) { kfree_skb(skb); goto free_rt; } skb = udp_tunnel_handle_offloads(skb, csum); if (IS_ERR(skb)) { err = PTR_ERR(skb); goto free_rt; } gnvh = (struct genevehdr 
*)__skb_push(skb, sizeof(*gnvh) + opt_len); gnvh->ver = GENEVE_VER; gnvh->opt_len = opt_len / 4; gnvh->oam = !!(tun_flags & TUNNEL_OAM); gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT); gnvh->rsvd1 = 0; memcpy(gnvh->vni, vni, 3); gnvh->proto_type = htons(ETH_P_TEB); gnvh->rsvd2 = 0; memcpy(gnvh->options, opt, opt_len); skb_set_inner_protocol(skb, htons(ETH_P_TEB)); return 0; free_rt: ip_rt_put(rt); return err; } static struct rtable *geneve_get_rt(struct sk_buff *skb, struct net_device *dev, struct flowi4 *fl4, struct ip_tunnel_info *info) { struct geneve_dev *geneve = netdev_priv(dev); struct rtable *rt = NULL; __u8 tos; memset(fl4, 0, sizeof(*fl4)); fl4->flowi4_mark = skb->mark; fl4->flowi4_proto = IPPROTO_UDP; if (info) { fl4->daddr = info->key.u.ipv4.dst; fl4->saddr = info->key.u.ipv4.src; fl4->flowi4_tos = RT_TOS(info->key.tos); } else { tos = geneve->tos; if (tos == 1) { const struct iphdr *iip = ip_hdr(skb); tos = ip_tunnel_get_dsfield(iip, skb); } fl4->flowi4_tos = RT_TOS(tos); fl4->daddr = geneve->remote.sin_addr.s_addr; } rt = ip_route_output_key(geneve->net, fl4); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr); dev->stats.tx_carrier_errors++; return rt; } if (rt->dst.dev == dev) { /* is this necessary? */ netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr); dev->stats.collisions++; ip_rt_put(rt); return ERR_PTR(-EINVAL); } return rt; } /* Convert 64 bit tunnel ID to 24 bit VNI. 
*/ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) { #ifdef __BIG_ENDIAN vni[0] = (__force __u8)(tun_id >> 16); vni[1] = (__force __u8)(tun_id >> 8); vni[2] = (__force __u8)tun_id; #else vni[0] = (__force __u8)((__force u64)tun_id >> 40); vni[1] = (__force __u8)((__force u64)tun_id >> 48); vni[2] = (__force __u8)((__force u64)tun_id >> 56); #endif } static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs = geneve->sock; struct ip_tunnel_info *info = NULL; struct rtable *rt = NULL; const struct iphdr *iip; /* interior IP header */ struct flowi4 fl4; __u8 tos, ttl; __be16 sport; bool udp_csum; __be16 df; int err; if (geneve->collect_md) { info = skb_tunnel_info(skb); if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) { netdev_dbg(dev, "no tunnel metadata\n"); goto tx_error; } if (info && ip_tunnel_info_af(info) != AF_INET) goto tx_error; } rt = geneve_get_rt(skb, dev, &fl4, info); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr); dev->stats.tx_carrier_errors++; goto tx_error; } sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); skb_reset_mac_header(skb); iip = ip_hdr(skb); if (info) { const struct ip_tunnel_key *key = &info->key; u8 *opts = NULL; u8 vni[3]; tunnel_id_to_vni(key->tun_id, vni); if (key->tun_flags & TUNNEL_GENEVE_OPT) opts = ip_tunnel_info_opts(info); udp_csum = !!(key->tun_flags & TUNNEL_CSUM); err = geneve_build_skb(rt, skb, key->tun_flags, vni, info->options_len, opts, udp_csum); if (unlikely(err)) goto err; tos = ip_tunnel_ecn_encap(key->tos, iip, skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { udp_csum = false; err = geneve_build_skb(rt, skb, 0, geneve->vni, 0, NULL, udp_csum); if (unlikely(err)) goto err; tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); ttl = geneve->ttl; if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) ttl = 1; ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); df = 0; } err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr, tos, ttl, df, sport, geneve->dst_port, !net_eq(geneve->net, dev_net(geneve->dev)), !udp_csum); iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return NETDEV_TX_OK; tx_error: dev_kfree_skb(skb); err: dev->stats.tx_errors++; return NETDEV_TX_OK; } static const struct net_device_ops geneve_netdev_ops = { .ndo_init = geneve_init, .ndo_uninit = geneve_uninit, .ndo_open = geneve_open, .ndo_stop = geneve_stop, .ndo_start_xmit = geneve_xmit, .ndo_get_stats64 = ip_tunnel_get_stats64, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; static void geneve_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version)); strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver)); } static const struct ethtool_ops geneve_ethtool_ops = { .get_drvinfo = geneve_get_drvinfo, .get_link = ethtool_op_get_link, }; /* Info for udev, that this is a virtual tunnel endpoint */ static struct device_type geneve_type = { .name = "geneve", }; /* Initialize the device structure. 
*/ static void geneve_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &geneve_netdev_ops; dev->ethtool_ops = &geneve_ethtool_ops; dev->destructor = free_netdev; SET_NETDEV_DEVTYPE(dev, &geneve_type); dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; netif_keep_dst(dev); dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; eth_hw_addr_random(dev); } static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_ID] = { .type = NLA_U32 }, [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, [IFLA_GENEVE_TOS] = { .type = NLA_U8 }, [IFLA_GENEVE_PORT] = { .type = NLA_U16 }, [IFLA_GENEVE_COLLECT_METADATA] = { .type = NLA_FLAG }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (!data) return -EINVAL; if (data[IFLA_GENEVE_ID]) { __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); if (vni >= GENEVE_VID_MASK) return -ERANGE; } return 0; } static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, __be16 dst_port, __be32 rem_addr, u8 vni[], bool *tun_on_same_port, bool *tun_collect_md) { struct geneve_dev *geneve, *t; *tun_on_same_port = false; *tun_collect_md = false; t = NULL; list_for_each_entry(geneve, &gn->geneve_list, next) { if (geneve->dst_port == dst_port) { *tun_collect_md = geneve->collect_md; *tun_on_same_port = true; } if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) && rem_addr == geneve->remote.sin_addr.s_addr && dst_port == geneve->dst_port) t = geneve; } return t; } static int geneve_configure(struct net *net, struct net_device *dev, __be32 rem_addr, __u32 vni, 
__u8 ttl, __u8 tos, __be16 dst_port, bool metadata) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); bool tun_collect_md, tun_on_same_port; int err; if (metadata) { if (rem_addr || vni || tos || ttl) return -EINVAL; } geneve->net = net; geneve->dev = dev; geneve->vni[0] = (vni & 0x00ff0000) >> 16; geneve->vni[1] = (vni & 0x0000ff00) >> 8; geneve->vni[2] = vni & 0x000000ff; geneve->remote.sin_addr.s_addr = rem_addr; if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr))) return -EINVAL; geneve->ttl = ttl; geneve->tos = tos; geneve->dst_port = dst_port; geneve->collect_md = metadata; t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni, &tun_on_same_port, &tun_collect_md); if (t) return -EBUSY; if (metadata) { if (tun_on_same_port) return -EPERM; } else { if (tun_collect_md) return -EPERM; } err = register_netdevice(dev); if (err) return err; list_add(&geneve->next, &gn->geneve_list); return 0; } static int geneve_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { __be16 dst_port = htons(GENEVE_UDP_PORT); __u8 ttl = 0, tos = 0; bool metadata = false; __be32 rem_addr; __u32 vni; if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE]) return -EINVAL; vni = nla_get_u32(data[IFLA_GENEVE_ID]); rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); if (data[IFLA_GENEVE_TTL]) ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); if (data[IFLA_GENEVE_TOS]) tos = nla_get_u8(data[IFLA_GENEVE_TOS]); if (data[IFLA_GENEVE_PORT]) dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]); if (data[IFLA_GENEVE_COLLECT_METADATA]) metadata = true; return geneve_configure(net, dev, rem_addr, vni, ttl, tos, dst_port, metadata); } static void geneve_dellink(struct net_device *dev, struct list_head *head) { struct geneve_dev *geneve = netdev_priv(dev); list_del(&geneve->next); unregister_netdevice_queue(dev, head); } static size_t geneve_get_size(const struct net_device *dev) { return 
nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */ nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ 0; } static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); __u32 vni; vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2]; if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) goto nla_put_failure; if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, geneve->remote.sin_addr.s_addr)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) || nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) goto nla_put_failure; if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port)) goto nla_put_failure; if (geneve->collect_md) { if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops geneve_link_ops __read_mostly = { .kind = "geneve", .maxtype = IFLA_GENEVE_MAX, .policy = geneve_policy, .priv_size = sizeof(struct geneve_dev), .setup = geneve_setup, .validate = geneve_validate, .newlink = geneve_newlink, .dellink = geneve_dellink, .get_size = geneve_get_size, .fill_info = geneve_fill_info, }; struct net_device *geneve_dev_create_fb(struct net *net, const char *name, u8 name_assign_type, u16 dst_port) { struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; int err; memset(tb, 0, sizeof(tb)); dev = rtnl_create_link(net, name, name_assign_type, &geneve_link_ops, tb); if (IS_ERR(dev)) return dev; err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true); if (err) { free_netdev(dev); return ERR_PTR(err); } return dev; } EXPORT_SYMBOL_GPL(geneve_dev_create_fb); static __net_init int geneve_init_net(struct net *net) { struct geneve_net *gn = net_generic(net, 
geneve_net_id); INIT_LIST_HEAD(&gn->geneve_list); INIT_LIST_HEAD(&gn->sock_list); return 0; } static void __net_exit geneve_exit_net(struct net *net) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *geneve, *next; struct net_device *dev, *aux; LIST_HEAD(list); rtnl_lock(); /* gather any geneve devices that were moved into this ns */ for_each_netdev_safe(net, dev, aux) if (dev->rtnl_link_ops == &geneve_link_ops) unregister_netdevice_queue(dev, &list); /* now gather any other geneve devices that were created in this ns */ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) { /* If geneve->dev is in the same netns, it was already added * to the list by the previous loop. */ if (!net_eq(dev_net(geneve->dev), net)) unregister_netdevice_queue(geneve->dev, &list); } /* unregister the devices gathered above */ unregister_netdevice_many(&list); rtnl_unlock(); } static struct pernet_operations geneve_net_ops = { .init = geneve_init_net, .exit = geneve_exit_net, .id = &geneve_net_id, .size = sizeof(struct geneve_net), }; static int __init geneve_init_module(void) { int rc; rc = register_pernet_subsys(&geneve_net_ops); if (rc) goto out1; rc = rtnl_link_register(&geneve_link_ops); if (rc) goto out2; return 0; out2: unregister_pernet_subsys(&geneve_net_ops); out1: return rc; } late_initcall(geneve_init_module); static void __exit geneve_cleanup_module(void) { rtnl_link_unregister(&geneve_link_ops); unregister_pernet_subsys(&geneve_net_ops); } module_exit(geneve_cleanup_module); MODULE_LICENSE("GPL"); MODULE_VERSION(GENEVE_NETDEV_VER); MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>"); MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic"); MODULE_ALIAS_RTNL_LINK("geneve");
gpl-2.0
sigma-random/asuswrt-merlin
release/src-rt-7.x.main/src/cfe/cfe/ui/ui_test_flash.c
58
5015
/* ********************************************************************* * Broadcom Common Firmware Environment (CFE) * * Flash Test commands File: ui_test_flash.c * * Some commands to test the flash device interface. * * Author: Mitch Lichtenberg (mpl@broadcom.com) * ********************************************************************* * * Copyright 2000,2001,2002,2003 * Broadcom Corporation. All rights reserved. * * This software is furnished under license and may be used and * copied only in accordance with the following terms and * conditions. Subject to these conditions, you may download, * copy, install, use, modify and distribute modified or unmodified * copies of this software in source and/or binary form. No title * or ownership is transferred hereby. * * 1) Any source code used, modified or distributed must reproduce * and retain this copyright notice and list of conditions * as they appear in the source file. * * 2) No right is granted to use any trade name, trademark, or * logo of Broadcom Corporation. The "Broadcom Corporation" * name may not be used to endorse or promote products derived * from this software without the prior written permission of * Broadcom Corporation. * * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR * PURPOSE, OR NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT * SHALL BROADCOM BE LIABLE FOR ANY DAMAGES WHATSOEVER, AND IN * PARTICULAR, BROADCOM SHALL NOT BE LIABLE FOR DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE), EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
********************************************************************* */ #include "lib_types.h" #include "lib_string.h" #include "lib_queue.h" #include "lib_malloc.h" #include "lib_printf.h" #include "cfe_iocb.h" #include "cfe_device.h" #include "cfe_console.h" #include "cfe_devfuncs.h" #include "cfe_timer.h" #include "cfe_ioctl.h" #include "cfe_error.h" #include "ui_command.h" int ui_init_flashtestcmds(void); static int ui_cmd_flashtest(ui_cmdline_t *cmd,int argc,char *argv[]); //static int ui_cmd_readnvram(ui_cmdline_t *cmd,int argc,char *argv[]); //static int ui_cmd_erasenvram(ui_cmdline_t *cmd,int argc,char *argv[]); int ui_init_flashtestcmds(void) { cmd_addcmd("show flash", ui_cmd_flashtest, NULL, "Display information about a flash device.", "show flash [-sectors]", "-sectors;Display sector information"); return 0; } static char *flashtypes[] = { "Unknown","SRAM","ROM","Flash" }; static int ui_cmd_flashtest(ui_cmdline_t *cmd,int argc,char *argv[]) { flash_info_t info; int fd; int retlen; int res = 0; int idx; flash_sector_t sector; nvram_info_t nvraminfo; char *devname; int showsectors; devname = cmd_getarg(cmd,0); if (!devname) return ui_showusage(cmd); showsectors = cmd_sw_isset(cmd,"-sectors"); fd = cfe_open(devname); if (fd < 0) { ui_showerror(fd,"Could not open flash device %s",devname); return fd; } res = cfe_ioctl(fd,IOCTL_FLASH_GETINFO,(uint8_t *) &info,sizeof(flash_info_t),&retlen,0); if (res == 0) { printf("FLASH: Base %016llX size %08X type %02X(%s) flags %08X\n", info.flash_base,info.flash_size,info.flash_type,flashtypes[info.flash_type], info.flash_flags); } else { printf("FLASH: Could not determine flash information\n"); } res = cfe_ioctl(fd,IOCTL_NVRAM_GETINFO,(uint8_t *) &nvraminfo,sizeof(nvram_info_t),&retlen,0); if (res == 0) { printf("NVRAM: Offset %08X Size %08X EraseFlg %d\n", nvraminfo.nvram_offset,nvraminfo.nvram_size,nvraminfo.nvram_eraseflg); } else { printf("NVRAM: Not supported by this flash\n"); } if (showsectors && (info.flash_type 
== FLASH_TYPE_FLASH)) { printf("Flash sector information:\n"); idx = 0; for (;;) { sector.flash_sector_idx = idx; res = cfe_ioctl(fd,IOCTL_FLASH_GETSECTORS,(uint8_t *) &sector,sizeof(flash_sector_t),&retlen,0); if (res != 0) { printf("ioctl error\n"); break; } if (sector.flash_sector_status == FLASH_SECTOR_INVALID) break; printf(" Sector %d offset %08X size %d\n", sector.flash_sector_idx, sector.flash_sector_offset, sector.flash_sector_size); idx++; } } cfe_close(fd); return 0; }
gpl-2.0
chenyu105/linux
kernel/irq/manage.c
58
53106
/* * linux/kernel/irq/manage.c * * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar * Copyright (C) 2005-2006 Thomas Gleixner * * This file contains driver APIs to the irq subsystem. */ #define pr_fmt(fmt) "genirq: " fmt #include <linux/irq.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/random.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/rt.h> #include <linux/task_work.h> #include "internals.h" #ifdef CONFIG_IRQ_FORCED_THREADING __read_mostly bool force_irqthreads; static int __init setup_forced_irqthreads(char *arg) { force_irqthreads = true; return 0; } early_param("threadirqs", setup_forced_irqthreads); #endif static void __synchronize_hardirq(struct irq_desc *desc) { bool inprogress; do { unsigned long flags; /* * Wait until we're out of the critical section. This might * give the wrong answer due to the lack of memory barriers. */ while (irqd_irq_inprogress(&desc->irq_data)) cpu_relax(); /* Ok, that indicated we're done: double-check carefully. */ raw_spin_lock_irqsave(&desc->lock, flags); inprogress = irqd_irq_inprogress(&desc->irq_data); raw_spin_unlock_irqrestore(&desc->lock, flags); /* Oops, that failed? */ } while (inprogress); } /** * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) * @irq: interrupt number to wait for * * This function waits for any pending hard IRQ handlers for this * interrupt to complete before returning. If you use this * function while holding a resource the IRQ handler may need you * will deadlock. It does not take associated threaded handlers * into account. * * Do not use this for shutdown scenarios where you must be sure * that all parts (hardirq and threaded handler) have completed. * * Returns: false if a threaded handler is active. * * This function may be called - with care - from IRQ context. 
*/ bool synchronize_hardirq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (desc) { __synchronize_hardirq(desc); return !atomic_read(&desc->threads_active); } return true; } EXPORT_SYMBOL(synchronize_hardirq); /** * synchronize_irq - wait for pending IRQ handlers (on other CPUs) * @irq: interrupt number to wait for * * This function waits for any pending IRQ handlers for this interrupt * to complete before returning. If you use this function while * holding a resource the IRQ handler may need you will deadlock. * * This function may be called - with care - from IRQ context. */ void synchronize_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (desc) { __synchronize_hardirq(desc); /* * We made sure that no hardirq handler is * running. Now verify that no threaded handlers are * active. */ wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); } } EXPORT_SYMBOL(synchronize_irq); #ifdef CONFIG_SMP cpumask_var_t irq_default_affinity; static int __irq_can_set_affinity(struct irq_desc *desc) { if (!desc || !irqd_can_balance(&desc->irq_data) || !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) return 0; return 1; } /** * irq_can_set_affinity - Check if the affinity of a given irq can be set * @irq: Interrupt to check * */ int irq_can_set_affinity(unsigned int irq) { return __irq_can_set_affinity(irq_to_desc(irq)); } /** * irq_set_thread_affinity - Notify irq threads to adjust affinity * @desc: irq descriptor which has affitnity changed * * We just set IRQTF_AFFINITY and delegate the affinity setting * to the interrupt thread itself. We can not call * set_cpus_allowed_ptr() here as we hold desc->lock and this * code can be called from hard interrupt context. 
*/ void irq_set_thread_affinity(struct irq_desc *desc) { struct irqaction *action = desc->action; while (action) { if (action->thread) set_bit(IRQTF_AFFINITY, &action->thread_flags); action = action->next; } } #ifdef CONFIG_GENERIC_PENDING_IRQ static inline bool irq_can_move_pcntxt(struct irq_data *data) { return irqd_can_move_in_process_context(data); } static inline bool irq_move_pending(struct irq_data *data) { return irqd_is_setaffinity_pending(data); } static inline void irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { cpumask_copy(desc->pending_mask, mask); } static inline void irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { cpumask_copy(mask, desc->pending_mask); } #else static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } static inline bool irq_move_pending(struct irq_data *data) { return false; } static inline void irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } static inline void irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } #endif int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_desc *desc = irq_data_to_desc(data); struct irq_chip *chip = irq_data_get_irq_chip(data); int ret; ret = chip->irq_set_affinity(data, mask, force); switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: cpumask_copy(desc->irq_common_data.affinity, mask); case IRQ_SET_MASK_OK_NOCOPY: irq_set_thread_affinity(desc); ret = 0; } return ret; } int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_chip *chip = irq_data_get_irq_chip(data); struct irq_desc *desc = irq_data_to_desc(data); int ret = 0; if (!chip || !chip->irq_set_affinity) return -EINVAL; if (irq_can_move_pcntxt(data)) { ret = irq_do_set_affinity(data, mask, force); } else { irqd_set_move_pending(data); irq_copy_pending(desc, mask); } if (desc->affinity_notify) { kref_get(&desc->affinity_notify->kref); 
schedule_work(&desc->affinity_notify->work); } irqd_set(data, IRQD_AFFINITY_SET); return ret; } int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; int ret; if (!desc) return -EINVAL; raw_spin_lock_irqsave(&desc->lock, flags); ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return -EINVAL; desc->affinity_hint = m; irq_put_desc_unlock(desc, flags); /* set the initial affinity to prevent every interrupt being on CPU0 */ if (m) __irq_set_affinity(irq, m, false); return 0; } EXPORT_SYMBOL_GPL(irq_set_affinity_hint); static void irq_affinity_notify(struct work_struct *work) { struct irq_affinity_notify *notify = container_of(work, struct irq_affinity_notify, work); struct irq_desc *desc = irq_to_desc(notify->irq); cpumask_var_t cpumask; unsigned long flags; if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) goto out; raw_spin_lock_irqsave(&desc->lock, flags); if (irq_move_pending(&desc->irq_data)) irq_get_pending(cpumask, desc); else cpumask_copy(cpumask, desc->irq_common_data.affinity); raw_spin_unlock_irqrestore(&desc->lock, flags); notify->notify(notify, cpumask); free_cpumask_var(cpumask); out: kref_put(&notify->kref, notify->release); } /** * irq_set_affinity_notifier - control notification of IRQ affinity changes * @irq: Interrupt for which to enable/disable notification * @notify: Context for notification, or %NULL to disable * notification. Function pointers must be initialised; * the other fields will be initialised by this function. * * Must be called in process context. Notification may only be enabled * after the IRQ is allocated and must be disabled before the IRQ is * freed using free_irq(). 
*/ int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { struct irq_desc *desc = irq_to_desc(irq); struct irq_affinity_notify *old_notify; unsigned long flags; /* The release function is promised process context */ might_sleep(); if (!desc) return -EINVAL; /* Complete initialisation of *notify */ if (notify) { notify->irq = irq; kref_init(&notify->kref); INIT_WORK(&notify->work, irq_affinity_notify); } raw_spin_lock_irqsave(&desc->lock, flags); old_notify = desc->affinity_notify; desc->affinity_notify = notify; raw_spin_unlock_irqrestore(&desc->lock, flags); if (old_notify) kref_put(&old_notify->kref, old_notify->release); return 0; } EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); #ifndef CONFIG_AUTO_IRQ_AFFINITY /* * Generic version of the affinity autoselector. */ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask) { struct cpumask *set = irq_default_affinity; int node = irq_desc_get_node(desc); /* Excludes PER_CPU and NO_BALANCE interrupts */ if (!__irq_can_set_affinity(desc)) return 0; /* * Preserve an userspace affinity setup, but make sure that * one of the targets is online. 
*/ if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { if (cpumask_intersects(desc->irq_common_data.affinity, cpu_online_mask)) set = desc->irq_common_data.affinity; else irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); } cpumask_and(mask, cpu_online_mask, set); if (node != NUMA_NO_NODE) { const struct cpumask *nodemask = cpumask_of_node(node); /* make sure at least one of the cpus in nodemask is online */ if (cpumask_intersects(mask, nodemask)) cpumask_and(mask, mask, nodemask); } irq_do_set_affinity(&desc->irq_data, mask, false); return 0; } #else /* Wrapper for ALPHA specific affinity selector magic */ static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask) { return irq_select_affinity(irq_desc_get_irq(d)); } #endif /* * Called when affinity is set via /proc/irq */ int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; int ret; raw_spin_lock_irqsave(&desc->lock, flags); ret = setup_affinity(desc, mask); raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } #else static inline int setup_affinity(struct irq_desc *desc, struct cpumask *mask) { return 0; } #endif /** * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt * @irq: interrupt number to set affinity * @vcpu_info: vCPU specific data * * This function uses the vCPU specific data to set the vCPU * affinity for an irq. The vCPU specific data is passed from * outside, such as KVM. One example code path is as below: * KVM -> IOMMU -> irq_set_vcpu_affinity(). 
*/ int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); struct irq_data *data; struct irq_chip *chip; int ret = -ENOSYS; if (!desc) return -EINVAL; data = irq_desc_get_irq_data(desc); chip = irq_data_get_irq_chip(data); if (chip && chip->irq_set_vcpu_affinity) ret = chip->irq_set_vcpu_affinity(data, vcpu_info); irq_put_desc_unlock(desc, flags); return ret; } EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); void __disable_irq(struct irq_desc *desc) { if (!desc->depth++) irq_disable(desc); } static int __disable_irq_nosync(unsigned int irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return -EINVAL; __disable_irq(desc); irq_put_desc_busunlock(desc, flags); return 0; } /** * disable_irq_nosync - disable an irq without waiting * @irq: Interrupt to disable * * Disable the selected interrupt line. Disables and Enables are * nested. * Unlike disable_irq(), this function does not ensure existing * instances of the IRQ handler have completed before returning. * * This function may be called from IRQ context. */ void disable_irq_nosync(unsigned int irq) { __disable_irq_nosync(irq); } EXPORT_SYMBOL(disable_irq_nosync); /** * disable_irq - disable an irq and wait for completion * @irq: Interrupt to disable * * Disable the selected interrupt line. Enables and Disables are * nested. * This function waits for any pending IRQ handlers for this interrupt * to complete before returning. If you use this function while * holding a resource the IRQ handler may need you will deadlock. * * This function may be called - with care - from IRQ context. */ void disable_irq(unsigned int irq) { if (!__disable_irq_nosync(irq)) synchronize_irq(irq); } EXPORT_SYMBOL(disable_irq); /** * disable_hardirq - disables an irq and waits for hardirq completion * @irq: Interrupt to disable * * Disable the selected interrupt line. 
Enables and Disables are * nested. * This function waits for any pending hard IRQ handlers for this * interrupt to complete before returning. If you use this function while * holding a resource the hard IRQ handler may need you will deadlock. * * When used to optimistically disable an interrupt from atomic context * the return value must be checked. * * Returns: false if a threaded handler is active. * * This function may be called - with care - from IRQ context. */ bool disable_hardirq(unsigned int irq) { if (!__disable_irq_nosync(irq)) return synchronize_hardirq(irq); return false; } EXPORT_SYMBOL_GPL(disable_hardirq); void __enable_irq(struct irq_desc *desc) { switch (desc->depth) { case 0: err_out: WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq_desc_get_irq(desc)); break; case 1: { if (desc->istate & IRQS_SUSPENDED) goto err_out; /* Prevent probing on this irq: */ irq_settings_set_noprobe(desc); irq_enable(desc); check_irq_resend(desc); /* fall-through */ } default: desc->depth--; } } /** * enable_irq - enable handling of an irq * @irq: Interrupt to enable * * Undoes the effect of one call to disable_irq(). If this * matches the last disable, processing of interrupts on this * IRQ line is re-enabled. * * This function may be called from IRQ context only when * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 
*/ void enable_irq(unsigned int irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return; if (WARN(!desc->irq_data.chip, KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) goto out; __enable_irq(desc); out: irq_put_desc_busunlock(desc, flags); } EXPORT_SYMBOL(enable_irq); static int set_irq_wake_real(unsigned int irq, unsigned int on) { struct irq_desc *desc = irq_to_desc(irq); int ret = -ENXIO; if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) return 0; if (desc->irq_data.chip->irq_set_wake) ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); return ret; } /** * irq_set_irq_wake - control irq power management wakeup * @irq: interrupt to control * @on: enable/disable power management wakeup * * Enable/disable power management wakeup mode, which is * disabled by default. Enables and disables must match, * just as they match for non-wakeup mode support. * * Wakeup mode lets this IRQ wake the system from sleep * states like "suspend to RAM". */ int irq_set_irq_wake(unsigned int irq, unsigned int on) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); int ret = 0; if (!desc) return -EINVAL; /* wakeup-capable irqs can be shared between drivers that * don't need to have the same sleep mode behaviors. 
*/ if (on) { if (desc->wake_depth++ == 0) { ret = set_irq_wake_real(irq, on); if (ret) desc->wake_depth = 0; else irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); } } else { if (desc->wake_depth == 0) { WARN(1, "Unbalanced IRQ %d wake disable\n", irq); } else if (--desc->wake_depth == 0) { ret = set_irq_wake_real(irq, on); if (ret) desc->wake_depth = 1; else irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); } } irq_put_desc_busunlock(desc, flags); return ret; } EXPORT_SYMBOL(irq_set_irq_wake); /* * Internal function that tells the architecture code whether a * particular irq has been exclusively allocated or is available * for driver use. */ int can_request_irq(unsigned int irq, unsigned long irqflags) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); int canrequest = 0; if (!desc) return 0; if (irq_settings_can_request(desc)) { if (!desc->action || irqflags & desc->action->flags & IRQF_SHARED) canrequest = 1; } irq_put_desc_unlock(desc, flags); return canrequest; } int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) { struct irq_chip *chip = desc->irq_data.chip; int ret, unmask = 0; if (!chip || !chip->irq_set_type) { /* * IRQF_TRIGGER_* but the PIC does not support multiple * flow-types? */ pr_debug("No set_type function for IRQ %d (%s)\n", irq_desc_get_irq(desc), chip ? (chip->name ? 
: "unknown") : "unknown"); return 0; } flags &= IRQ_TYPE_SENSE_MASK; if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { if (!irqd_irq_masked(&desc->irq_data)) mask_irq(desc); if (!irqd_irq_disabled(&desc->irq_data)) unmask = 1; } /* caller masked out all except trigger mode flags */ ret = chip->irq_set_type(&desc->irq_data, flags); switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); irqd_set(&desc->irq_data, flags); case IRQ_SET_MASK_OK_NOCOPY: flags = irqd_get_trigger_type(&desc->irq_data); irq_settings_set_trigger_mask(desc, flags); irqd_clear(&desc->irq_data, IRQD_LEVEL); irq_settings_clr_level(desc); if (flags & IRQ_TYPE_LEVEL_MASK) { irq_settings_set_level(desc); irqd_set(&desc->irq_data, IRQD_LEVEL); } ret = 0; break; default: pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", flags, irq_desc_get_irq(desc), chip->irq_set_type); } if (unmask) unmask_irq(desc); return ret; } #ifdef CONFIG_HARDIRQS_SW_RESEND int irq_set_parent(int irq, int parent_irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); if (!desc) return -EINVAL; desc->parent_irq = parent_irq; irq_put_desc_unlock(desc, flags); return 0; } #endif /* * Default primary interrupt handler for threaded interrupts. Is * assigned as primary handler when request_threaded_irq is called * with handler == NULL. Useful for oneshot interrupts. */ static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) { return IRQ_WAKE_THREAD; } /* * Primary handler for nested threaded interrupts. Should never be * called. 
*/ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) { WARN(1, "Primary handler called for nested irq %d\n", irq); return IRQ_NONE; } static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id) { WARN(1, "Secondary action handler called for irq %d\n", irq); return IRQ_NONE; } static int irq_wait_for_interrupt(struct irqaction *action) { set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) { __set_current_state(TASK_RUNNING); return 0; } schedule(); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); return -1; } /* * Oneshot interrupts keep the irq line masked until the threaded * handler finished. unmask if the interrupt has not been disabled and * is marked MASKED. */ static void irq_finalize_oneshot(struct irq_desc *desc, struct irqaction *action) { if (!(desc->istate & IRQS_ONESHOT) || action->handler == irq_forced_secondary_handler) return; again: chip_bus_lock(desc); raw_spin_lock_irq(&desc->lock); /* * Implausible though it may be we need to protect us against * the following scenario: * * The thread is faster done than the hard interrupt handler * on the other CPU. If we unmask the irq line then the * interrupt can come in again and masks the line, leaves due * to IRQS_INPROGRESS and the irq line is masked forever. * * This also serializes the state of shared oneshot handlers * versus "desc->threads_onehsot |= action->thread_mask;" in * irq_wake_thread(). See the comment there which explains the * serialization. */ if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(desc); cpu_relax(); goto again; } /* * Now check again, whether the thread should run. Otherwise * we would clear the threads_oneshot bit of this thread which * was just set. 
*/ if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) goto out_unlock; desc->threads_oneshot &= ~action->thread_mask; if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data)) unmask_threaded_irq(desc); out_unlock: raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(desc); } #ifdef CONFIG_SMP /* * Check whether we need to change the affinity of the interrupt thread. */ static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { cpumask_var_t mask; bool valid = true; if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) return; /* * In case we are out of memory we set IRQTF_AFFINITY again and * try again next time */ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { set_bit(IRQTF_AFFINITY, &action->thread_flags); return; } raw_spin_lock_irq(&desc->lock); /* * This code is triggered unconditionally. Check the affinity * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. */ if (desc->irq_common_data.affinity) cpumask_copy(mask, desc->irq_common_data.affinity); else valid = false; raw_spin_unlock_irq(&desc->lock); if (valid) set_cpus_allowed_ptr(current, mask); free_cpumask_var(mask); } #else static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } #endif /* * Interrupts which are not explicitely requested as threaded * interrupts rely on the implicit bh/preempt disable of the hard irq * context. So we need to disable bh here to avoid deadlocks and other * side effects. */ static irqreturn_t irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) { irqreturn_t ret; local_bh_disable(); ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); local_bh_enable(); return ret; } /* * Interrupts explicitly requested as threaded interrupts want to be * preemtible - many of them need to sleep and wait for slow busses to * complete. 
*/ static irqreturn_t irq_thread_fn(struct irq_desc *desc, struct irqaction *action) { irqreturn_t ret; ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); return ret; } static void wake_threads_waitq(struct irq_desc *desc) { if (atomic_dec_and_test(&desc->threads_active)) wake_up(&desc->wait_for_threads); } static void irq_thread_dtor(struct callback_head *unused) { struct task_struct *tsk = current; struct irq_desc *desc; struct irqaction *action; if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) return; action = kthread_data(tsk); pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", tsk->comm, tsk->pid, action->irq); desc = irq_to_desc(action->irq); /* * If IRQTF_RUNTHREAD is set, we need to decrement * desc->threads_active and wake possible waiters. */ if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) wake_threads_waitq(desc); /* Prevent a stale desc->threads_oneshot */ irq_finalize_oneshot(desc, action); } static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) { struct irqaction *secondary = action->secondary; if (WARN_ON_ONCE(!secondary)) return; raw_spin_lock_irq(&desc->lock); __irq_wake_thread(desc, secondary); raw_spin_unlock_irq(&desc->lock); } /* * Interrupt handler thread */ static int irq_thread(void *data) { struct callback_head on_exit_work; struct irqaction *action = data; struct irq_desc *desc = irq_to_desc(action->irq); irqreturn_t (*handler_fn)(struct irq_desc *desc, struct irqaction *action); if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, &action->thread_flags)) handler_fn = irq_forced_thread_fn; else handler_fn = irq_thread_fn; init_task_work(&on_exit_work, irq_thread_dtor); task_work_add(current, &on_exit_work, false); irq_thread_check_affinity(desc, action); while (!irq_wait_for_interrupt(action)) { irqreturn_t action_ret; irq_thread_check_affinity(desc, action); action_ret = handler_fn(desc, action); if (action_ret == IRQ_HANDLED) 
atomic_inc(&desc->threads_handled); if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); wake_threads_waitq(desc); } /* * This is the regular exit path. __free_irq() is stopping the * thread via kthread_stop() after calling * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the * oneshot mask bit can be set. We cannot verify that as we * cannot touch the oneshot mask at this point anymore as * __setup_irq() might have given out currents thread_mask * again. */ task_work_cancel(current, irq_thread_dtor); return 0; } /** * irq_wake_thread - wake the irq thread for the action identified by dev_id * @irq: Interrupt line * @dev_id: Device identity for which the thread should be woken * */ void irq_wake_thread(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; unsigned long flags; if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return; raw_spin_lock_irqsave(&desc->lock, flags); for (action = desc->action; action; action = action->next) { if (action->dev_id == dev_id) { if (action->thread) __irq_wake_thread(desc, action); break; } } raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL_GPL(irq_wake_thread); static int irq_setup_forced_threading(struct irqaction *new) { if (!force_irqthreads) return 0; if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) return 0; new->flags |= IRQF_ONESHOT; /* * Handle the case where we have a real primary handler and a * thread handler. We force thread them as well by creating a * secondary action. 
*/ if (new->handler != irq_default_primary_handler && new->thread_fn) { /* Allocate the secondary action */ new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!new->secondary) return -ENOMEM; new->secondary->handler = irq_forced_secondary_handler; new->secondary->thread_fn = new->thread_fn; new->secondary->dev_id = new->dev_id; new->secondary->irq = new->irq; new->secondary->name = new->name; } /* Deal with the primary handler */ set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); new->thread_fn = new->handler; new->handler = irq_default_primary_handler; return 0; } static int irq_request_resources(struct irq_desc *desc) { struct irq_data *d = &desc->irq_data; struct irq_chip *c = d->chip; return c->irq_request_resources ? c->irq_request_resources(d) : 0; } static void irq_release_resources(struct irq_desc *desc) { struct irq_data *d = &desc->irq_data; struct irq_chip *c = d->chip; if (c->irq_release_resources) c->irq_release_resources(d); } static int setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) { struct task_struct *t; struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; if (!secondary) { t = kthread_create(irq_thread, new, "irq/%d-%s", irq, new->name); } else { t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, new->name); param.sched_priority -= 1; } if (IS_ERR(t)) return PTR_ERR(t); sched_setscheduler_nocheck(t, SCHED_FIFO, &param); /* * We keep the reference to the task struct even if * the thread dies to avoid that the interrupt code * references an already freed task_struct. */ get_task_struct(t); new->thread = t; /* * Tell the thread to set its affinity. This is * important for shared interrupt handlers as we do * not invoke setup_affinity() for the secondary * handlers as everything is already set up. Even for * interrupts marked with IRQF_NO_BALANCE this is * correct as we want the thread to move to the cpu(s) * on which the requesting code placed the interrupt. 
*/ set_bit(IRQTF_AFFINITY, &new->thread_flags); return 0; } /* * Internal function to register an irqaction - typically used to * allocate special interrupts that are part of the architecture. */ static int __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) { struct irqaction *old, **old_ptr; unsigned long flags, thread_mask = 0; int ret, nested, shared = 0; cpumask_var_t mask; if (!desc) return -EINVAL; if (desc->irq_data.chip == &no_irq_chip) return -ENOSYS; if (!try_module_get(desc->owner)) return -ENODEV; new->irq = irq; /* * Check whether the interrupt nests into another interrupt * thread. */ nested = irq_settings_is_nested_thread(desc); if (nested) { if (!new->thread_fn) { ret = -EINVAL; goto out_mput; } /* * Replace the primary handler which was provided from * the driver for non nested interrupt handling by the * dummy function which warns when called. */ new->handler = irq_nested_primary_handler; } else { if (irq_settings_can_thread(desc)) { ret = irq_setup_forced_threading(new); if (ret) goto out_mput; } } /* * Create a handler thread when a thread function is supplied * and the interrupt does not nest into another interrupt * thread. */ if (new->thread_fn && !nested) { ret = setup_irq_thread(new, irq, false); if (ret) goto out_mput; if (new->secondary) { ret = setup_irq_thread(new->secondary, irq, true); if (ret) goto out_thread; } } if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { ret = -ENOMEM; goto out_thread; } /* * Drivers are often written to work w/o knowledge about the * underlying irq chip implementation, so a request for a * threaded irq without a primary hard irq context handler * requires the ONESHOT flag to be set. Some irq chips like * MSI based interrupts are per se one shot safe. Check the * chip flags, so we can avoid the unmask dance at the end of * the threaded handler for those. 
*/ if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) new->flags &= ~IRQF_ONESHOT; /* * The following block of code has to be executed atomically */ raw_spin_lock_irqsave(&desc->lock, flags); old_ptr = &desc->action; old = *old_ptr; if (old) { /* * Can't share interrupts unless both agree to and are * the same type (level, edge, polarity). So both flag * fields must have IRQF_SHARED set and the bits which * set the trigger type must match. Also all must * agree on ONESHOT. */ if (!((old->flags & new->flags) & IRQF_SHARED) || ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || ((old->flags ^ new->flags) & IRQF_ONESHOT)) goto mismatch; /* All handlers must agree on per-cpuness */ if ((old->flags & IRQF_PERCPU) != (new->flags & IRQF_PERCPU)) goto mismatch; /* add new interrupt at end of irq queue */ do { /* * Or all existing action->thread_mask bits, * so we can find the next zero bit for this * new action. */ thread_mask |= old->thread_mask; old_ptr = &old->next; old = *old_ptr; } while (old); shared = 1; } /* * Setup the thread mask for this irqaction for ONESHOT. For * !ONESHOT irqs the thread mask is 0 so we can avoid a * conditional in irq_wake_thread(). */ if (new->flags & IRQF_ONESHOT) { /* * Unlikely to have 32 resp 64 irqs sharing one line, * but who knows. */ if (thread_mask == ~0UL) { ret = -EBUSY; goto out_mask; } /* * The thread_mask for the action is or'ed to * desc->thread_active to indicate that the * IRQF_ONESHOT thread handler has been woken, but not * yet finished. The bit is cleared when a thread * completes. When all threads of a shared interrupt * line have completed desc->threads_active becomes * zero and the interrupt line is unmasked. See * handle.c:irq_wake_thread() for further information. * * If no thread is woken by primary (hard irq context) * interrupt handlers, then desc->threads_active is * also checked for zero to unmask the irq line in the * affected hard irq flow handlers * (handle_[fasteoi|level]_irq). 
* * The new action gets the first zero bit of * thread_mask assigned. See the loop above which or's * all existing action->thread_mask bits. */ new->thread_mask = 1 << ffz(thread_mask); } else if (new->handler == irq_default_primary_handler && !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { /* * The interrupt was requested with handler = NULL, so * we use the default primary handler for it. But it * does not have the oneshot flag set. In combination * with level interrupts this is deadly, because the * default primary handler just wakes the thread, then * the irq lines is reenabled, but the device still * has the level irq asserted. Rinse and repeat.... * * While this works for edge type interrupts, we play * it safe and reject unconditionally because we can't * say for sure which type this interrupt really * has. The type flags are unreliable as the * underlying chip implementation can override them. */ pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", irq); ret = -EINVAL; goto out_mask; } if (!shared) { ret = irq_request_resources(desc); if (ret) { pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", new->name, irq, desc->irq_data.chip->name); goto out_mask; } init_waitqueue_head(&desc->wait_for_threads); /* Setup the type (level, edge polarity) if configured: */ if (new->flags & IRQF_TRIGGER_MASK) { ret = __irq_set_trigger(desc, new->flags & IRQF_TRIGGER_MASK); if (ret) goto out_mask; } desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ IRQS_ONESHOT | IRQS_WAITING); irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); if (new->flags & IRQF_PERCPU) { irqd_set(&desc->irq_data, IRQD_PER_CPU); irq_settings_set_per_cpu(desc); } if (new->flags & IRQF_ONESHOT) desc->istate |= IRQS_ONESHOT; if (irq_settings_can_autoenable(desc)) irq_startup(desc, true); else /* Undo nested disables: */ desc->depth = 1; /* Exclude IRQ from balancing if requested */ if (new->flags & IRQF_NOBALANCING) { 
irq_settings_set_no_balancing(desc); irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } /* Set default affinity mask once everything is setup */ setup_affinity(desc, mask); } else if (new->flags & IRQF_TRIGGER_MASK) { unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; unsigned int omsk = irq_settings_get_trigger_mask(desc); if (nmsk != omsk) /* hope the handler works with current trigger mode */ pr_warning("irq %d uses trigger mode %u; requested %u\n", irq, nmsk, omsk); } *old_ptr = new; irq_pm_install_action(desc, new); /* Reset broken irq detection when installing new handler */ desc->irq_count = 0; desc->irqs_unhandled = 0; /* * Check whether we disabled the irq via the spurious handler * before. Reenable it and give it another chance. */ if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { desc->istate &= ~IRQS_SPURIOUS_DISABLED; __enable_irq(desc); } raw_spin_unlock_irqrestore(&desc->lock, flags); /* * Strictly no need to wake it up, but hung_task complains * when no hard interrupt wakes the thread up. */ if (new->thread) wake_up_process(new->thread); if (new->secondary) wake_up_process(new->secondary->thread); register_irq_proc(irq, desc); new->dir = NULL; register_handler_proc(irq, new); free_cpumask_var(mask); return 0; mismatch: if (!(new->flags & IRQF_PROBE_SHARED)) { pr_err("Flags mismatch irq %d. %08x (%s) vs. 
%08x (%s)\n", irq, new->flags, new->name, old->flags, old->name); #ifdef CONFIG_DEBUG_SHIRQ dump_stack(); #endif } ret = -EBUSY; out_mask: raw_spin_unlock_irqrestore(&desc->lock, flags); free_cpumask_var(mask); out_thread: if (new->thread) { struct task_struct *t = new->thread; new->thread = NULL; kthread_stop(t); put_task_struct(t); } if (new->secondary && new->secondary->thread) { struct task_struct *t = new->secondary->thread; new->secondary->thread = NULL; kthread_stop(t); put_task_struct(t); } out_mput: module_put(desc->owner); return ret; } /** * setup_irq - setup an interrupt * @irq: Interrupt line to setup * @act: irqaction for the interrupt * * Used to statically setup interrupts in the early boot process. */ int setup_irq(unsigned int irq, struct irqaction *act) { int retval; struct irq_desc *desc = irq_to_desc(irq); if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) return -EINVAL; chip_bus_lock(desc); retval = __setup_irq(irq, desc, act); chip_bus_sync_unlock(desc); return retval; } EXPORT_SYMBOL_GPL(setup_irq); /* * Internal function to unregister an irqaction - used to free * regular and special interrupts that are part of the architecture. 
*/ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action, **action_ptr; unsigned long flags; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); if (!desc) return NULL; raw_spin_lock_irqsave(&desc->lock, flags); /* * There can be multiple actions per IRQ descriptor, find the right * one based on the dev_id: */ action_ptr = &desc->action; for (;;) { action = *action_ptr; if (!action) { WARN(1, "Trying to free already-free IRQ %d\n", irq); raw_spin_unlock_irqrestore(&desc->lock, flags); return NULL; } if (action->dev_id == dev_id) break; action_ptr = &action->next; } /* Found it - now remove it from the list of entries: */ *action_ptr = action->next; irq_pm_remove_action(desc, action); /* If this was the last handler, shut down the IRQ line: */ if (!desc->action) { irq_settings_clr_disable_unlazy(desc); irq_shutdown(desc); irq_release_resources(desc); } #ifdef CONFIG_SMP /* make sure affinity_hint is cleaned up */ if (WARN_ON_ONCE(desc->affinity_hint)) desc->affinity_hint = NULL; #endif raw_spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); /* Make sure it's not being used on another CPU: */ synchronize_irq(irq); #ifdef CONFIG_DEBUG_SHIRQ /* * It's a shared IRQ -- the driver ought to be prepared for an IRQ * event to happen even now it's being freed, so let's make sure that * is so by doing an extra call to the handler .... * * ( We do this after actually deregistering it, to make sure that a * 'real' IRQ doesn't run in * parallel with our fake. 
) */ if (action->flags & IRQF_SHARED) { local_irq_save(flags); action->handler(irq, dev_id); local_irq_restore(flags); } #endif if (action->thread) { kthread_stop(action->thread); put_task_struct(action->thread); if (action->secondary && action->secondary->thread) { kthread_stop(action->secondary->thread); put_task_struct(action->secondary->thread); } } module_put(desc->owner); kfree(action->secondary); return action; } /** * remove_irq - free an interrupt * @irq: Interrupt line to free * @act: irqaction for the interrupt * * Used to remove interrupts statically setup by the early boot process. */ void remove_irq(unsigned int irq, struct irqaction *act) { struct irq_desc *desc = irq_to_desc(irq); if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) __free_irq(irq, act->dev_id); } EXPORT_SYMBOL_GPL(remove_irq); /** * free_irq - free an interrupt allocated with request_irq * @irq: Interrupt line to free * @dev_id: Device identity to free * * Remove an interrupt handler. The handler is removed and if the * interrupt line is no longer in use by any driver it is disabled. * On a shared IRQ the caller must ensure the interrupt is disabled * on the card it drives before calling this function. The function * does not return until any executing interrupts for this IRQ * have completed. * * This function must not be called from interrupt context. */ void free_irq(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return; #ifdef CONFIG_SMP if (WARN_ON(desc->affinity_notify)) desc->affinity_notify = NULL; #endif chip_bus_lock(desc); kfree(__free_irq(irq, dev_id)); chip_bus_sync_unlock(desc); } EXPORT_SYMBOL(free_irq); /** * request_threaded_irq - allocate an interrupt line * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. 
* Primary handler for threaded interrupts * If NULL and thread_fn != NULL the default * primary handler is installed * @thread_fn: Function called from the irq handler thread * If NULL, no irq thread is created * @irqflags: Interrupt type flags * @devname: An ascii name for the claiming device * @dev_id: A cookie passed back to the handler function * * This call allocates interrupt resources and enables the * interrupt line and IRQ handling. From the point this * call is made your handler function may be invoked. Since * your handler function must clear any interrupt the board * raises, you must take care both to initialise your hardware * and to set up the interrupt handler in the right order. * * If you want to set up a threaded irq handler for your device * then you need to supply @handler and @thread_fn. @handler is * still called in hard interrupt context and has to check * whether the interrupt originates from the device. If yes it * needs to disable the interrupt on the device and return * IRQ_WAKE_THREAD which will wake up the handler thread and run * @thread_fn. This split handler design is necessary to support * shared interrupts. * * Dev_id must be globally unique. Normally the address of the * device data structure is used as the cookie. Since the handler * receives this value it makes sense to use it. * * If your interrupt is shared you must pass a non NULL dev_id * as this is required when freeing the interrupt. * * Flags: * * IRQF_SHARED Interrupt is shared * IRQF_TRIGGER_* Specify active edge(s) or level * */ int request_threaded_irq(unsigned int irq, irq_handler_t handler, irq_handler_t thread_fn, unsigned long irqflags, const char *devname, void *dev_id) { struct irqaction *action; struct irq_desc *desc; int retval; /* * Sanity-check: shared interrupts must pass in a real dev-ID, * otherwise we'll have trouble later trying to figure out * which interrupt is which (messes up the interrupt freeing * logic etc). 
* * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and * it cannot be set along with IRQF_NO_SUSPEND. */ if (((irqflags & IRQF_SHARED) && !dev_id) || (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) return -EINVAL; desc = irq_to_desc(irq); if (!desc) return -EINVAL; if (!irq_settings_can_request(desc) || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return -EINVAL; if (!handler) { if (!thread_fn) return -EINVAL; handler = irq_default_primary_handler; } action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!action) return -ENOMEM; action->handler = handler; action->thread_fn = thread_fn; action->flags = irqflags; action->name = devname; action->dev_id = dev_id; chip_bus_lock(desc); retval = __setup_irq(irq, desc, action); chip_bus_sync_unlock(desc); if (retval) { kfree(action->secondary); kfree(action); } #ifdef CONFIG_DEBUG_SHIRQ_FIXME if (!retval && (irqflags & IRQF_SHARED)) { /* * It's a shared IRQ -- the driver ought to be prepared for it * to happen immediately, so let's make sure.... * We disable the irq to make sure that a 'real' IRQ doesn't * run in parallel with our fake. */ unsigned long flags; disable_irq(irq); local_irq_save(flags); handler(irq, dev_id); local_irq_restore(flags); enable_irq(irq); } #endif return retval; } EXPORT_SYMBOL(request_threaded_irq); /** * request_any_context_irq - allocate an interrupt line * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. * Threaded handler for threaded interrupts. * @flags: Interrupt type flags * @name: An ascii name for the claiming device * @dev_id: A cookie passed back to the handler function * * This call allocates interrupt resources and enables the * interrupt line and IRQ handling. It selects either a * hardirq or threaded handling method depending on the * context. * * On failure, it returns a negative value. 
On success, * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. */ int request_any_context_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); int ret; if (!desc) return -EINVAL; if (irq_settings_is_nested_thread(desc)) { ret = request_threaded_irq(irq, NULL, handler, flags, name, dev_id); return !ret ? IRQC_IS_NESTED : ret; } ret = request_irq(irq, handler, flags, name, dev_id); return !ret ? IRQC_IS_HARDIRQ : ret; } EXPORT_SYMBOL_GPL(request_any_context_irq); void enable_percpu_irq(unsigned int irq, unsigned int type) { unsigned int cpu = smp_processor_id(); unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); if (!desc) return; type &= IRQ_TYPE_SENSE_MASK; if (type != IRQ_TYPE_NONE) { int ret; ret = __irq_set_trigger(desc, type); if (ret) { WARN(1, "failed to set type for IRQ%d\n", irq); goto out; } } irq_percpu_enable(desc, cpu); out: irq_put_desc_unlock(desc, flags); } EXPORT_SYMBOL_GPL(enable_percpu_irq); void disable_percpu_irq(unsigned int irq) { unsigned int cpu = smp_processor_id(); unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); if (!desc) return; irq_percpu_disable(desc, cpu); irq_put_desc_unlock(desc, flags); } EXPORT_SYMBOL_GPL(disable_percpu_irq); /* * Internal function to unregister a percpu irqaction. 
*/ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; unsigned long flags; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); if (!desc) return NULL; raw_spin_lock_irqsave(&desc->lock, flags); action = desc->action; if (!action || action->percpu_dev_id != dev_id) { WARN(1, "Trying to free already-free IRQ %d\n", irq); goto bad; } if (!cpumask_empty(desc->percpu_enabled)) { WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", irq, cpumask_first(desc->percpu_enabled)); goto bad; } /* Found it - now remove it from the list of entries: */ desc->action = NULL; raw_spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); module_put(desc->owner); return action; bad: raw_spin_unlock_irqrestore(&desc->lock, flags); return NULL; } /** * remove_percpu_irq - free a per-cpu interrupt * @irq: Interrupt line to free * @act: irqaction for the interrupt * * Used to remove interrupts statically setup by the early boot process. */ void remove_percpu_irq(unsigned int irq, struct irqaction *act) { struct irq_desc *desc = irq_to_desc(irq); if (desc && irq_settings_is_per_cpu_devid(desc)) __free_percpu_irq(irq, act->percpu_dev_id); } /** * free_percpu_irq - free an interrupt allocated with request_percpu_irq * @irq: Interrupt line to free * @dev_id: Device identity to free * * Remove a percpu interrupt handler. The handler is removed, but * the interrupt line is not disabled. This must be done on each * CPU before calling this function. The function does not return * until any executing interrupts for this IRQ have completed. * * This function must not be called from interrupt context. 
*/ void free_percpu_irq(unsigned int irq, void __percpu *dev_id) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !irq_settings_is_per_cpu_devid(desc)) return; chip_bus_lock(desc); kfree(__free_percpu_irq(irq, dev_id)); chip_bus_sync_unlock(desc); } EXPORT_SYMBOL_GPL(free_percpu_irq); /** * setup_percpu_irq - setup a per-cpu interrupt * @irq: Interrupt line to setup * @act: irqaction for the interrupt * * Used to statically setup per-cpu interrupts in the early boot process. */ int setup_percpu_irq(unsigned int irq, struct irqaction *act) { struct irq_desc *desc = irq_to_desc(irq); int retval; if (!desc || !irq_settings_is_per_cpu_devid(desc)) return -EINVAL; chip_bus_lock(desc); retval = __setup_irq(irq, desc, act); chip_bus_sync_unlock(desc); return retval; } /** * request_percpu_irq - allocate a percpu interrupt line * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. * @devname: An ascii name for the claiming device * @dev_id: A percpu cookie passed back to the handler function * * This call allocates interrupt resources and enables the * interrupt on the local CPU. If the interrupt is supposed to be * enabled on other CPUs, it has to be done on each CPU using * enable_percpu_irq(). * * Dev_id must be globally unique. It is a per-cpu variable, and * the handler gets called with the interrupted CPU's instance of * that variable. 
*/ int request_percpu_irq(unsigned int irq, irq_handler_t handler, const char *devname, void __percpu *dev_id) { struct irqaction *action; struct irq_desc *desc; int retval; if (!dev_id) return -EINVAL; desc = irq_to_desc(irq); if (!desc || !irq_settings_can_request(desc) || !irq_settings_is_per_cpu_devid(desc)) return -EINVAL; action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!action) return -ENOMEM; action->handler = handler; action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; action->name = devname; action->percpu_dev_id = dev_id; chip_bus_lock(desc); retval = __setup_irq(irq, desc, action); chip_bus_sync_unlock(desc); if (retval) kfree(action); return retval; } EXPORT_SYMBOL_GPL(request_percpu_irq); /** * irq_get_irqchip_state - returns the irqchip state of a interrupt. * @irq: Interrupt line that is forwarded to a VM * @which: One of IRQCHIP_STATE_* the caller wants to know about * @state: a pointer to a boolean where the state is to be storeed * * This call snapshots the internal irqchip state of an * interrupt, returning into @state the bit corresponding to * stage @which * * This function should be called with preemption disabled if the * interrupt controller has per-cpu registers. */ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool *state) { struct irq_desc *desc; struct irq_data *data; struct irq_chip *chip; unsigned long flags; int err = -EINVAL; desc = irq_get_desc_buslock(irq, &flags, 0); if (!desc) return err; data = irq_desc_get_irq_data(desc); do { chip = irq_data_get_irq_chip(data); if (chip->irq_get_irqchip_state) break; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY data = data->parent_data; #else data = NULL; #endif } while (data); if (data) err = chip->irq_get_irqchip_state(data, which, state); irq_put_desc_busunlock(desc, flags); return err; } EXPORT_SYMBOL_GPL(irq_get_irqchip_state); /** * irq_set_irqchip_state - set the state of a forwarded interrupt. 
* @irq: Interrupt line that is forwarded to a VM * @which: State to be restored (one of IRQCHIP_STATE_*) * @val: Value corresponding to @which * * This call sets the internal irqchip state of an interrupt, * depending on the value of @which. * * This function should be called with preemption disabled if the * interrupt controller has per-cpu registers. */ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool val) { struct irq_desc *desc; struct irq_data *data; struct irq_chip *chip; unsigned long flags; int err = -EINVAL; desc = irq_get_desc_buslock(irq, &flags, 0); if (!desc) return err; data = irq_desc_get_irq_data(desc); do { chip = irq_data_get_irq_chip(data); if (chip->irq_set_irqchip_state) break; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY data = data->parent_data; #else data = NULL; #endif } while (data); if (data) err = chip->irq_set_irqchip_state(data, which, val); irq_put_desc_busunlock(desc, flags); return err; } EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
gpl-2.0
rahulkodinya/drm
drivers/mtd/nand/r852.c
2362
25269
/* * Copyright © 2009 - Maxim Levitsky * driver for Ricoh xD readers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/delay.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <linux/sched.h> #include "sm_common.h" #include "r852.h" static bool r852_enable_dma = 1; module_param(r852_enable_dma, bool, S_IRUGO); MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)"); static int debug; module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug level (0-2)"); /* read register */ static inline uint8_t r852_read_reg(struct r852_device *dev, int address) { uint8_t reg = readb(dev->mmio + address); return reg; } /* write register */ static inline void r852_write_reg(struct r852_device *dev, int address, uint8_t value) { writeb(value, dev->mmio + address); mmiowb(); } /* read dword sized register */ static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address) { uint32_t reg = le32_to_cpu(readl(dev->mmio + address)); return reg; } /* write dword sized register */ static inline void r852_write_reg_dword(struct r852_device *dev, int address, uint32_t value) { writel(cpu_to_le32(value), dev->mmio + address); mmiowb(); } /* returns pointer to our private structure */ static inline struct r852_device *r852_get_dev(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; return chip->priv; } /* check if controller supports dma */ static void r852_dma_test(struct r852_device *dev) { dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) & (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2); if (!dev->dma_usable) message("Non dma capable device detected, dma disabled"); if 
(!r852_enable_dma) { message("disabling dma on user request"); dev->dma_usable = 0; } } /* * Enable dma. Enables ether first or second stage of the DMA, * Expects dev->dma_dir and dev->dma_state be set */ static void r852_dma_enable(struct r852_device *dev) { uint8_t dma_reg, dma_irq_reg; /* Set up dma settings */ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS); dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY); if (dev->dma_dir) dma_reg |= R852_DMA_READ; if (dev->dma_state == DMA_INTERNAL) { dma_reg |= R852_DMA_INTERNAL; /* Precaution to make sure HW doesn't write */ /* to random kernel memory */ r852_write_reg_dword(dev, R852_DMA_ADDR, cpu_to_le32(dev->phys_bounce_buffer)); } else { dma_reg |= R852_DMA_MEMORY; r852_write_reg_dword(dev, R852_DMA_ADDR, cpu_to_le32(dev->phys_dma_addr)); } /* Precaution: make sure write reached the device */ r852_read_reg_dword(dev, R852_DMA_ADDR); r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg); /* Set dma irq */ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, dma_irq_reg | R852_DMA_IRQ_INTERNAL | R852_DMA_IRQ_ERROR | R852_DMA_IRQ_MEMORY); } /* * Disable dma, called from the interrupt handler, which specifies * success of the operation via 'error' argument */ static void r852_dma_done(struct r852_device *dev, int error) { WARN_ON(dev->dma_stage == 0); r852_write_reg_dword(dev, R852_DMA_IRQ_STA, r852_read_reg_dword(dev, R852_DMA_IRQ_STA)); r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0); r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0); /* Precaution to make sure HW doesn't write to random kernel memory */ r852_write_reg_dword(dev, R852_DMA_ADDR, cpu_to_le32(dev->phys_bounce_buffer)); r852_read_reg_dword(dev, R852_DMA_ADDR); dev->dma_error = error; dev->dma_stage = 0; if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer) pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN, dev->dma_dir ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); } /* * Wait, till dma is done, which includes both phases of it */ static int r852_dma_wait(struct r852_device *dev) { long timeout = wait_for_completion_timeout(&dev->dma_done, msecs_to_jiffies(1000)); if (!timeout) { dbg("timeout waiting for DMA interrupt"); return -ETIMEDOUT; } return 0; } /* * Read/Write one page using dma. Only pages can be read (512 bytes) */ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read) { int bounce = 0; unsigned long flags; int error; dev->dma_error = 0; /* Set dma direction */ dev->dma_dir = do_read; dev->dma_stage = 1; INIT_COMPLETION(dev->dma_done); dbg_verbose("doing dma %s ", do_read ? "read" : "write"); /* Set initial dma state: for reading first fill on board buffer, from device, for writes first fill the buffer from memory*/ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY; /* if incoming buffer is not page aligned, we should do bounce */ if ((unsigned long)buf & (R852_DMA_LEN-1)) bounce = 1; if (!bounce) { dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf, R852_DMA_LEN, (do_read ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr)) bounce = 1; } if (bounce) { dbg_verbose("dma: using bounce buffer"); dev->phys_dma_addr = dev->phys_bounce_buffer; if (!do_read) memcpy(dev->bounce_buffer, buf, R852_DMA_LEN); } /* Enable DMA */ spin_lock_irqsave(&dev->irqlock, flags); r852_dma_enable(dev); spin_unlock_irqrestore(&dev->irqlock, flags); /* Wait till complete */ error = r852_dma_wait(dev); if (error) { r852_dma_done(dev, error); return; } if (do_read && bounce) memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN); } /* * Program data lines of the nand chip to send data to it */ void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct r852_device *dev = r852_get_dev(mtd); uint32_t reg; /* Don't allow any access to hardware if we suspect card removal */ if (dev->card_unstable) return; /* Special case for whole sector read */ if (len == R852_DMA_LEN && dev->dma_usable) { r852_do_dma(dev, (uint8_t *)buf, 0); return; } /* write DWORD chinks - faster */ while (len) { reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; r852_write_reg_dword(dev, R852_DATALINE, reg); buf += 4; len -= 4; } /* write rest */ while (len) r852_write_reg(dev, R852_DATALINE, *buf++); } /* * Read data lines of the nand chip to retrieve data */ void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct r852_device *dev = r852_get_dev(mtd); uint32_t reg; if (dev->card_unstable) { /* since we can't signal error here, at least, return predictable buffer */ memset(buf, 0, len); return; } /* special case for whole sector read */ if (len == R852_DMA_LEN && dev->dma_usable) { r852_do_dma(dev, buf, 1); return; } /* read in dword sized chunks */ while (len >= 4) { reg = r852_read_reg_dword(dev, R852_DATALINE); *buf++ = reg & 0xFF; *buf++ = (reg >> 8) & 0xFF; *buf++ = (reg >> 16) & 0xFF; *buf++ = (reg >> 24) & 0xFF; len -= 4; } /* read the reset by bytes */ while (len--) *buf++ = r852_read_reg(dev, 
R852_DATALINE); } /* * Read one byte from nand chip */ static uint8_t r852_read_byte(struct mtd_info *mtd) { struct r852_device *dev = r852_get_dev(mtd); /* Same problem as in r852_read_buf.... */ if (dev->card_unstable) return 0; return r852_read_reg(dev, R852_DATALINE); } /* * Control several chip lines & send commands */ void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl) { struct r852_device *dev = r852_get_dev(mtd); if (dev->card_unstable) return; if (ctrl & NAND_CTRL_CHANGE) { dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND | R852_CTL_ON | R852_CTL_CARDENABLE); if (ctrl & NAND_ALE) dev->ctlreg |= R852_CTL_DATA; if (ctrl & NAND_CLE) dev->ctlreg |= R852_CTL_COMMAND; if (ctrl & NAND_NCE) dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON); else dev->ctlreg &= ~R852_CTL_WRITE; /* when write is stareted, enable write access */ if (dat == NAND_CMD_ERASE1) dev->ctlreg |= R852_CTL_WRITE; r852_write_reg(dev, R852_CTL, dev->ctlreg); } /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need to set write mode */ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) { dev->ctlreg |= R852_CTL_WRITE; r852_write_reg(dev, R852_CTL, dev->ctlreg); } if (dat != NAND_CMD_NONE) r852_write_reg(dev, R852_DATALINE, dat); } /* * Wait till card is ready. * based on nand_wait, but returns errors on DMA error */ int r852_wait(struct mtd_info *mtd, struct nand_chip *chip) { struct r852_device *dev = chip->priv; unsigned long timeout; int status; timeout = jiffies + (chip->state == FL_ERASING ? msecs_to_jiffies(400) : msecs_to_jiffies(20)); while (time_before(jiffies, timeout)) if (chip->dev_ready(mtd)) break; chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); status = (int)chip->read_byte(mtd); /* Unfortunelly, no way to send detailed error status... 
*/ if (dev->dma_error) { status |= NAND_STATUS_FAIL; dev->dma_error = 0; } return status; } /* * Check if card is ready */ int r852_ready(struct mtd_info *mtd) { struct r852_device *dev = r852_get_dev(mtd); return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); } /* * Set ECC engine mode */ void r852_ecc_hwctl(struct mtd_info *mtd, int mode) { struct r852_device *dev = r852_get_dev(mtd); if (dev->card_unstable) return; switch (mode) { case NAND_ECC_READ: case NAND_ECC_WRITE: /* enable ecc generation/check*/ dev->ctlreg |= R852_CTL_ECC_ENABLE; /* flush ecc buffer */ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); r852_read_reg_dword(dev, R852_DATALINE); r852_write_reg(dev, R852_CTL, dev->ctlreg); return; case NAND_ECC_READSYN: /* disable ecc generation */ dev->ctlreg &= ~R852_CTL_ECC_ENABLE; r852_write_reg(dev, R852_CTL, dev->ctlreg); } } /* * Calculate ECC, only used for writes */ int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat, uint8_t *ecc_code) { struct r852_device *dev = r852_get_dev(mtd); struct sm_oob *oob = (struct sm_oob *)ecc_code; uint32_t ecc1, ecc2; if (dev->card_unstable) return 0; dev->ctlreg &= ~R852_CTL_ECC_ENABLE; r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); ecc1 = r852_read_reg_dword(dev, R852_DATALINE); ecc2 = r852_read_reg_dword(dev, R852_DATALINE); oob->ecc1[0] = (ecc1) & 0xFF; oob->ecc1[1] = (ecc1 >> 8) & 0xFF; oob->ecc1[2] = (ecc1 >> 16) & 0xFF; oob->ecc2[0] = (ecc2) & 0xFF; oob->ecc2[1] = (ecc2 >> 8) & 0xFF; oob->ecc2[2] = (ecc2 >> 16) & 0xFF; r852_write_reg(dev, R852_CTL, dev->ctlreg); return 0; } /* * Correct the data using ECC, hw did almost everything for us */ int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc) { uint16_t ecc_reg; uint8_t ecc_status, err_byte; int i, error = 0; struct r852_device *dev = r852_get_dev(mtd); if (dev->card_unstable) return 0; if (dev->dma_error) { dev->dma_error = 0; return -1; } r852_write_reg(dev, 
R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); ecc_reg = r852_read_reg_dword(dev, R852_DATALINE); r852_write_reg(dev, R852_CTL, dev->ctlreg); for (i = 0 ; i <= 1 ; i++) { ecc_status = (ecc_reg >> 8) & 0xFF; /* ecc uncorrectable error */ if (ecc_status & R852_ECC_FAIL) { dbg("ecc: unrecoverable error, in half %d", i); error = -1; goto exit; } /* correctable error */ if (ecc_status & R852_ECC_CORRECTABLE) { err_byte = ecc_reg & 0xFF; dbg("ecc: recoverable error, " "in half %d, byte %d, bit %d", i, err_byte, ecc_status & R852_ECC_ERR_BIT_MSK); dat[err_byte] ^= 1 << (ecc_status & R852_ECC_ERR_BIT_MSK); error++; } dat += 256; ecc_reg >>= 16; } exit: return error; } /* * This is copy of nand_read_oob_std * nand_read_oob_syndrome assumes we can send column address - we can't */ static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) { chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } /* * Start the nand engine */ void r852_engine_enable(struct r852_device *dev) { if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) { r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); } else { r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); } msleep(300); r852_write_reg(dev, R852_CTL, 0); } /* * Stop the nand engine */ void r852_engine_disable(struct r852_device *dev) { r852_write_reg_dword(dev, R852_HW, 0); r852_write_reg(dev, R852_CTL, R852_CTL_RESET); } /* * Test if card is present */ void r852_card_update_present(struct r852_device *dev) { unsigned long flags; uint8_t reg; spin_lock_irqsave(&dev->irqlock, flags); reg = r852_read_reg(dev, R852_CARD_STA); dev->card_detected = !!(reg & R852_CARD_STA_PRESENT); spin_unlock_irqrestore(&dev->irqlock, flags); } /* * Update card detection IRQ state according to current card state * which is read in r852_card_update_present */ void 
r852_update_card_detect(struct r852_device *dev) { int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); dev->card_unstable = 0; card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT); card_detect_reg |= R852_CARD_IRQ_GENABLE; card_detect_reg |= dev->card_detected ? R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT; r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg); } ssize_t r852_media_type_show(struct device *sys_dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev); struct r852_device *dev = r852_get_dev(mtd); char *data = dev->sm ? "smartmedia" : "xd"; strcpy(buf, data); return strlen(data); } DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL); /* Detect properties of card in slot */ void r852_update_media_status(struct r852_device *dev) { uint8_t reg; unsigned long flags; int readonly; spin_lock_irqsave(&dev->irqlock, flags); if (!dev->card_detected) { message("card removed"); spin_unlock_irqrestore(&dev->irqlock, flags); return ; } readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO; reg = r852_read_reg(dev, R852_DMA_CAP); dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT); message("detected %s %s card in slot", dev->sm ? "SmartMedia" : "xD", readonly ? 
"readonly" : "writeable"); dev->readonly = readonly; spin_unlock_irqrestore(&dev->irqlock, flags); } /* * Register the nand device * Called when the card is detected */ int r852_register_nand_device(struct r852_device *dev) { dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); if (!dev->mtd) goto error1; WARN_ON(dev->card_registred); dev->mtd->owner = THIS_MODULE; dev->mtd->priv = dev->chip; dev->mtd->dev.parent = &dev->pci_dev->dev; if (dev->readonly) dev->chip->options |= NAND_ROM; r852_engine_enable(dev); if (sm_register_device(dev->mtd, dev->sm)) goto error2; if (device_create_file(&dev->mtd->dev, &dev_attr_media_type)) message("can't create media type sysfs attribute"); dev->card_registred = 1; return 0; error2: kfree(dev->mtd); error1: /* Force card redetect */ dev->card_detected = 0; return -1; } /* * Unregister the card */ void r852_unregister_nand_device(struct r852_device *dev) { if (!dev->card_registred) return; device_remove_file(&dev->mtd->dev, &dev_attr_media_type); nand_release(dev->mtd); r852_engine_disable(dev); dev->card_registred = 0; kfree(dev->mtd); dev->mtd = NULL; } /* Card state updater */ void r852_card_detect_work(struct work_struct *work) { struct r852_device *dev = container_of(work, struct r852_device, card_detect_work.work); r852_card_update_present(dev); r852_update_card_detect(dev); dev->card_unstable = 0; /* False alarm */ if (dev->card_detected == dev->card_registred) goto exit; /* Read media properties */ r852_update_media_status(dev); /* Register the card */ if (dev->card_detected) r852_register_nand_device(dev); else r852_unregister_nand_device(dev); exit: r852_update_card_detect(dev); } /* Ack + disable IRQ generation */ static void r852_disable_irqs(struct r852_device *dev) { uint8_t reg; reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK); reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, reg & 
~R852_DMA_IRQ_MASK); r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK); r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK); } /* Interrupt handler */ static irqreturn_t r852_irq(int irq, void *data) { struct r852_device *dev = (struct r852_device *)data; uint8_t card_status, dma_status; unsigned long flags; irqreturn_t ret = IRQ_NONE; spin_lock_irqsave(&dev->irqlock, flags); /* handle card detection interrupts first */ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA); r852_write_reg(dev, R852_CARD_IRQ_STA, card_status); if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) { ret = IRQ_HANDLED; dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT); /* we shouldn't receive any interrupts if we wait for card to settle */ WARN_ON(dev->card_unstable); /* disable irqs while card is unstable */ /* this will timeout DMA if active, but better that garbage */ r852_disable_irqs(dev); if (dev->card_unstable) goto out; /* let, card state to settle a bit, and then do the work */ dev->card_unstable = 1; queue_delayed_work(dev->card_workqueue, &dev->card_detect_work, msecs_to_jiffies(100)); goto out; } /* Handle dma interrupts */ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA); r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status); if (dma_status & R852_DMA_IRQ_MASK) { ret = IRQ_HANDLED; if (dma_status & R852_DMA_IRQ_ERROR) { dbg("received dma error IRQ"); r852_dma_done(dev, -EIO); complete(&dev->dma_done); goto out; } /* received DMA interrupt out of nowhere? 
*/ WARN_ON_ONCE(dev->dma_stage == 0); if (dev->dma_stage == 0) goto out; /* done device access */ if (dev->dma_state == DMA_INTERNAL && (dma_status & R852_DMA_IRQ_INTERNAL)) { dev->dma_state = DMA_MEMORY; dev->dma_stage++; } /* done memory DMA */ if (dev->dma_state == DMA_MEMORY && (dma_status & R852_DMA_IRQ_MEMORY)) { dev->dma_state = DMA_INTERNAL; dev->dma_stage++; } /* Enable 2nd half of dma dance */ if (dev->dma_stage == 2) r852_dma_enable(dev); /* Operation done */ if (dev->dma_stage == 3) { r852_dma_done(dev, 0); complete(&dev->dma_done); } goto out; } /* Handle unknown interrupts */ if (dma_status) dbg("bad dma IRQ status = %x", dma_status); if (card_status & ~R852_CARD_STA_CD) dbg("strange card status = %x", card_status); out: spin_unlock_irqrestore(&dev->irqlock, flags); return ret; } int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { int error; struct nand_chip *chip; struct r852_device *dev; /* pci initialization */ error = pci_enable_device(pci_dev); if (error) goto error1; pci_set_master(pci_dev); error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); if (error) goto error2; error = pci_request_regions(pci_dev, DRV_NAME); if (error) goto error3; error = -ENOMEM; /* init nand chip, but register it only on card insert */ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); if (!chip) goto error4; /* commands */ chip->cmd_ctrl = r852_cmdctl; chip->waitfunc = r852_wait; chip->dev_ready = r852_ready; /* I/O */ chip->read_byte = r852_read_byte; chip->read_buf = r852_read_buf; chip->write_buf = r852_write_buf; /* ecc */ chip->ecc.mode = NAND_ECC_HW_SYNDROME; chip->ecc.size = R852_DMA_LEN; chip->ecc.bytes = SM_OOB_SIZE; chip->ecc.strength = 2; chip->ecc.hwctl = r852_ecc_hwctl; chip->ecc.calculate = r852_ecc_calculate; chip->ecc.correct = r852_ecc_correct; /* TODO: hack */ chip->ecc.read_oob = r852_read_oob; /* init our device structure */ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL); if (!dev) goto error5; chip->priv = dev; 
dev->chip = chip; dev->pci_dev = pci_dev; pci_set_drvdata(pci_dev, dev); dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN, &dev->phys_bounce_buffer); if (!dev->bounce_buffer) goto error6; error = -ENODEV; dev->mmio = pci_ioremap_bar(pci_dev, 0); if (!dev->mmio) goto error7; error = -ENOMEM; dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); if (!dev->tmp_buffer) goto error8; init_completion(&dev->dma_done); dev->card_workqueue = create_freezable_workqueue(DRV_NAME); if (!dev->card_workqueue) goto error9; INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work); /* shutdown everything - precation */ r852_engine_disable(dev); r852_disable_irqs(dev); r852_dma_test(dev); dev->irq = pci_dev->irq; spin_lock_init(&dev->irqlock); dev->card_detected = 0; r852_card_update_present(dev); /*register irq handler*/ error = -ENODEV; if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED, DRV_NAME, dev)) goto error10; /* kick initial present test */ queue_delayed_work(dev->card_workqueue, &dev->card_detect_work, 0); printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n"); return 0; error10: destroy_workqueue(dev->card_workqueue); error9: kfree(dev->tmp_buffer); error8: pci_iounmap(pci_dev, dev->mmio); error7: pci_free_consistent(pci_dev, R852_DMA_LEN, dev->bounce_buffer, dev->phys_bounce_buffer); error6: kfree(dev); error5: kfree(chip); error4: pci_release_regions(pci_dev); error3: error2: pci_disable_device(pci_dev); error1: return error; } void r852_remove(struct pci_dev *pci_dev) { struct r852_device *dev = pci_get_drvdata(pci_dev); /* Stop detect workqueue - we are going to unregister the device anyway*/ cancel_delayed_work_sync(&dev->card_detect_work); destroy_workqueue(dev->card_workqueue); /* Unregister the device, this might make more IO */ r852_unregister_nand_device(dev); /* Stop interrupts */ r852_disable_irqs(dev); synchronize_irq(dev->irq); free_irq(dev->irq, dev); /* Cleanup */ kfree(dev->tmp_buffer); pci_iounmap(pci_dev, dev->mmio); 
pci_free_consistent(pci_dev, R852_DMA_LEN, dev->bounce_buffer, dev->phys_bounce_buffer); kfree(dev->chip); kfree(dev); /* Shutdown the PCI device */ pci_release_regions(pci_dev); pci_disable_device(pci_dev); } void r852_shutdown(struct pci_dev *pci_dev) { struct r852_device *dev = pci_get_drvdata(pci_dev); cancel_delayed_work_sync(&dev->card_detect_work); r852_disable_irqs(dev); synchronize_irq(dev->irq); pci_disable_device(pci_dev); } #ifdef CONFIG_PM static int r852_suspend(struct device *device) { struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); if (dev->ctlreg & R852_CTL_CARDENABLE) return -EBUSY; /* First make sure the detect work is gone */ cancel_delayed_work_sync(&dev->card_detect_work); /* Turn off the interrupts and stop the device */ r852_disable_irqs(dev); r852_engine_disable(dev); /* If card was pulled off just during the suspend, which is very unlikely, we will remove it on resume, it too late now anyway... */ dev->card_unstable = 0; return 0; } static int r852_resume(struct device *device) { struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); r852_disable_irqs(dev); r852_card_update_present(dev); r852_engine_disable(dev); /* If card status changed, just do the work */ if (dev->card_detected != dev->card_registred) { dbg("card was %s during low power state", dev->card_detected ? 
"added" : "removed"); queue_delayed_work(dev->card_workqueue, &dev->card_detect_work, msecs_to_jiffies(1000)); return 0; } /* Otherwise, initialize the card */ if (dev->card_registred) { r852_engine_enable(dev); dev->chip->select_chip(dev->mtd, 0); dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1); dev->chip->select_chip(dev->mtd, -1); } /* Program card detection IRQ */ r852_update_card_detect(dev); return 0; } #else #define r852_suspend NULL #define r852_resume NULL #endif static const struct pci_device_id r852_pci_id_tbl[] = { { PCI_VDEVICE(RICOH, 0x0852), }, { }, }; MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); static struct pci_driver r852_pci_driver = { .name = DRV_NAME, .id_table = r852_pci_id_tbl, .probe = r852_probe, .remove = r852_remove, .shutdown = r852_shutdown, .driver.pm = &r852_pm_ops, }; module_pci_driver(r852_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
gpl-2.0
KylinUI/android_kernel_oppo_find5
drivers/gpu/drm/radeon/radeon_connectors.c
2874
65986
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include "drmP.h" #include "drm_edid.h" #include "drm_crtc_helper.h" #include "drm_fb_helper.h" #include "radeon_drm.h" #include "radeon.h" #include "atom.h" extern void radeon_combios_connected_scratch_regs(struct drm_connector *connector, struct drm_encoder *encoder, bool connected); extern void radeon_atombios_connected_scratch_regs(struct drm_connector *connector, struct drm_encoder *encoder, bool connected); extern void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector); void radeon_connector_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); /* bail if the connector does not have hpd pin, e.g., * VGA, TV, etc. */ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) return; radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); /* if the connector is already off, don't turn it back on */ if (connector->dpms != DRM_MODE_DPMS_ON) return; /* just deal with DP (not eDP) here. 
*/ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { int saved_dpms = connector->dpms; /* Only turn off the display it it's physically disconnected */ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); else if (radeon_dp_needs_link_train(radeon_connector)) drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); connector->dpms = saved_dpms; } } static void radeon_property_change_mode(struct drm_encoder *encoder) { struct drm_crtc *crtc = encoder->crtc; if (crtc && crtc->enabled) { drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } } static void radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct drm_encoder *best_encoder = NULL; struct drm_encoder *encoder = NULL; struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; struct drm_mode_object *obj; bool connected; int i; best_encoder = connector_funcs->best_encoder(connector); for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) break; obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); if (!obj) continue; encoder = obj_to_encoder(obj); if ((encoder == best_encoder) && (status == connector_status_connected)) connected = true; else connected = false; if (rdev->is_atom_bios) radeon_atombios_connected_scratch_regs(connector, encoder, connected); else radeon_combios_connected_scratch_regs(connector, encoder, connected); } } struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) { struct drm_mode_object *obj; struct drm_encoder *encoder; int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) break; obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); if (!obj) 
continue; encoder = obj_to_encoder(obj); if (encoder->encoder_type == encoder_type) return encoder; } return NULL; } struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; struct drm_mode_object *obj; struct drm_encoder *encoder; /* pick the encoder ids */ if (enc_id) { obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); if (!obj) return NULL; encoder = obj_to_encoder(obj); return encoder; } return NULL; } /* * radeon_connector_analog_encoder_conflict_solve * - search for other connectors sharing this encoder * if priority is true, then set them disconnected if this is connected * if priority is false, set us disconnected if they are connected */ static enum drm_connector_status radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, struct drm_encoder *encoder, enum drm_connector_status current_status, bool priority) { struct drm_device *dev = connector->dev; struct drm_connector *conflict; struct radeon_connector *radeon_conflict; int i; list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { if (conflict == connector) continue; radeon_conflict = to_radeon_connector(conflict); for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (conflict->encoder_ids[i] == 0) break; /* if the IDs match */ if (conflict->encoder_ids[i] == encoder->base.id) { if (conflict->status != connector_status_connected) continue; if (radeon_conflict->use_digital) continue; if (priority == true) { DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector)); conflict->status = connector_status_disconnected; radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); } else { DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict)); 
current_status = connector_status_disconnected; } break; } } } return current_status; } static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *mode = NULL; struct drm_display_mode *native_mode = &radeon_encoder->native_mode; if (native_mode->hdisplay != 0 && native_mode->vdisplay != 0 && native_mode->clock != 0) { mode = drm_mode_duplicate(dev, native_mode); mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; drm_mode_set_name(mode); DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name); } else if (native_mode->hdisplay != 0 && native_mode->vdisplay != 0) { /* mac laptops without an edid */ /* Note that this is not necessarily the exact panel mode, * but an approximation based on the cvt formula. For these * systems we should ideally read the mode info out of the * registers or add a mode table, but this works and is much * simpler. 
*/ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); } return mode; } static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *mode = NULL; struct drm_display_mode *native_mode = &radeon_encoder->native_mode; int i; struct mode_size { int w; int h; } common_modes[17] = { { 640, 480}, { 720, 480}, { 800, 600}, { 848, 480}, {1024, 768}, {1152, 768}, {1280, 720}, {1280, 800}, {1280, 854}, {1280, 960}, {1280, 1024}, {1440, 900}, {1400, 1050}, {1680, 1050}, {1600, 1200}, {1920, 1080}, {1920, 1200} }; for (i = 0; i < 17; i++) { if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { if (common_modes[i].w > 1024 || common_modes[i].h > 768) continue; } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (common_modes[i].w > native_mode->hdisplay || common_modes[i].h > native_mode->vdisplay || (common_modes[i].w == native_mode->hdisplay && common_modes[i].h == native_mode->vdisplay)) continue; } if (common_modes[i].w < 320 || common_modes[i].h < 200) continue; mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); drm_mode_probed_add(connector, mode); } } int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; if (property == rdev->mode_info.coherent_mode_property) { struct radeon_encoder_atom_dig *dig; bool new_coherent_mode; /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; 
radeon_encoder = to_radeon_encoder(encoder); if (!radeon_encoder->enc_priv) return 0; dig = radeon_encoder->enc_priv; new_coherent_mode = val ? true : false; if (dig->coherent_mode != new_coherent_mode) { dig->coherent_mode = new_coherent_mode; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.underscan_property) { /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->underscan_type != val) { radeon_encoder->underscan_type = val; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.underscan_hborder_property) { /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->underscan_hborder != val) { radeon_encoder->underscan_hborder = val; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.underscan_vborder_property) { /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->underscan_vborder != val) { radeon_encoder->underscan_vborder = val; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.tv_std_property) { encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); if (!encoder) { encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_DAC); } if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (!radeon_encoder->enc_priv) return 0; if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) { struct radeon_encoder_atom_dac *dac_int; dac_int = radeon_encoder->enc_priv; dac_int->tv_std = val; } else { struct radeon_encoder_tv_dac *dac_int; dac_int = radeon_encoder->enc_priv; 
dac_int->tv_std = val; } radeon_property_change_mode(&radeon_encoder->base); } if (property == rdev->mode_info.load_detect_property) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); if (val == 0) radeon_connector->dac_load_detect = false; else radeon_connector->dac_load_detect = true; } if (property == rdev->mode_info.tmds_pll_property) { struct radeon_encoder_int_tmds *tmds = NULL; bool ret = false; /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); tmds = radeon_encoder->enc_priv; if (!tmds) return 0; if (val == 0) { if (rdev->is_atom_bios) ret = radeon_atombios_get_tmds_info(radeon_encoder, tmds); else ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds); } if (val == 1 || ret == false) { radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds); } radeon_property_change_mode(&radeon_encoder->base); } return 0; } static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, struct drm_connector *connector) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; struct drm_display_mode *t, *mode; /* If the EDID preferred mode doesn't match the native mode, use it */ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { if (mode->type & DRM_MODE_TYPE_PREFERRED) { if (mode->hdisplay != native_mode->hdisplay || mode->vdisplay != native_mode->vdisplay) memcpy(native_mode, mode, sizeof(*mode)); } } /* Try to get native mode details from EDID if necessary */ if (!native_mode->clock) { list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { if (mode->hdisplay == native_mode->hdisplay && mode->vdisplay == native_mode->vdisplay) { *native_mode = *mode; drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n"); 
break; } } } if (!native_mode->clock) { DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); radeon_encoder->rmx_type = RMX_OFF; } } static int radeon_lvds_get_modes(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; int ret = 0; struct drm_display_mode *mode; if (radeon_connector->ddc_bus) { ret = radeon_ddc_get_modes(radeon_connector); if (ret > 0) { encoder = radeon_best_single_encoder(connector); if (encoder) { radeon_fixup_lvds_native_mode(encoder, connector); /* add scaled modes */ radeon_add_common_modes(encoder, connector); } return ret; } } encoder = radeon_best_single_encoder(connector); if (!encoder) return 0; /* we have no EDID modes */ mode = radeon_fp_native_mode(encoder); if (mode) { ret = 1; drm_mode_probed_add(connector, mode); /* add the width/height from vbios tables if available */ connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; /* add scaled modes */ radeon_add_common_modes(encoder, connector); } return ret; } static int radeon_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = radeon_best_single_encoder(connector); if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) return MODE_PANEL; if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; /* AVIVO hardware supports downscaling modes larger than the panel * to the panel size, but I'm not sure this is desirable. 
*/ if ((mode->hdisplay > native_mode->hdisplay) || (mode->vdisplay > native_mode->vdisplay)) return MODE_PANEL; /* if scaling is disabled, block non-native modes */ if (radeon_encoder->rmx_type == RMX_OFF) { if ((mode->hdisplay != native_mode->hdisplay) || (mode->vdisplay != native_mode->vdisplay)) return MODE_PANEL; } } return MODE_OK; } static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector, bool force) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = radeon_best_single_encoder(connector); enum drm_connector_status ret = connector_status_disconnected; if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; /* check if panel is valid */ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) ret = connector_status_connected; } /* check for edid as well */ if (radeon_connector->edid) ret = connector_status_connected; else { if (radeon_connector->ddc_bus) { radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); if (radeon_connector->edid) ret = connector_status_connected; } } /* check acpi lid status ??? 
*/ radeon_connector_update_scratch_regs(connector, ret); return ret; } static void radeon_connector_destroy(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); if (radeon_connector->edid) kfree(radeon_connector->edid); kfree(radeon_connector->con_priv); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } static int radeon_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { struct drm_device *dev = connector->dev; struct radeon_encoder *radeon_encoder; enum radeon_rmx_type rmx_type; DRM_DEBUG_KMS("\n"); if (property != dev->mode_config.scaling_mode_property) return 0; if (connector->encoder) radeon_encoder = to_radeon_encoder(connector->encoder); else { struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector)); } switch (value) { case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break; case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; default: case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; } if (radeon_encoder->rmx_type == rmx_type) return 0; radeon_encoder->rmx_type = rmx_type; radeon_property_change_mode(&radeon_encoder->base); return 0; } struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = { .get_modes = radeon_lvds_get_modes, .mode_valid = radeon_lvds_mode_valid, .best_encoder = radeon_best_single_encoder, }; struct drm_connector_funcs radeon_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = radeon_connector_destroy, .set_property = radeon_lvds_set_property, }; static int radeon_vga_get_modes(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); int ret; ret = 
radeon_ddc_get_modes(radeon_connector); return ret; } static int radeon_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; /* XXX check mode bandwidth */ if ((mode->clock / 10) > rdev->clock.max_pixel_clock) return MODE_CLOCK_HIGH; return MODE_OK; } static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; bool dret = false; enum drm_connector_status ret = connector_status_disconnected; encoder = radeon_best_single_encoder(connector); if (!encoder) ret = connector_status_disconnected; if (radeon_connector->ddc_bus) dret = radeon_ddc_probe(radeon_connector); if (dret) { radeon_connector->detected_by_load = false; if (radeon_connector->edid) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; } radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", drm_get_connector_name(connector)); ret = connector_status_connected; } else { radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); /* some oems have boards with separate digital and analog connectors * with a shared ddc line (often vga + hdmi) */ if (radeon_connector->use_digital && radeon_connector->shared_ddc) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; ret = connector_status_disconnected; } else ret = connector_status_connected; } } else { /* if we aren't forcing don't do destructive polling */ if (!force) { /* only return the previous status if we last * detected a monitor via load. 
*/ if (radeon_connector->detected_by_load) return connector->status; else return ret; } if (radeon_connector->dac_load_detect && encoder) { encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); if (ret != connector_status_disconnected) radeon_connector->detected_by_load = true; } } if (ret == connector_status_connected) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the * vbios to deal with KVMs. If we have one and are not able to detect a monitor * by other means, assume the CRT is connected and use that EDID. */ if ((!rdev->is_atom_bios) && (ret == connector_status_disconnected) && rdev->mode_info.bios_hardcoded_edid_size) { ret = connector_status_connected; } radeon_connector_update_scratch_regs(connector, ret); return ret; } struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = { .get_modes = radeon_vga_get_modes, .mode_valid = radeon_vga_mode_valid, .best_encoder = radeon_best_single_encoder, }; struct drm_connector_funcs radeon_vga_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_vga_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = radeon_connector_destroy, .set_property = radeon_connector_set_property, }; static int radeon_tv_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct drm_display_mode *tv_mode; struct drm_encoder *encoder; encoder = radeon_best_single_encoder(connector); if (!encoder) return 0; /* avivo chips can scale any mode */ if (rdev->family >= CHIP_RS600) /* add scaled modes */ radeon_add_common_modes(encoder, connector); else { /* only 800x600 is supported right now on pre-avivo chips */ tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false); tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, 
tv_mode);
	}
	return 1;
}

/* Pre-avivo TV-out only validates up to 1024x768; reject anything larger. */
static int radeon_tv_mode_valid(struct drm_connector *connector,
				struct drm_display_mode *mode)
{
	if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
		return MODE_CLOCK_RANGE;
	return MODE_OK;
}

/*
 * Detect a TV by DAC load detection only (TV-out has no DDC or HPD).
 * If load detection is disabled via the connector property, report
 * disconnected without touching the hardware.
 */
static enum drm_connector_status
radeon_tv_detect(struct drm_connector *connector, bool force)
{
	struct drm_encoder *encoder;
	struct drm_encoder_helper_funcs *encoder_funcs;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	enum drm_connector_status ret = connector_status_disconnected;

	if (!radeon_connector->dac_load_detect)
		return ret;

	encoder = radeon_best_single_encoder(connector);
	if (!encoder)
		ret = connector_status_disconnected;
	else {
		encoder_funcs = encoder->helper_private;
		ret = encoder_funcs->detect(encoder, connector);
	}
	/* the analog DAC may be shared with another connector; arbitrate */
	if (ret == connector_status_connected)
		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
	radeon_connector_update_scratch_regs(connector, ret);
	return ret;
}

struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
	.get_modes = radeon_tv_get_modes,
	.mode_valid = radeon_tv_mode_valid,
	.best_encoder = radeon_best_single_encoder,
};

struct drm_connector_funcs radeon_tv_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = radeon_tv_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = radeon_connector_destroy,
	.set_property = radeon_connector_set_property,
};

/* Query modes for a DVI connector via DDC/EDID. */
static int radeon_dvi_get_modes(struct drm_connector *connector)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	int ret;

	ret = radeon_ddc_get_modes(radeon_connector);
	return ret;
}

/*
 * Returns true when the hotplug-detect pin confirms the connector's
 * cached status, allowing detect() to skip the expensive re-probe.
 */
static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	enum drm_connector_status status;

	/* We only trust HPD on R600 and newer ASICS.
*/ if (rdev->family >= CHIP_R600 && radeon_connector->hpd.hpd != RADEON_HPD_NONE) { if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) status = connector_status_connected; else status = connector_status_disconnected; if (connector->status == status) return true; } return false; } /* * DVI is complicated * Do a DDC probe, if DDC probe passes, get the full EDID so * we can do analog/digital monitor detection at this point. * If the monitor is an analog monitor or we got no DDC, * we need to find the DAC encoder object for this connector. * If we got no DDC, we do load detection on the DAC encoder object. * If we got analog DDC or load detection passes on the DAC encoder * we have to check if this analog encoder is shared with anyone else (TV) * if its shared we have to set the other connector to disconnected. */ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = NULL; struct drm_encoder_helper_funcs *encoder_funcs; struct drm_mode_object *obj; int i; enum drm_connector_status ret = connector_status_disconnected; bool dret = false; if (!force && radeon_check_hpd_status_unchanged(connector)) return connector->status; if (radeon_connector->ddc_bus) dret = radeon_ddc_probe(radeon_connector); if (dret) { radeon_connector->detected_by_load = false; if (radeon_connector->edid) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; } radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", drm_get_connector_name(connector)); /* rs690 seems to have a problem with connectors not existing and always * return a block of 0's. 
If we see this just stop polling on this output */ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) { ret = connector_status_disconnected; DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector)); radeon_connector->ddc_bus = NULL; } } else { radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); /* some oems have boards with separate digital and analog connectors * with a shared ddc line (often vga + hdmi) */ if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; ret = connector_status_disconnected; } else ret = connector_status_connected; /* This gets complicated. We have boards with VGA + HDMI with a * shared DDC line and we have boards with DVI-D + HDMI with a shared * DDC line. The latter is more complex because with DVI<->HDMI adapters * you don't really know what's connected to which port as both are digital. 
*/ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { struct drm_connector *list_connector; struct radeon_connector *list_radeon_connector; list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { if (connector == list_connector) continue; list_radeon_connector = to_radeon_connector(list_connector); if (list_radeon_connector->shared_ddc && (list_radeon_connector->ddc_bus->rec.i2c_id == radeon_connector->ddc_bus->rec.i2c_id)) { /* cases where both connectors are digital */ if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) { /* hpd is our only option in this case */ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; ret = connector_status_disconnected; } } } } } } } if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) goto out; /* DVI-D and HDMI-A are digital only */ if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) || (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA)) goto out; /* if we aren't forcing don't do destructive polling */ if (!force) { /* only return the previous status if we last * detected a monitor via load. 
*/ if (radeon_connector->detected_by_load) ret = connector->status; goto out; } /* find analog encoder */ if (radeon_connector->dac_load_detect) { for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) break; obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); if (!obj) continue; encoder = obj_to_encoder(obj); if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) continue; encoder_funcs = encoder->helper_private; if (encoder_funcs->detect) { if (ret != connector_status_connected) { ret = encoder_funcs->detect(encoder, connector); if (ret == connector_status_connected) { radeon_connector->use_digital = false; } if (ret != connector_status_disconnected) radeon_connector->detected_by_load = true; } break; } } } if ((ret == connector_status_connected) && (radeon_connector->use_digital == false) && encoder) { ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); } /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the * vbios to deal with KVMs. If we have one and are not able to detect a monitor * by other means, assume the DFP is connected and use that EDID. In most * cases the DVI port is actually a virtual KVM port connected to the service * processor. 
*/
out:
	/* fall back to the hardcoded VBIOS EDID on server boards (combios only) */
	if ((!rdev->is_atom_bios) &&
	    (ret == connector_status_disconnected) &&
	    rdev->mode_info.bios_hardcoded_edid_size) {
		radeon_connector->use_digital = true;
		ret = connector_status_connected;
	}

	/* updated in get modes as well since we need to know if it's analog or digital */
	radeon_connector_update_scratch_regs(connector, ret);
	return ret;
}

/* okay need to be smart in here about which encoder to pick */
struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
	int enc_id = connector->encoder_ids[0];
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;
	int i;

	/* prefer the encoder matching the detected signal type (TMDS vs DAC) */
	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0)
			break;

		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
		if (!obj)
			continue;

		encoder = obj_to_encoder(obj);

		if (radeon_connector->use_digital == true) {
			if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
				return encoder;
		} else {
			if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
			    encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
				return encoder;
		}
	}

	/* see if we have a default encoder  TODO */

	/* then check use digital */
	/* pick the first one */
	if (enc_id) {
		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
		if (!obj)
			return NULL;
		encoder = obj_to_encoder(obj);
		return encoder;
	}
	return NULL;
}

/* Honor a user-forced connector state by pinning analog vs digital. */
static void radeon_dvi_force(struct drm_connector *connector)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	if (connector->force == DRM_FORCE_ON)
		radeon_connector->use_digital = false;
	if (connector->force == DRM_FORCE_ON_DIGITAL)
		radeon_connector->use_digital = true;
}

static int radeon_dvi_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	/*
XXX check mode bandwidth */ /* clocks over 135 MHz have heat issues with DVI on RV100 */ if (radeon_connector->use_digital && (rdev->family == CHIP_RV100) && (mode->clock > 135000)) return MODE_CLOCK_HIGH; if (radeon_connector->use_digital && (mode->clock > 165000)) { if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) return MODE_OK; else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) { if (ASIC_IS_DCE6(rdev)) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; else return MODE_OK; } else return MODE_CLOCK_HIGH; } else return MODE_CLOCK_HIGH; } /* check against the max pixel clock */ if ((mode->clock / 10) > rdev->clock.max_pixel_clock) return MODE_CLOCK_HIGH; return MODE_OK; } struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { .get_modes = radeon_dvi_get_modes, .mode_valid = radeon_dvi_mode_valid, .best_encoder = radeon_dvi_encoder, }; struct drm_connector_funcs radeon_dvi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_dvi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_connector_set_property, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; static void radeon_dp_connector_destroy(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; if (radeon_connector->edid) kfree(radeon_connector->edid); if (radeon_dig_connector->dp_i2c_bus) radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); kfree(radeon_connector->con_priv); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } static int radeon_dp_get_modes(struct drm_connector 
*connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; struct drm_encoder *encoder = radeon_best_single_encoder(connector); int ret; if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { struct drm_display_mode *mode; if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_ON); ret = radeon_ddc_get_modes(radeon_connector); if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); } else { /* need to setup ddc on the bridge */ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != ENCODER_OBJECT_ID_NONE) { if (encoder) radeon_atom_ext_encoder_setup_ddc(encoder); } ret = radeon_ddc_get_modes(radeon_connector); } if (ret > 0) { if (encoder) { radeon_fixup_lvds_native_mode(encoder, connector); /* add scaled modes */ radeon_add_common_modes(encoder, connector); } return ret; } if (!encoder) return 0; /* we have no EDID modes */ mode = radeon_fp_native_mode(encoder); if (mode) { ret = 1; drm_mode_probed_add(connector, mode); /* add the width/height from vbios tables if available */ connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; /* add scaled modes */ radeon_add_common_modes(encoder, connector); } } else { /* need to setup ddc on the bridge */ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != ENCODER_OBJECT_ID_NONE) { if (encoder) radeon_atom_ext_encoder_setup_ddc(encoder); } ret = radeon_ddc_get_modes(radeon_connector); } return ret; } u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector) { struct drm_mode_object *obj; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; int i; for (i = 0; i < 
DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0)
			break;

		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
		if (!obj)
			continue;

		encoder = obj_to_encoder(obj);
		radeon_encoder = to_radeon_encoder(encoder);

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_TRAVIS:
		case ENCODER_OBJECT_ID_NUTMEG:
			/* external DP bridge chips */
			return radeon_encoder->encoder_id;
		default:
			break;
		}
	}

	return ENCODER_OBJECT_ID_NONE;
}

/*
 * Returns true if any encoder attached to this connector advertises
 * the HBR2 capability record.
 */
bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;
	int i;
	bool found = false;

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0)
			break;

		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
		if (!obj)
			continue;

		encoder = obj_to_encoder(obj);
		radeon_encoder = to_radeon_encoder(encoder);
		if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
			found = true;
	}

	return found;
}

/*
 * DP 1.2 capability needs a DCE5+ ASIC, a sufficiently fast external
 * DP clock, and an HBR2-capable encoder on the connector.
 */
bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (ASIC_IS_DCE5(rdev) &&
	    (rdev->clock.dp_extclk >= 53900) &&
	    radeon_connector_encoder_is_hbr2(connector)) {
		return true;
	}

	return false;
}

static enum drm_connector_status
radeon_dp_detect(struct drm_connector *connector, bool force)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	enum drm_connector_status ret = connector_status_disconnected;
	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
	struct drm_encoder *encoder = radeon_best_single_encoder(connector);

	if (!force && radeon_check_hpd_status_unchanged(connector))
		return connector->status;

	/* drop any stale EDID before re-probing */
	if (radeon_connector->edid) {
		kfree(radeon_connector->edid);
		radeon_connector->edid = NULL;
	}
	if
((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; /* check if panel is valid */ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) ret = connector_status_connected; } /* eDP is always DP */ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_ON); if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); } else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != ENCODER_OBJECT_ID_NONE) { /* DP bridges are always DP */ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; /* get the DPCD from the bridge */ radeon_dp_getdpcd(radeon_connector); if (encoder) { /* setup ddc on the bridge */ radeon_atom_ext_encoder_setup_ddc(encoder); if (radeon_ddc_probe(radeon_connector)) /* try DDC */ ret = connector_status_connected; else if (radeon_connector->dac_load_detect) { /* try load detection */ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); } } } else { radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { ret = connector_status_connected; if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) radeon_dp_getdpcd(radeon_connector); } else { if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; } else { if (radeon_ddc_probe(radeon_connector)) ret = connector_status_connected; } } } 
radeon_connector_update_scratch_regs(connector, ret);
	return ret;
}

/* Mode validation for DP/eDP/LVDS-bridge connectors. */
static int radeon_dp_mode_valid(struct drm_connector *connector,
				struct drm_display_mode *mode)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;

	/* XXX check mode bandwidth */

	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
		struct drm_encoder *encoder = radeon_best_single_encoder(connector);

		/* internal panels are assumed to be at least 320x240 */
		if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
			return MODE_PANEL;

		if (encoder) {
			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
			struct drm_display_mode *native_mode = &radeon_encoder->native_mode;

			/* AVIVO hardware supports downscaling modes larger than the panel
			 * to the panel size, but I'm not sure this is desirable.
			 */
			if ((mode->hdisplay > native_mode->hdisplay) ||
			    (mode->vdisplay > native_mode->vdisplay))
				return MODE_PANEL;

			/* if scaling is disabled, block non-native modes */
			if (radeon_encoder->rmx_type == RMX_OFF) {
				if ((mode->hdisplay != native_mode->hdisplay) ||
				    (mode->vdisplay != native_mode->vdisplay))
					return MODE_PANEL;
			}
		}
		return MODE_OK;
	} else {
		/* external DP sinks get the DP link-bandwidth check */
		if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
		    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
			return radeon_dp_mode_valid_helper(connector, mode);
		else
			return MODE_OK;
	}
}

struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
	.get_modes = radeon_dp_get_modes,
	.mode_valid = radeon_dp_mode_valid,
	.best_encoder = radeon_dvi_encoder,
};

struct drm_connector_funcs radeon_dp_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = radeon_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = radeon_connector_set_property,
	.destroy = radeon_dp_connector_destroy,
	.force = radeon_dvi_force,
};

void radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint32_t igp_lane_info, uint16_t connector_object_id, struct radeon_hpd *hpd, struct radeon_router *router) { struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; struct radeon_connector *radeon_connector; struct radeon_connector_atom_dig *radeon_dig_connector; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; uint32_t subpixel_order = SubPixelNone; bool shared_ddc = false; bool is_dp_bridge = false; if (connector_type == DRM_MODE_CONNECTOR_Unknown) return; /* if the user selected tv=0 don't try and add the connector */ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || (connector_type == DRM_MODE_CONNECTOR_Composite) || (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && (radeon_tv == 0)) return; /* see if we already added it */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); if (radeon_connector->connector_id == connector_id) { radeon_connector->devices |= supported_device; return; } if (radeon_connector->ddc_bus && i2c_bus->valid) { if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) { radeon_connector->shared_ddc = true; shared_ddc = true; } if (radeon_connector->router_bus && router->ddc_valid && (radeon_connector->router.router_id == router->router_id)) { radeon_connector->shared_ddc = false; shared_ddc = false; } } } /* check if it's a dp bridge */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->devices & supported_device) { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_TRAVIS: case ENCODER_OBJECT_ID_NUTMEG: is_dp_bridge = true; break; default: break; } } } radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); if (!radeon_connector) return; connector = &radeon_connector->base; 
radeon_connector->connector_id = connector_id; radeon_connector->devices = supported_device; radeon_connector->shared_ddc = shared_ddc; radeon_connector->connector_object_id = connector_object_id; radeon_connector->hpd = *hpd; radeon_connector->router = *router; if (router->ddc_valid || router->cd_valid) { radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); if (!radeon_connector->router_bus) DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); } if (is_dp_bridge) { radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { /* add DP i2c bus */ if (connector_type == DRM_MODE_CONNECTOR_eDP) radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); else radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); if (!radeon_dig_connector->dp_i2c_bus) DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_DVIA: default: connector->interlace_allowed = true; connector->doublescan_allowed = true; radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: case DRM_MODE_CONNECTOR_DisplayPort: drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_HDMIB) connector->doublescan_allowed = true; else connector->doublescan_allowed = false; if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); } break; case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; } } else { switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("VGA: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DVI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); } connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_DVII) connector->doublescan_allowed = true; else connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("HDMI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); } subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_HDMIB) connector->doublescan_allowed = true; else connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_DisplayPort: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { /* add DP i2c bus */ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); if (!radeon_dig_connector->dp_i2c_bus) DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); } connector->interlace_allowed = true; /* in theory with a DP to VGA converter... */ connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_eDP: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { /* add DP i2c bus */ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); if (!radeon_dig_connector->dp_i2c_bus) DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_9PinDIN: drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_atombios_get_tv_info(rdev)); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("LVDS: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; } } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { if (i2c_bus->valid) connector->polled = DRM_CONNECTOR_POLL_CONNECT; } else connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; drm_sysfs_connector_add(connector); return; failed: drm_connector_cleanup(connector); kfree(connector); } void radeon_add_legacy_connector(struct drm_device *dev, uint32_t connector_id, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint16_t connector_object_id, struct radeon_hpd *hpd) { struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; struct radeon_connector *radeon_connector; uint32_t subpixel_order = SubPixelNone; if (connector_type == DRM_MODE_CONNECTOR_Unknown) return; /* if the user selected tv=0 don't try and add the connector */ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || (connector_type == DRM_MODE_CONNECTOR_Composite) || (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && (radeon_tv == 0)) return; /* see if we already added it */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); if (radeon_connector->connector_id == connector_id) { radeon_connector->devices |= supported_device; return; } } radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); if (!radeon_connector) return; connector = &radeon_connector->base; radeon_connector->connector_id = connector_id; radeon_connector->devices = supported_device; radeon_connector->connector_object_id = connector_object_id; radeon_connector->hpd = *hpd; switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, 
&radeon_connector->base, &radeon_vga_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DVI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); } subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_DVII) connector->doublescan_allowed = true; else connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_9PinDIN: drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); radeon_connector->dac_load_detect = true; /* RS400,RC410,RS480 chipset seems to report a lot * of false positive on load detect, we haven't yet * found a way to make load detect reliable on those * chipset, thus just disable it for TV. */ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) radeon_connector->dac_load_detect = false; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, radeon_connector->dac_load_detect); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("LVDS: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { if (i2c_bus->valid) connector->polled = DRM_CONNECTOR_POLL_CONNECT; } else connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; drm_sysfs_connector_add(connector); if (connector_type == DRM_MODE_CONNECTOR_LVDS) { struct drm_encoder *drm_encoder; list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) { struct radeon_encoder *radeon_encoder; radeon_encoder = to_radeon_encoder(drm_encoder); if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS) radeon_legacy_backlight_init(radeon_encoder, connector); } } }
gpl-2.0
alcobar/asuswrt-merlin
release/src-rt-6.x.4708/linux/linux-2.6.36/fs/sysv/ialloc.c
3386
5818
/* * linux/fs/sysv/ialloc.c * * minix/bitmap.c * Copyright (C) 1991, 1992 Linus Torvalds * * ext/freelists.c * Copyright (C) 1992 Remy Card (card@masi.ibp.fr) * * xenix/alloc.c * Copyright (C) 1992 Doug Evans * * coh/alloc.c * Copyright (C) 1993 Pascal Haible, Bruno Haible * * sysv/ialloc.c * Copyright (C) 1993 Bruno Haible * * This file contains code for allocating/freeing inodes. */ #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include "sysv.h" /* We don't trust the value of sb->sv_sbd2->s_tinode = *sb->sv_sb_total_free_inodes but we nevertheless keep it up to date. */ /* An inode on disk is considered free if both i_mode == 0 and i_nlink == 0. */ /* return &sb->sv_sb_fic_inodes[i] = &sbd->s_inode[i]; */ static inline sysv_ino_t * sv_sb_fic_inode(struct super_block * sb, unsigned int i) { struct sysv_sb_info *sbi = SYSV_SB(sb); if (sbi->s_bh1 == sbi->s_bh2) return &sbi->s_sb_fic_inodes[i]; else { /* 512 byte Xenix FS */ unsigned int offset = offsetof(struct xenix_super_block, s_inode[i]); if (offset < 512) return (sysv_ino_t*)(sbi->s_sbd1 + offset); else return (sysv_ino_t*)(sbi->s_sbd2 + offset); } } struct sysv_inode * sysv_raw_inode(struct super_block *sb, unsigned ino, struct buffer_head **bh) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct sysv_inode *res; int block = sbi->s_firstinodezone + sbi->s_block_base; block += (ino-1) >> sbi->s_inodes_per_block_bits; *bh = sb_bread(sb, block); if (!*bh) return NULL; res = (struct sysv_inode *)(*bh)->b_data; return res + ((ino-1) & sbi->s_inodes_per_block_1); } static int refill_free_cache(struct super_block *sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct buffer_head * bh; struct sysv_inode * raw_inode; int i = 0, ino; ino = SYSV_ROOT_INO+1; raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto out; while (ino <= sbi->s_ninodes) { if (raw_inode->i_mode == 0 && 
raw_inode->i_nlink == 0) { *sv_sb_fic_inode(sb,i++) = cpu_to_fs16(SYSV_SB(sb), ino); if (i == sbi->s_fic_size) break; } if ((ino++ & sbi->s_inodes_per_block_1) == 0) { brelse(bh); raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto out; } else raw_inode++; } brelse(bh); out: return i; } void sysv_free_inode(struct inode * inode) { struct super_block *sb = inode->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); unsigned int ino; struct buffer_head * bh; struct sysv_inode * raw_inode; unsigned count; sb = inode->i_sb; ino = inode->i_ino; if (ino <= SYSV_ROOT_INO || ino > sbi->s_ninodes) { printk("sysv_free_inode: inode 0,1,2 or nonexistent inode\n"); return; } raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) { printk("sysv_free_inode: unable to read inode block on device " "%s\n", inode->i_sb->s_id); return; } lock_super(sb); count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count); if (count < sbi->s_fic_size) { *sv_sb_fic_inode(sb,count++) = cpu_to_fs16(sbi, ino); *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count); } fs16_add(sbi, sbi->s_sb_total_free_inodes, 1); dirty_sb(sb); memset(raw_inode, 0, sizeof(struct sysv_inode)); mark_buffer_dirty(bh); unlock_super(sb); brelse(bh); } struct inode * sysv_new_inode(const struct inode * dir, mode_t mode) { struct super_block *sb = dir->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); struct inode *inode; sysv_ino_t ino; unsigned count; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE }; inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); lock_super(sb); count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count); if (count == 0 || (*sv_sb_fic_inode(sb,count-1) == 0)) { count = refill_free_cache(sb); if (count == 0) { iput(inode); unlock_super(sb); return ERR_PTR(-ENOSPC); } } /* Now count > 0. 
*/ ino = *sv_sb_fic_inode(sb,--count); *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count); fs16_add(sbi, sbi->s_sb_total_free_inodes, -1); dirty_sb(sb); inode_init_owner(inode, dir, mode); inode->i_ino = fs16_to_cpu(sbi, ino); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; inode->i_blocks = 0; memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data)); SYSV_I(inode)->i_dir_start_lookup = 0; insert_inode_hash(inode); mark_inode_dirty(inode); sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */ mark_inode_dirty(inode); /* cleared by sysv_write_inode() */ /* That's it. */ unlock_super(sb); return inode; } unsigned long sysv_count_free_inodes(struct super_block * sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct buffer_head * bh; struct sysv_inode * raw_inode; int ino, count, sb_count; lock_super(sb); sb_count = fs16_to_cpu(sbi, *sbi->s_sb_total_free_inodes); if (0) goto trust_sb; /* this causes a lot of disk traffic ... */ count = 0; ino = SYSV_ROOT_INO+1; raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto Eio; while (ino <= sbi->s_ninodes) { if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0) count++; if ((ino++ & sbi->s_inodes_per_block_1) == 0) { brelse(bh); raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto Eio; } else raw_inode++; } brelse(bh); if (count != sb_count) goto Einval; out: unlock_super(sb); return count; Einval: printk("sysv_count_free_inodes: " "free inode count was %d, correcting to %d\n", sb_count, count); if (!(sb->s_flags & MS_RDONLY)) { *sbi->s_sb_total_free_inodes = cpu_to_fs16(SYSV_SB(sb), count); dirty_sb(sb); } goto out; Eio: printk("sysv_count_free_inodes: unable to read inode table\n"); trust_sb: count = sb_count; goto out; }
gpl-2.0
CyanogenMod/android_kernel_motorola_omap4-common
fs/sysv/ialloc.c
3386
5818
/* * linux/fs/sysv/ialloc.c * * minix/bitmap.c * Copyright (C) 1991, 1992 Linus Torvalds * * ext/freelists.c * Copyright (C) 1992 Remy Card (card@masi.ibp.fr) * * xenix/alloc.c * Copyright (C) 1992 Doug Evans * * coh/alloc.c * Copyright (C) 1993 Pascal Haible, Bruno Haible * * sysv/ialloc.c * Copyright (C) 1993 Bruno Haible * * This file contains code for allocating/freeing inodes. */ #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include "sysv.h" /* We don't trust the value of sb->sv_sbd2->s_tinode = *sb->sv_sb_total_free_inodes but we nevertheless keep it up to date. */ /* An inode on disk is considered free if both i_mode == 0 and i_nlink == 0. */ /* return &sb->sv_sb_fic_inodes[i] = &sbd->s_inode[i]; */ static inline sysv_ino_t * sv_sb_fic_inode(struct super_block * sb, unsigned int i) { struct sysv_sb_info *sbi = SYSV_SB(sb); if (sbi->s_bh1 == sbi->s_bh2) return &sbi->s_sb_fic_inodes[i]; else { /* 512 byte Xenix FS */ unsigned int offset = offsetof(struct xenix_super_block, s_inode[i]); if (offset < 512) return (sysv_ino_t*)(sbi->s_sbd1 + offset); else return (sysv_ino_t*)(sbi->s_sbd2 + offset); } } struct sysv_inode * sysv_raw_inode(struct super_block *sb, unsigned ino, struct buffer_head **bh) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct sysv_inode *res; int block = sbi->s_firstinodezone + sbi->s_block_base; block += (ino-1) >> sbi->s_inodes_per_block_bits; *bh = sb_bread(sb, block); if (!*bh) return NULL; res = (struct sysv_inode *)(*bh)->b_data; return res + ((ino-1) & sbi->s_inodes_per_block_1); } static int refill_free_cache(struct super_block *sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct buffer_head * bh; struct sysv_inode * raw_inode; int i = 0, ino; ino = SYSV_ROOT_INO+1; raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto out; while (ino <= sbi->s_ninodes) { if (raw_inode->i_mode == 0 && 
raw_inode->i_nlink == 0) { *sv_sb_fic_inode(sb,i++) = cpu_to_fs16(SYSV_SB(sb), ino); if (i == sbi->s_fic_size) break; } if ((ino++ & sbi->s_inodes_per_block_1) == 0) { brelse(bh); raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto out; } else raw_inode++; } brelse(bh); out: return i; } void sysv_free_inode(struct inode * inode) { struct super_block *sb = inode->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); unsigned int ino; struct buffer_head * bh; struct sysv_inode * raw_inode; unsigned count; sb = inode->i_sb; ino = inode->i_ino; if (ino <= SYSV_ROOT_INO || ino > sbi->s_ninodes) { printk("sysv_free_inode: inode 0,1,2 or nonexistent inode\n"); return; } raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) { printk("sysv_free_inode: unable to read inode block on device " "%s\n", inode->i_sb->s_id); return; } lock_super(sb); count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count); if (count < sbi->s_fic_size) { *sv_sb_fic_inode(sb,count++) = cpu_to_fs16(sbi, ino); *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count); } fs16_add(sbi, sbi->s_sb_total_free_inodes, 1); dirty_sb(sb); memset(raw_inode, 0, sizeof(struct sysv_inode)); mark_buffer_dirty(bh); unlock_super(sb); brelse(bh); } struct inode * sysv_new_inode(const struct inode * dir, mode_t mode) { struct super_block *sb = dir->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); struct inode *inode; sysv_ino_t ino; unsigned count; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE }; inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); lock_super(sb); count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count); if (count == 0 || (*sv_sb_fic_inode(sb,count-1) == 0)) { count = refill_free_cache(sb); if (count == 0) { iput(inode); unlock_super(sb); return ERR_PTR(-ENOSPC); } } /* Now count > 0. 
*/ ino = *sv_sb_fic_inode(sb,--count); *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count); fs16_add(sbi, sbi->s_sb_total_free_inodes, -1); dirty_sb(sb); inode_init_owner(inode, dir, mode); inode->i_ino = fs16_to_cpu(sbi, ino); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; inode->i_blocks = 0; memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data)); SYSV_I(inode)->i_dir_start_lookup = 0; insert_inode_hash(inode); mark_inode_dirty(inode); sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */ mark_inode_dirty(inode); /* cleared by sysv_write_inode() */ /* That's it. */ unlock_super(sb); return inode; } unsigned long sysv_count_free_inodes(struct super_block * sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct buffer_head * bh; struct sysv_inode * raw_inode; int ino, count, sb_count; lock_super(sb); sb_count = fs16_to_cpu(sbi, *sbi->s_sb_total_free_inodes); if (0) goto trust_sb; /* this causes a lot of disk traffic ... */ count = 0; ino = SYSV_ROOT_INO+1; raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto Eio; while (ino <= sbi->s_ninodes) { if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0) count++; if ((ino++ & sbi->s_inodes_per_block_1) == 0) { brelse(bh); raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto Eio; } else raw_inode++; } brelse(bh); if (count != sb_count) goto Einval; out: unlock_super(sb); return count; Einval: printk("sysv_count_free_inodes: " "free inode count was %d, correcting to %d\n", sb_count, count); if (!(sb->s_flags & MS_RDONLY)) { *sbi->s_sb_total_free_inodes = cpu_to_fs16(SYSV_SB(sb), count); dirty_sb(sb); } goto out; Eio: printk("sysv_count_free_inodes: unable to read inode table\n"); trust_sb: count = sb_count; goto out; }
gpl-2.0
pacificIT/linux-2.6.36
fs/sysv/ialloc.c
3386
5818
/* * linux/fs/sysv/ialloc.c * * minix/bitmap.c * Copyright (C) 1991, 1992 Linus Torvalds * * ext/freelists.c * Copyright (C) 1992 Remy Card (card@masi.ibp.fr) * * xenix/alloc.c * Copyright (C) 1992 Doug Evans * * coh/alloc.c * Copyright (C) 1993 Pascal Haible, Bruno Haible * * sysv/ialloc.c * Copyright (C) 1993 Bruno Haible * * This file contains code for allocating/freeing inodes. */ #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include "sysv.h" /* We don't trust the value of sb->sv_sbd2->s_tinode = *sb->sv_sb_total_free_inodes but we nevertheless keep it up to date. */ /* An inode on disk is considered free if both i_mode == 0 and i_nlink == 0. */ /* return &sb->sv_sb_fic_inodes[i] = &sbd->s_inode[i]; */ static inline sysv_ino_t * sv_sb_fic_inode(struct super_block * sb, unsigned int i) { struct sysv_sb_info *sbi = SYSV_SB(sb); if (sbi->s_bh1 == sbi->s_bh2) return &sbi->s_sb_fic_inodes[i]; else { /* 512 byte Xenix FS */ unsigned int offset = offsetof(struct xenix_super_block, s_inode[i]); if (offset < 512) return (sysv_ino_t*)(sbi->s_sbd1 + offset); else return (sysv_ino_t*)(sbi->s_sbd2 + offset); } } struct sysv_inode * sysv_raw_inode(struct super_block *sb, unsigned ino, struct buffer_head **bh) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct sysv_inode *res; int block = sbi->s_firstinodezone + sbi->s_block_base; block += (ino-1) >> sbi->s_inodes_per_block_bits; *bh = sb_bread(sb, block); if (!*bh) return NULL; res = (struct sysv_inode *)(*bh)->b_data; return res + ((ino-1) & sbi->s_inodes_per_block_1); } static int refill_free_cache(struct super_block *sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct buffer_head * bh; struct sysv_inode * raw_inode; int i = 0, ino; ino = SYSV_ROOT_INO+1; raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto out; while (ino <= sbi->s_ninodes) { if (raw_inode->i_mode == 0 && 
raw_inode->i_nlink == 0) { *sv_sb_fic_inode(sb,i++) = cpu_to_fs16(SYSV_SB(sb), ino); if (i == sbi->s_fic_size) break; } if ((ino++ & sbi->s_inodes_per_block_1) == 0) { brelse(bh); raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto out; } else raw_inode++; } brelse(bh); out: return i; } void sysv_free_inode(struct inode * inode) { struct super_block *sb = inode->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); unsigned int ino; struct buffer_head * bh; struct sysv_inode * raw_inode; unsigned count; sb = inode->i_sb; ino = inode->i_ino; if (ino <= SYSV_ROOT_INO || ino > sbi->s_ninodes) { printk("sysv_free_inode: inode 0,1,2 or nonexistent inode\n"); return; } raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) { printk("sysv_free_inode: unable to read inode block on device " "%s\n", inode->i_sb->s_id); return; } lock_super(sb); count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count); if (count < sbi->s_fic_size) { *sv_sb_fic_inode(sb,count++) = cpu_to_fs16(sbi, ino); *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count); } fs16_add(sbi, sbi->s_sb_total_free_inodes, 1); dirty_sb(sb); memset(raw_inode, 0, sizeof(struct sysv_inode)); mark_buffer_dirty(bh); unlock_super(sb); brelse(bh); } struct inode * sysv_new_inode(const struct inode * dir, mode_t mode) { struct super_block *sb = dir->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); struct inode *inode; sysv_ino_t ino; unsigned count; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE }; inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); lock_super(sb); count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count); if (count == 0 || (*sv_sb_fic_inode(sb,count-1) == 0)) { count = refill_free_cache(sb); if (count == 0) { iput(inode); unlock_super(sb); return ERR_PTR(-ENOSPC); } } /* Now count > 0. 
*/ ino = *sv_sb_fic_inode(sb,--count); *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count); fs16_add(sbi, sbi->s_sb_total_free_inodes, -1); dirty_sb(sb); inode_init_owner(inode, dir, mode); inode->i_ino = fs16_to_cpu(sbi, ino); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; inode->i_blocks = 0; memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data)); SYSV_I(inode)->i_dir_start_lookup = 0; insert_inode_hash(inode); mark_inode_dirty(inode); sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */ mark_inode_dirty(inode); /* cleared by sysv_write_inode() */ /* That's it. */ unlock_super(sb); return inode; } unsigned long sysv_count_free_inodes(struct super_block * sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct buffer_head * bh; struct sysv_inode * raw_inode; int ino, count, sb_count; lock_super(sb); sb_count = fs16_to_cpu(sbi, *sbi->s_sb_total_free_inodes); if (0) goto trust_sb; /* this causes a lot of disk traffic ... */ count = 0; ino = SYSV_ROOT_INO+1; raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto Eio; while (ino <= sbi->s_ninodes) { if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0) count++; if ((ino++ & sbi->s_inodes_per_block_1) == 0) { brelse(bh); raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto Eio; } else raw_inode++; } brelse(bh); if (count != sb_count) goto Einval; out: unlock_super(sb); return count; Einval: printk("sysv_count_free_inodes: " "free inode count was %d, correcting to %d\n", sb_count, count); if (!(sb->s_flags & MS_RDONLY)) { *sbi->s_sb_total_free_inodes = cpu_to_fs16(SYSV_SB(sb), count); dirty_sb(sb); } goto out; Eio: printk("sysv_count_free_inodes: unable to read inode table\n"); trust_sb: count = sb_count; goto out; }
gpl-2.0
lewislone/kernel_reading
arch/ia64/sn/kernel/tiocx.c
4154
13224
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/proc_fs.h> #include <linux/capability.h> #include <linux/device.h> #include <linux/delay.h> #include <asm/uaccess.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> #include <asm/sn/io.h> #include <asm/sn/types.h> #include <asm/sn/shubio.h> #include <asm/sn/tiocx.h> #include <asm/sn/l1.h> #include <asm/sn/module.h> #include "tio.h" #include "xtalk/xwidgetdev.h" #include "xtalk/hubdev.h" #define CX_DEV_NONE 0 #define DEVICE_NAME "tiocx" #define WIDGET_ID 0 #define TIOCX_DEBUG 0 #if TIOCX_DEBUG #define DBG(fmt...) printk(KERN_ALERT fmt) #else #define DBG(fmt...) #endif struct device_attribute dev_attr_cxdev_control; /** * tiocx_match - Try to match driver id list with device. * @dev: device pointer * @drv: driver pointer * * Returns 1 if match, 0 otherwise. */ static int tiocx_match(struct device *dev, struct device_driver *drv) { struct cx_dev *cx_dev = to_cx_dev(dev); struct cx_drv *cx_drv = to_cx_driver(drv); const struct cx_device_id *ids = cx_drv->id_table; if (!ids) return 0; while (ids->part_num) { if (ids->part_num == cx_dev->cx_id.part_num) return 1; ids++; } return 0; } static int tiocx_uevent(struct device *dev, struct kobj_uevent_env *env) { return -ENODEV; } static void tiocx_bus_release(struct device *dev) { kfree(to_cx_dev(dev)); } /** * cx_device_match - Find cx_device in the id table. * @ids: id table from driver * @cx_device: part/mfg id for the device * */ static const struct cx_device_id *cx_device_match(const struct cx_device_id *ids, struct cx_dev *cx_device) { /* * NOTES: We may want to check for CX_ANY_ID too. * Do we want to match against nasid too? 
* CX_DEV_NONE == 0, if the driver tries to register for * part/mfg == 0 we should return no-match (NULL) here. */ while (ids->part_num && ids->mfg_num) { if (ids->part_num == cx_device->cx_id.part_num && ids->mfg_num == cx_device->cx_id.mfg_num) return ids; ids++; } return NULL; } /** * cx_device_probe - Look for matching device. * Call driver probe routine if found. * @cx_driver: driver table (cx_drv struct) from driver * @cx_device: part/mfg id for the device */ static int cx_device_probe(struct device *dev) { const struct cx_device_id *id; struct cx_drv *cx_drv = to_cx_driver(dev->driver); struct cx_dev *cx_dev = to_cx_dev(dev); int error = 0; if (!cx_dev->driver && cx_drv->probe) { id = cx_device_match(cx_drv->id_table, cx_dev); if (id) { if ((error = cx_drv->probe(cx_dev, id)) < 0) return error; else cx_dev->driver = cx_drv; } } return error; } /** * cx_driver_remove - Remove driver from device struct. * @dev: device */ static int cx_driver_remove(struct device *dev) { struct cx_dev *cx_dev = to_cx_dev(dev); struct cx_drv *cx_drv = cx_dev->driver; if (cx_drv->remove) cx_drv->remove(cx_dev); cx_dev->driver = NULL; return 0; } struct bus_type tiocx_bus_type = { .name = "tiocx", .match = tiocx_match, .uevent = tiocx_uevent, .probe = cx_device_probe, .remove = cx_driver_remove, }; /** * cx_driver_register - Register the driver. * @cx_driver: driver table (cx_drv struct) from driver * * Called from the driver init routine to register a driver. * The cx_drv struct contains the driver name, a pointer to * a table of part/mfg numbers and a pointer to the driver's * probe/attach routine. */ int cx_driver_register(struct cx_drv *cx_driver) { cx_driver->driver.name = cx_driver->name; cx_driver->driver.bus = &tiocx_bus_type; return driver_register(&cx_driver->driver); } /** * cx_driver_unregister - Unregister the driver. 
* @cx_driver: driver table (cx_drv struct) from driver */ int cx_driver_unregister(struct cx_drv *cx_driver) { driver_unregister(&cx_driver->driver); return 0; } /** * cx_device_register - Register a device. * @nasid: device's nasid * @part_num: device's part number * @mfg_num: device's manufacturer number * @hubdev: hub info associated with this device * @bt: board type of the device * */ int cx_device_register(nasid_t nasid, int part_num, int mfg_num, struct hubdev_info *hubdev, int bt) { struct cx_dev *cx_dev; int r; cx_dev = kzalloc(sizeof(struct cx_dev), GFP_KERNEL); DBG("cx_dev= 0x%p\n", cx_dev); if (cx_dev == NULL) return -ENOMEM; cx_dev->cx_id.part_num = part_num; cx_dev->cx_id.mfg_num = mfg_num; cx_dev->cx_id.nasid = nasid; cx_dev->hubdev = hubdev; cx_dev->bt = bt; cx_dev->dev.parent = NULL; cx_dev->dev.bus = &tiocx_bus_type; cx_dev->dev.release = tiocx_bus_release; dev_set_name(&cx_dev->dev, "%d", cx_dev->cx_id.nasid); r = device_register(&cx_dev->dev); if (r) { kfree(cx_dev); return r; } get_device(&cx_dev->dev); device_create_file(&cx_dev->dev, &dev_attr_cxdev_control); return 0; } /** * cx_device_unregister - Unregister a device. * @cx_dev: part/mfg id for the device */ int cx_device_unregister(struct cx_dev *cx_dev) { put_device(&cx_dev->dev); device_unregister(&cx_dev->dev); return 0; } /** * cx_device_reload - Reload the device. * @nasid: device's nasid * @part_num: device's part number * @mfg_num: device's manufacturer number * * Remove the device associated with 'nasid' from device list and then * call device-register with the given part/mfg numbers. 
*/ static int cx_device_reload(struct cx_dev *cx_dev) { cx_device_unregister(cx_dev); return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num, cx_dev->hubdev, cx_dev->bt); } static inline u64 tiocx_intr_alloc(nasid_t nasid, int widget, u64 sn_irq_info, int req_irq, nasid_t req_nasid, int req_slice) { struct ia64_sal_retval rv; rv.status = 0; rv.v0 = 0; ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT, SAL_INTR_ALLOC, nasid, widget, sn_irq_info, req_irq, req_nasid, req_slice); return rv.status; } static inline void tiocx_intr_free(nasid_t nasid, int widget, struct sn_irq_info *sn_irq_info) { struct ia64_sal_retval rv; rv.status = 0; rv.v0 = 0; ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT, SAL_INTR_FREE, nasid, widget, sn_irq_info->irq_irq, sn_irq_info->irq_cookie, 0, 0); } struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq, nasid_t req_nasid, int slice) { struct sn_irq_info *sn_irq_info; int status; int sn_irq_size = sizeof(struct sn_irq_info); if ((nasid & 1) == 0) return NULL; sn_irq_info = kzalloc(sn_irq_size, GFP_KERNEL); if (sn_irq_info == NULL) return NULL; status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq, req_nasid, slice); if (status) { kfree(sn_irq_info); return NULL; } else { return sn_irq_info; } } void tiocx_irq_free(struct sn_irq_info *sn_irq_info) { u64 bridge = (u64) sn_irq_info->irq_bridge; nasid_t nasid = NASID_GET(bridge); int widget; if (nasid & 1) { widget = TIO_SWIN_WIDGETNUM(bridge); tiocx_intr_free(nasid, widget, sn_irq_info); kfree(sn_irq_info); } } u64 tiocx_dma_addr(u64 addr) { return PHYS_TO_TIODMA(addr); } u64 tiocx_swin_base(int nasid) { return TIO_SWIN_BASE(nasid, TIOCX_CORELET); } EXPORT_SYMBOL(cx_driver_register); EXPORT_SYMBOL(cx_driver_unregister); EXPORT_SYMBOL(cx_device_register); EXPORT_SYMBOL(cx_device_unregister); EXPORT_SYMBOL(tiocx_irq_alloc); EXPORT_SYMBOL(tiocx_irq_free); EXPORT_SYMBOL(tiocx_bus_type); EXPORT_SYMBOL(tiocx_dma_addr); 
EXPORT_SYMBOL(tiocx_swin_base); static void tio_conveyor_set(nasid_t nasid, int enable_flag) { u64 ice_frz; u64 disable_cb = (1ull << 61); if (!(nasid & 1)) return; ice_frz = REMOTE_HUB_L(nasid, TIO_ICE_FRZ_CFG); if (enable_flag) { if (!(ice_frz & disable_cb)) /* already enabled */ return; ice_frz &= ~disable_cb; } else { if (ice_frz & disable_cb) /* already disabled */ return; ice_frz |= disable_cb; } DBG(KERN_ALERT "TIO_ICE_FRZ_CFG= 0x%lx\n", ice_frz); REMOTE_HUB_S(nasid, TIO_ICE_FRZ_CFG, ice_frz); } #define tio_conveyor_enable(nasid) tio_conveyor_set(nasid, 1) #define tio_conveyor_disable(nasid) tio_conveyor_set(nasid, 0) static void tio_corelet_reset(nasid_t nasid, int corelet) { if (!(nasid & 1)) return; REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 1 << corelet); udelay(2000); REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 0); udelay(2000); } static int is_fpga_tio(int nasid, int *bt) { u16 uninitialized_var(ioboard_type); /* GCC be quiet */ long rc; rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard_type); if (rc) { printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n", rc); return 0; } switch (ioboard_type) { case L1_BRICKTYPE_SA: case L1_BRICKTYPE_ATHENA: case L1_BOARDTYPE_DAYTONA: *bt = ioboard_type; return 1; } return 0; } static int bitstream_loaded(nasid_t nasid) { u64 cx_credits; cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3); cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK; DBG("cx_credits= 0x%lx\n", cx_credits); return (cx_credits == 0xf) ? 
1 : 0; } static int tiocx_reload(struct cx_dev *cx_dev) { int part_num = CX_DEV_NONE; int mfg_num = CX_DEV_NONE; nasid_t nasid = cx_dev->cx_id.nasid; if (bitstream_loaded(nasid)) { u64 cx_id; int rv; rv = ia64_sn_sysctl_tio_clock_reset(nasid); if (rv) { printk(KERN_ALERT "CX port JTAG reset failed.\n"); } else { cx_id = *(volatile u64 *) (TIO_SWIN_BASE(nasid, TIOCX_CORELET) + WIDGET_ID); part_num = XWIDGET_PART_NUM(cx_id); mfg_num = XWIDGET_MFG_NUM(cx_id); DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num); /* just ignore it if it's a CE */ if (part_num == TIO_CE_ASIC_PARTNUM) return 0; } } cx_dev->cx_id.part_num = part_num; cx_dev->cx_id.mfg_num = mfg_num; /* * Delete old device and register the new one. It's ok if * part_num/mfg_num == CX_DEV_NONE. We want to register * devices in the table even if a bitstream isn't loaded. * That allows use to see that a bitstream isn't loaded via * TIOCX_IOCTL_DEV_LIST. */ return cx_device_reload(cx_dev); } static ssize_t show_cxdev_control(struct device *dev, struct device_attribute *attr, char *buf) { struct cx_dev *cx_dev = to_cx_dev(dev); return sprintf(buf, "0x%x 0x%x 0x%x 0x%x\n", cx_dev->cx_id.nasid, cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num, cx_dev->bt); } static ssize_t store_cxdev_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int n; struct cx_dev *cx_dev = to_cx_dev(dev); if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 0) return 0; n = simple_strtoul(buf, NULL, 0); switch (n) { case 1: tio_corelet_reset(cx_dev->cx_id.nasid, TIOCX_CORELET); tiocx_reload(cx_dev); break; case 2: tiocx_reload(cx_dev); break; case 3: tio_corelet_reset(cx_dev->cx_id.nasid, TIOCX_CORELET); break; default: break; } return count; } DEVICE_ATTR(cxdev_control, 0644, show_cxdev_control, store_cxdev_control); static int __init tiocx_init(void) { cnodeid_t cnodeid; int found_tiocx_device = 0; int err; if (!ia64_platform_is("sn2")) return 0; err = bus_register(&tiocx_bus_type); if (err) 
return err; for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) { nasid_t nasid; int bt; nasid = cnodeid_to_nasid(cnodeid); if ((nasid & 0x1) && is_fpga_tio(nasid, &bt)) { struct hubdev_info *hubdev; struct xwidget_info *widgetp; DBG("Found TIO at nasid 0x%x\n", nasid); hubdev = (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo); widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET]; /* The CE hangs off of the CX port but is not an FPGA */ if (widgetp->xwi_hwid.part_num == TIO_CE_ASIC_PARTNUM) continue; tio_corelet_reset(nasid, TIOCX_CORELET); tio_conveyor_enable(nasid); if (cx_device_register (nasid, widgetp->xwi_hwid.part_num, widgetp->xwi_hwid.mfg_num, hubdev, bt) < 0) return -ENXIO; else found_tiocx_device++; } } /* It's ok if we find zero devices. */ DBG("found_tiocx_device= %d\n", found_tiocx_device); return 0; } static int cx_remove_device(struct device * dev, void * data) { struct cx_dev *cx_dev = to_cx_dev(dev); device_remove_file(dev, &dev_attr_cxdev_control); cx_device_unregister(cx_dev); return 0; } static void __exit tiocx_exit(void) { DBG("tiocx_exit\n"); /* * Unregister devices. */ bus_for_each_dev(&tiocx_bus_type, NULL, NULL, cx_remove_device); bus_unregister(&tiocx_bus_type); } fs_initcall(tiocx_init); module_exit(tiocx_exit); /************************************************************************ * Module licensing and description ************************************************************************/ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>"); MODULE_DESCRIPTION("TIOCX module"); MODULE_SUPPORTED_DEVICE(DEVICE_NAME);
gpl-2.0
bensonhsu2013/old_samsung-lt02wifi-kernel
arch/sh/drivers/dma/dma-sh.c
4666
8054
/* * arch/sh/drivers/dma/dma-sh.c * * SuperH On-chip DMAC Support * * Copyright (C) 2000 Takashi YOSHII * Copyright (C) 2003, 2004 Paul Mundt * Copyright (C) 2005 Andriy Skulysh * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <mach-dreamcast/mach/dma.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/dma-sh.h> #if defined(DMAE1_IRQ) #define NR_DMAE 2 #else #define NR_DMAE 1 #endif static const char *dmae_name[] = { "DMAC Address Error0", "DMAC Address Error1" }; static inline unsigned int get_dmte_irq(unsigned int chan) { unsigned int irq = 0; if (chan < ARRAY_SIZE(dmte_irq_map)) irq = dmte_irq_map[chan]; #if defined(CONFIG_SH_DMA_IRQ_MULTI) if (irq > DMTE6_IRQ) return DMTE6_IRQ; return DMTE0_IRQ; #else return irq; #endif } /* * We determine the correct shift size based off of the CHCR transmit size * for the given channel. Since we know that it will take: * * info->count >> ts_shift[transmit_size] * * iterations to complete the transfer. */ static unsigned int ts_shift[] = TS_SHIFT; static inline unsigned int calc_xmit_shift(struct dma_channel *chan) { u32 chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR); int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); return ts_shift[cnt]; } /* * The transfer end interrupt must read the chcr register to end the * hardware interrupt active condition. * Besides that it needs to waken any waiting process, which should handle * setting up the next transfer. 
*/ static irqreturn_t dma_tei(int irq, void *dev_id) { struct dma_channel *chan = dev_id; u32 chcr; chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR); if (!(chcr & CHCR_TE)) return IRQ_NONE; chcr &= ~(CHCR_IE | CHCR_DE); __raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR)); wake_up(&chan->wait_queue); return IRQ_HANDLED; } static int sh_dmac_request_dma(struct dma_channel *chan) { if (unlikely(!(chan->flags & DMA_TEI_CAPABLE))) return 0; return request_irq(get_dmte_irq(chan->chan), dma_tei, #if defined(CONFIG_SH_DMA_IRQ_MULTI) IRQF_SHARED, #else 0, #endif chan->dev_id, chan); } static void sh_dmac_free_dma(struct dma_channel *chan) { free_irq(get_dmte_irq(chan->chan), chan); } static int sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr) { if (!chcr) chcr = RS_DUAL | CHCR_IE; if (chcr & CHCR_IE) { chcr &= ~CHCR_IE; chan->flags |= DMA_TEI_CAPABLE; } else { chan->flags &= ~DMA_TEI_CAPABLE; } __raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR)); chan->flags |= DMA_CONFIGURED; return 0; } static void sh_dmac_enable_dma(struct dma_channel *chan) { int irq; u32 chcr; chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR); chcr |= CHCR_DE; if (chan->flags & DMA_TEI_CAPABLE) chcr |= CHCR_IE; __raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR)); if (chan->flags & DMA_TEI_CAPABLE) { irq = get_dmte_irq(chan->chan); enable_irq(irq); } } static void sh_dmac_disable_dma(struct dma_channel *chan) { int irq; u32 chcr; if (chan->flags & DMA_TEI_CAPABLE) { irq = get_dmte_irq(chan->chan); disable_irq(irq); } chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR); chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); __raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR)); } static int sh_dmac_xfer_dma(struct dma_channel *chan) { /* * If we haven't pre-configured the channel with special flags, use * the defaults. */ if (unlikely(!(chan->flags & DMA_CONFIGURED))) sh_dmac_configure_channel(chan, 0); sh_dmac_disable_dma(chan); /* * Single-address mode usage note! 
* * It's important that we don't accidentally write any value to SAR/DAR * (this includes 0) that hasn't been directly specified by the user if * we're in single-address mode. * * In this case, only one address can be defined, anything else will * result in a DMA address error interrupt (at least on the SH-4), * which will subsequently halt the transfer. * * Channel 2 on the Dreamcast is a special case, as this is used for * cascading to the PVR2 DMAC. In this case, we still need to write * SAR and DAR, regardless of value, in order for cascading to work. */ if (chan->sar || (mach_is_dreamcast() && chan->chan == PVR2_CASCADE_CHAN)) __raw_writel(chan->sar, (dma_base_addr[chan->chan]+SAR)); if (chan->dar || (mach_is_dreamcast() && chan->chan == PVR2_CASCADE_CHAN)) __raw_writel(chan->dar, (dma_base_addr[chan->chan] + DAR)); __raw_writel(chan->count >> calc_xmit_shift(chan), (dma_base_addr[chan->chan] + TCR)); sh_dmac_enable_dma(chan); return 0; } static int sh_dmac_get_dma_residue(struct dma_channel *chan) { if (!(__raw_readl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE)) return 0; return __raw_readl(dma_base_addr[chan->chan] + TCR) << calc_xmit_shift(chan); } static inline int dmaor_reset(int no) { unsigned long dmaor = dmaor_read_reg(no); /* Try to clear the error flags first, incase they are set */ dmaor &= ~(DMAOR_NMIF | DMAOR_AE); dmaor_write_reg(no, dmaor); dmaor |= DMAOR_INIT; dmaor_write_reg(no, dmaor); /* See if we got an error again */ if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) { printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); return -EINVAL; } return 0; } #if defined(CONFIG_CPU_SH4) static irqreturn_t dma_err(int irq, void *dummy) { #if defined(CONFIG_SH_DMA_IRQ_MULTI) int cnt = 0; switch (irq) { #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) case DMTE6_IRQ: cnt++; #endif case DMTE0_IRQ: if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { disable_irq(irq); /* DMA multi and error IRQ */ return IRQ_HANDLED; } default: return IRQ_NONE; } #else 
dmaor_reset(0); #if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) dmaor_reset(1); #endif disable_irq(irq); return IRQ_HANDLED; #endif } #endif static struct dma_ops sh_dmac_ops = { .request = sh_dmac_request_dma, .free = sh_dmac_free_dma, .get_residue = sh_dmac_get_dma_residue, .xfer = sh_dmac_xfer_dma, .configure = sh_dmac_configure_channel, }; static struct dma_info sh_dmac_info = { .name = "sh_dmac", .nr_channels = CONFIG_NR_ONCHIP_DMA_CHANNELS, .ops = &sh_dmac_ops, .flags = DMAC_CHANNELS_TEI_CAPABLE, }; #ifdef CONFIG_CPU_SH4 static unsigned int get_dma_error_irq(int n) { #if defined(CONFIG_SH_DMA_IRQ_MULTI) return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6); #else return (n == 0) ? DMAE0_IRQ : #if defined(DMAE1_IRQ) DMAE1_IRQ; #else -1; #endif #endif } #endif static int __init sh_dmac_init(void) { struct dma_info *info = &sh_dmac_info; int i; #ifdef CONFIG_CPU_SH4 int n; for (n = 0; n < NR_DMAE; n++) { i = request_irq(get_dma_error_irq(n), dma_err, #if defined(CONFIG_SH_DMA_IRQ_MULTI) IRQF_SHARED, #else 0, #endif dmae_name[n], (void *)dmae_name[n]); if (unlikely(i < 0)) { printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]); return i; } } #endif /* CONFIG_CPU_SH4 */ /* * Initialize DMAOR, and clean up any error flags that may have * been set. 
*/ i = dmaor_reset(0); if (unlikely(i != 0)) return i; #if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) i = dmaor_reset(1); if (unlikely(i != 0)) return i; #endif return register_dmac(info); } static void __exit sh_dmac_exit(void) { #ifdef CONFIG_CPU_SH4 int n; for (n = 0; n < NR_DMAE; n++) { free_irq(get_dma_error_irq(n), (void *)dmae_name[n]); } #endif /* CONFIG_CPU_SH4 */ unregister_dmac(&sh_dmac_info); } subsys_initcall(sh_dmac_init); module_exit(sh_dmac_exit); MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh"); MODULE_DESCRIPTION("SuperH On-Chip DMAC Support"); MODULE_LICENSE("GPL");
gpl-2.0
nbars/SM-P600-linux-kernel
arch/arm/mach-tegra/board-dt-tegra20.c
4666
4876
/* * nVidia Tegra device tree board support * * Copyright (C) 2010 Secret Lab Technologies, Ltd. * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/pda_power.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-tegra.h> #include <asm/hardware/gic.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/setup.h> #include <asm/hardware/gic.h> #include <mach/iomap.h> #include <mach/irqs.h> #include "board.h" #include "board-harmony.h" #include "clock.h" #include "devices.h" void harmony_pinmux_init(void); void paz00_pinmux_init(void); void seaboard_pinmux_init(void); void trimslice_pinmux_init(void); void ventana_pinmux_init(void); struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("nvidia,tegra20-pinmux", TEGRA_APB_MISC_BASE + 0x14, "tegra-pinmux", NULL), OF_DEV_AUXDATA("nvidia,tegra20-gpio", TEGRA_GPIO_BASE, "tegra-gpio", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC1_BASE, "sdhci-tegra.0", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC2_BASE, "sdhci-tegra.1", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC3_BASE, "sdhci-tegra.2", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", 
TEGRA_SDMMC4_BASE, "sdhci-tegra.3", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C_BASE, "tegra-i2c.0", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C2_BASE, "tegra-i2c.1", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C3_BASE, "tegra-i2c.2", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c-dvc", TEGRA_DVC_BASE, "tegra-i2c.3", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S1_BASE, "tegra-i2s.0", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S2_BASE, "tegra-i2s.1", NULL), OF_DEV_AUXDATA("nvidia,tegra20-das", TEGRA_APB_MISC_DAS_BASE, "tegra-das", NULL), OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB_BASE, "tegra-ehci.0", &tegra_ehci1_pdata), OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB2_BASE, "tegra-ehci.1", &tegra_ehci2_pdata), OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB3_BASE, "tegra-ehci.2", &tegra_ehci3_pdata), {} }; static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = { /* name parent rate enabled */ { "uartd", "pll_p", 216000000, true }, { "usbd", "clk_m", 12000000, false }, { "usb2", "clk_m", 12000000, false }, { "usb3", "clk_m", 12000000, false }, { "pll_a", "pll_p_out1", 56448000, true }, { "pll_a_out0", "pll_a", 11289600, true }, { "cdev1", NULL, 0, true }, { "i2s1", "pll_a_out0", 11289600, false}, { "i2s2", "pll_a_out0", 11289600, false}, { NULL, NULL, 0, 0}, }; static struct of_device_id tegra_dt_match_table[] __initdata = { { .compatible = "simple-bus", }, {} }; static struct { char *machine; void (*init)(void); } pinmux_configs[] = { { "compulab,trimslice", trimslice_pinmux_init }, { "nvidia,harmony", harmony_pinmux_init }, { "compal,paz00", paz00_pinmux_init }, { "nvidia,seaboard", seaboard_pinmux_init }, { "nvidia,ventana", ventana_pinmux_init }, }; static void __init tegra_dt_init(void) { int i; tegra_clk_init_from_table(tegra_dt_clk_init_table); for (i = 0; i < ARRAY_SIZE(pinmux_configs); i++) { if (of_machine_is_compatible(pinmux_configs[i].machine)) { pinmux_configs[i].init(); break; } } WARN(i 
== ARRAY_SIZE(pinmux_configs), "Unknown platform! Pinmuxing not initialized\n"); /* * Finished with the static registrations now; fill in the missing * devices */ of_platform_populate(NULL, tegra_dt_match_table, tegra20_auxdata_lookup, NULL); } static const char *tegra20_dt_board_compat[] = { "nvidia,tegra20", NULL }; DT_MACHINE_START(TEGRA_DT, "nVidia Tegra20 (Flattened Device Tree)") .map_io = tegra_map_common_io, .init_early = tegra20_init_early, .init_irq = tegra_dt_init_irq, .handle_irq = gic_handle_irq, .timer = &tegra_timer, .init_machine = tegra_dt_init, .restart = tegra_assert_system_reset, .dt_compat = tegra20_dt_board_compat, MACHINE_END
gpl-2.0
Vachounet/android_kernel_acer_hemingway
arch/arm/mach-tegra/board-dt-tegra20.c
4666
4876
/* * nVidia Tegra device tree board support * * Copyright (C) 2010 Secret Lab Technologies, Ltd. * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/pda_power.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-tegra.h> #include <asm/hardware/gic.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/setup.h> #include <asm/hardware/gic.h> #include <mach/iomap.h> #include <mach/irqs.h> #include "board.h" #include "board-harmony.h" #include "clock.h" #include "devices.h" void harmony_pinmux_init(void); void paz00_pinmux_init(void); void seaboard_pinmux_init(void); void trimslice_pinmux_init(void); void ventana_pinmux_init(void); struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("nvidia,tegra20-pinmux", TEGRA_APB_MISC_BASE + 0x14, "tegra-pinmux", NULL), OF_DEV_AUXDATA("nvidia,tegra20-gpio", TEGRA_GPIO_BASE, "tegra-gpio", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC1_BASE, "sdhci-tegra.0", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC2_BASE, "sdhci-tegra.1", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC3_BASE, "sdhci-tegra.2", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", 
TEGRA_SDMMC4_BASE, "sdhci-tegra.3", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C_BASE, "tegra-i2c.0", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C2_BASE, "tegra-i2c.1", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C3_BASE, "tegra-i2c.2", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c-dvc", TEGRA_DVC_BASE, "tegra-i2c.3", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S1_BASE, "tegra-i2s.0", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S2_BASE, "tegra-i2s.1", NULL), OF_DEV_AUXDATA("nvidia,tegra20-das", TEGRA_APB_MISC_DAS_BASE, "tegra-das", NULL), OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB_BASE, "tegra-ehci.0", &tegra_ehci1_pdata), OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB2_BASE, "tegra-ehci.1", &tegra_ehci2_pdata), OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB3_BASE, "tegra-ehci.2", &tegra_ehci3_pdata), {} }; static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = { /* name parent rate enabled */ { "uartd", "pll_p", 216000000, true }, { "usbd", "clk_m", 12000000, false }, { "usb2", "clk_m", 12000000, false }, { "usb3", "clk_m", 12000000, false }, { "pll_a", "pll_p_out1", 56448000, true }, { "pll_a_out0", "pll_a", 11289600, true }, { "cdev1", NULL, 0, true }, { "i2s1", "pll_a_out0", 11289600, false}, { "i2s2", "pll_a_out0", 11289600, false}, { NULL, NULL, 0, 0}, }; static struct of_device_id tegra_dt_match_table[] __initdata = { { .compatible = "simple-bus", }, {} }; static struct { char *machine; void (*init)(void); } pinmux_configs[] = { { "compulab,trimslice", trimslice_pinmux_init }, { "nvidia,harmony", harmony_pinmux_init }, { "compal,paz00", paz00_pinmux_init }, { "nvidia,seaboard", seaboard_pinmux_init }, { "nvidia,ventana", ventana_pinmux_init }, }; static void __init tegra_dt_init(void) { int i; tegra_clk_init_from_table(tegra_dt_clk_init_table); for (i = 0; i < ARRAY_SIZE(pinmux_configs); i++) { if (of_machine_is_compatible(pinmux_configs[i].machine)) { pinmux_configs[i].init(); break; } } WARN(i 
== ARRAY_SIZE(pinmux_configs), "Unknown platform! Pinmuxing not initialized\n"); /* * Finished with the static registrations now; fill in the missing * devices */ of_platform_populate(NULL, tegra_dt_match_table, tegra20_auxdata_lookup, NULL); } static const char *tegra20_dt_board_compat[] = { "nvidia,tegra20", NULL }; DT_MACHINE_START(TEGRA_DT, "nVidia Tegra20 (Flattened Device Tree)") .map_io = tegra_map_common_io, .init_early = tegra20_init_early, .init_irq = tegra_dt_init_irq, .handle_irq = gic_handle_irq, .timer = &tegra_timer, .init_machine = tegra_dt_init, .restart = tegra_assert_system_reset, .dt_compat = tegra20_dt_board_compat, MACHINE_END
gpl-2.0
Potin/linux-am33x-04.06.00.10
arch/sh/drivers/dma/dma-sh.c
4666
8054
/* * arch/sh/drivers/dma/dma-sh.c * * SuperH On-chip DMAC Support * * Copyright (C) 2000 Takashi YOSHII * Copyright (C) 2003, 2004 Paul Mundt * Copyright (C) 2005 Andriy Skulysh * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <mach-dreamcast/mach/dma.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/dma-sh.h> #if defined(DMAE1_IRQ) #define NR_DMAE 2 #else #define NR_DMAE 1 #endif static const char *dmae_name[] = { "DMAC Address Error0", "DMAC Address Error1" }; static inline unsigned int get_dmte_irq(unsigned int chan) { unsigned int irq = 0; if (chan < ARRAY_SIZE(dmte_irq_map)) irq = dmte_irq_map[chan]; #if defined(CONFIG_SH_DMA_IRQ_MULTI) if (irq > DMTE6_IRQ) return DMTE6_IRQ; return DMTE0_IRQ; #else return irq; #endif } /* * We determine the correct shift size based off of the CHCR transmit size * for the given channel. Since we know that it will take: * * info->count >> ts_shift[transmit_size] * * iterations to complete the transfer. */ static unsigned int ts_shift[] = TS_SHIFT; static inline unsigned int calc_xmit_shift(struct dma_channel *chan) { u32 chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR); int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); return ts_shift[cnt]; } /* * The transfer end interrupt must read the chcr register to end the * hardware interrupt active condition. * Besides that it needs to waken any waiting process, which should handle * setting up the next transfer. 
*/ static irqreturn_t dma_tei(int irq, void *dev_id) { struct dma_channel *chan = dev_id; u32 chcr; chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR); if (!(chcr & CHCR_TE)) return IRQ_NONE; chcr &= ~(CHCR_IE | CHCR_DE); __raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR)); wake_up(&chan->wait_queue); return IRQ_HANDLED; } static int sh_dmac_request_dma(struct dma_channel *chan) { if (unlikely(!(chan->flags & DMA_TEI_CAPABLE))) return 0; return request_irq(get_dmte_irq(chan->chan), dma_tei, #if defined(CONFIG_SH_DMA_IRQ_MULTI) IRQF_SHARED, #else 0, #endif chan->dev_id, chan); } static void sh_dmac_free_dma(struct dma_channel *chan) { free_irq(get_dmte_irq(chan->chan), chan); } static int sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr) { if (!chcr) chcr = RS_DUAL | CHCR_IE; if (chcr & CHCR_IE) { chcr &= ~CHCR_IE; chan->flags |= DMA_TEI_CAPABLE; } else { chan->flags &= ~DMA_TEI_CAPABLE; } __raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR)); chan->flags |= DMA_CONFIGURED; return 0; } static void sh_dmac_enable_dma(struct dma_channel *chan) { int irq; u32 chcr; chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR); chcr |= CHCR_DE; if (chan->flags & DMA_TEI_CAPABLE) chcr |= CHCR_IE; __raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR)); if (chan->flags & DMA_TEI_CAPABLE) { irq = get_dmte_irq(chan->chan); enable_irq(irq); } } static void sh_dmac_disable_dma(struct dma_channel *chan) { int irq; u32 chcr; if (chan->flags & DMA_TEI_CAPABLE) { irq = get_dmte_irq(chan->chan); disable_irq(irq); } chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR); chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); __raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR)); } static int sh_dmac_xfer_dma(struct dma_channel *chan) { /* * If we haven't pre-configured the channel with special flags, use * the defaults. */ if (unlikely(!(chan->flags & DMA_CONFIGURED))) sh_dmac_configure_channel(chan, 0); sh_dmac_disable_dma(chan); /* * Single-address mode usage note! 
* * It's important that we don't accidentally write any value to SAR/DAR * (this includes 0) that hasn't been directly specified by the user if * we're in single-address mode. * * In this case, only one address can be defined, anything else will * result in a DMA address error interrupt (at least on the SH-4), * which will subsequently halt the transfer. * * Channel 2 on the Dreamcast is a special case, as this is used for * cascading to the PVR2 DMAC. In this case, we still need to write * SAR and DAR, regardless of value, in order for cascading to work. */ if (chan->sar || (mach_is_dreamcast() && chan->chan == PVR2_CASCADE_CHAN)) __raw_writel(chan->sar, (dma_base_addr[chan->chan]+SAR)); if (chan->dar || (mach_is_dreamcast() && chan->chan == PVR2_CASCADE_CHAN)) __raw_writel(chan->dar, (dma_base_addr[chan->chan] + DAR)); __raw_writel(chan->count >> calc_xmit_shift(chan), (dma_base_addr[chan->chan] + TCR)); sh_dmac_enable_dma(chan); return 0; } static int sh_dmac_get_dma_residue(struct dma_channel *chan) { if (!(__raw_readl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE)) return 0; return __raw_readl(dma_base_addr[chan->chan] + TCR) << calc_xmit_shift(chan); } static inline int dmaor_reset(int no) { unsigned long dmaor = dmaor_read_reg(no); /* Try to clear the error flags first, incase they are set */ dmaor &= ~(DMAOR_NMIF | DMAOR_AE); dmaor_write_reg(no, dmaor); dmaor |= DMAOR_INIT; dmaor_write_reg(no, dmaor); /* See if we got an error again */ if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) { printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); return -EINVAL; } return 0; } #if defined(CONFIG_CPU_SH4) static irqreturn_t dma_err(int irq, void *dummy) { #if defined(CONFIG_SH_DMA_IRQ_MULTI) int cnt = 0; switch (irq) { #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) case DMTE6_IRQ: cnt++; #endif case DMTE0_IRQ: if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { disable_irq(irq); /* DMA multi and error IRQ */ return IRQ_HANDLED; } default: return IRQ_NONE; } #else 
dmaor_reset(0); #if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) dmaor_reset(1); #endif disable_irq(irq); return IRQ_HANDLED; #endif } #endif static struct dma_ops sh_dmac_ops = { .request = sh_dmac_request_dma, .free = sh_dmac_free_dma, .get_residue = sh_dmac_get_dma_residue, .xfer = sh_dmac_xfer_dma, .configure = sh_dmac_configure_channel, }; static struct dma_info sh_dmac_info = { .name = "sh_dmac", .nr_channels = CONFIG_NR_ONCHIP_DMA_CHANNELS, .ops = &sh_dmac_ops, .flags = DMAC_CHANNELS_TEI_CAPABLE, }; #ifdef CONFIG_CPU_SH4 static unsigned int get_dma_error_irq(int n) { #if defined(CONFIG_SH_DMA_IRQ_MULTI) return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6); #else return (n == 0) ? DMAE0_IRQ : #if defined(DMAE1_IRQ) DMAE1_IRQ; #else -1; #endif #endif } #endif static int __init sh_dmac_init(void) { struct dma_info *info = &sh_dmac_info; int i; #ifdef CONFIG_CPU_SH4 int n; for (n = 0; n < NR_DMAE; n++) { i = request_irq(get_dma_error_irq(n), dma_err, #if defined(CONFIG_SH_DMA_IRQ_MULTI) IRQF_SHARED, #else 0, #endif dmae_name[n], (void *)dmae_name[n]); if (unlikely(i < 0)) { printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]); return i; } } #endif /* CONFIG_CPU_SH4 */ /* * Initialize DMAOR, and clean up any error flags that may have * been set. 
*/ i = dmaor_reset(0); if (unlikely(i != 0)) return i; #if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) i = dmaor_reset(1); if (unlikely(i != 0)) return i; #endif return register_dmac(info); } static void __exit sh_dmac_exit(void) { #ifdef CONFIG_CPU_SH4 int n; for (n = 0; n < NR_DMAE; n++) { free_irq(get_dma_error_irq(n), (void *)dmae_name[n]); } #endif /* CONFIG_CPU_SH4 */ unregister_dmac(&sh_dmac_info); } subsys_initcall(sh_dmac_init); module_exit(sh_dmac_exit); MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh"); MODULE_DESCRIPTION("SuperH On-Chip DMAC Support"); MODULE_LICENSE("GPL");
gpl-2.0
ansebovi/SmartDeviL_XMD
drivers/atm/eni.c
4922
62738
/* drivers/atm/eni.c - Efficient Networks ENI155P device driver */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/skbuff.h> #include <linux/time.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/init.h> #include <linux/atm_eni.h> #include <linux/bitops.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/uaccess.h> #include <asm/string.h> #include <asm/byteorder.h> #include "tonga.h" #include "midway.h" #include "suni.h" #include "eni.h" #if !defined(__i386__) && !defined(__x86_64__) #ifndef ioremap_nocache #define ioremap_nocache(X,Y) ioremap(X,Y) #endif #endif /* * TODO: * * Show stoppers * none * * Minor * - OAM support * - fix bugs listed below */ /* * KNOWN BUGS: * * - may run into JK-JK bug and deadlock * - should allocate UBR channel first * - buffer space allocation algorithm is stupid * (RX: should be maxSDU+maxdelay*rate * TX: should be maxSDU+min(maxSDU,maxdelay*rate) ) * - doesn't support OAM cells * - eni_put_free may hang if not putting memory fragments that _complete_ * 2^n block (never happens in real life, though) */ #if 0 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) #else #define DPRINTK(format,args...) #endif #ifndef CONFIG_ATM_ENI_TUNE_BURST #define CONFIG_ATM_ENI_BURST_TX_8W #define CONFIG_ATM_ENI_BURST_RX_4W #endif #ifndef CONFIG_ATM_ENI_DEBUG #define NULLCHECK(x) #define EVENT(s,a,b) static void event_dump(void) { } #else /* * NULL pointer checking */ #define NULLCHECK(x) \ if ((unsigned long) (x) < 0x30) \ printk(KERN_CRIT #x "==0x%lx\n",(unsigned long) (x)) /* * Very extensive activity logging. Greatly improves bug detection speed but * costs a few Mbps if enabled. 
*/ #define EV 64 static const char *ev[EV]; static unsigned long ev_a[EV],ev_b[EV]; static int ec = 0; static void EVENT(const char *s,unsigned long a,unsigned long b) { ev[ec] = s; ev_a[ec] = a; ev_b[ec] = b; ec = (ec+1) % EV; } static void event_dump(void) { int n,i; for (n = 0; n < EV; n++) { i = (ec+n) % EV; printk(KERN_NOTICE); printk(ev[i] ? ev[i] : "(null)",ev_a[i],ev_b[i]); } } #endif /* CONFIG_ATM_ENI_DEBUG */ /* * NExx must not be equal at end * EExx may be equal at end * xxPJOK verify validity of pointer jumps * xxPMOK operating on a circular buffer of "c" words */ #define NEPJOK(a0,a1,b) \ ((a0) < (a1) ? (b) <= (a0) || (b) > (a1) : (b) <= (a0) && (b) > (a1)) #define EEPJOK(a0,a1,b) \ ((a0) < (a1) ? (b) < (a0) || (b) >= (a1) : (b) < (a0) && (b) >= (a1)) #define NEPMOK(a0,d,b,c) NEPJOK(a0,(a0+d) & (c-1),b) #define EEPMOK(a0,d,b,c) EEPJOK(a0,(a0+d) & (c-1),b) static int tx_complete = 0,dma_complete = 0,queued = 0,requeued = 0, backlogged = 0,rx_enqueued = 0,rx_dequeued = 0,pushed = 0,submitted = 0, putting = 0; static struct atm_dev *eni_boards = NULL; /* Read/write registers on card */ #define eni_in(r) readl(eni_dev->reg+(r)*4) #define eni_out(v,r) writel((v),eni_dev->reg+(r)*4) /*-------------------------------- utilities --------------------------------*/ static void dump_mem(struct eni_dev *eni_dev) { int i; for (i = 0; i < eni_dev->free_len; i++) printk(KERN_DEBUG " %d: %p %d\n",i, eni_dev->free_list[i].start, 1 << eni_dev->free_list[i].order); } static void dump(struct atm_dev *dev) { struct eni_dev *eni_dev; int i; eni_dev = ENI_DEV(dev); printk(KERN_NOTICE "Free memory\n"); dump_mem(eni_dev); printk(KERN_NOTICE "TX buffers\n"); for (i = 0; i < NR_CHAN; i++) if (eni_dev->tx[i].send) printk(KERN_NOTICE " TX %d @ %p: %ld\n",i, eni_dev->tx[i].send,eni_dev->tx[i].words*4); printk(KERN_NOTICE "RX buffers\n"); for (i = 0; i < 1024; i++) if (eni_dev->rx_map[i] && ENI_VCC(eni_dev->rx_map[i])->rx) printk(KERN_NOTICE " RX %d @ %p: %ld\n",i, 
ENI_VCC(eni_dev->rx_map[i])->recv, ENI_VCC(eni_dev->rx_map[i])->words*4); printk(KERN_NOTICE "----\n"); } static void eni_put_free(struct eni_dev *eni_dev, void __iomem *start, unsigned long size) { struct eni_free *list; int len,order; DPRINTK("init 0x%lx+%ld(0x%lx)\n",start,size,size); start += eni_dev->base_diff; list = eni_dev->free_list; len = eni_dev->free_len; while (size) { if (len >= eni_dev->free_list_size) { printk(KERN_CRIT "eni_put_free overflow (%p,%ld)\n", start,size); break; } for (order = 0; !(((unsigned long)start | size) & (1 << order)); order++); if (MID_MIN_BUF_SIZE > (1 << order)) { printk(KERN_CRIT "eni_put_free: order %d too small\n", order); break; } list[len].start = (void __iomem *) start; list[len].order = order; len++; start += 1 << order; size -= 1 << order; } eni_dev->free_len = len; /*dump_mem(eni_dev);*/ } static void __iomem *eni_alloc_mem(struct eni_dev *eni_dev, unsigned long *size) { struct eni_free *list; void __iomem *start; int len,i,order,best_order,index; list = eni_dev->free_list; len = eni_dev->free_len; if (*size < MID_MIN_BUF_SIZE) *size = MID_MIN_BUF_SIZE; if (*size > MID_MAX_BUF_SIZE) return NULL; for (order = 0; (1 << order) < *size; order++); DPRINTK("trying: %ld->%d\n",*size,order); best_order = 65; /* we don't have more than 2^64 of anything ... 
*/ index = 0; /* silence GCC */ for (i = 0; i < len; i++) if (list[i].order == order) { best_order = order; index = i; break; } else if (best_order > list[i].order && list[i].order > order) { best_order = list[i].order; index = i; } if (best_order == 65) return NULL; start = list[index].start-eni_dev->base_diff; list[index] = list[--len]; eni_dev->free_len = len; *size = 1 << order; eni_put_free(eni_dev,start+*size,(1 << best_order)-*size); DPRINTK("%ld bytes (order %d) at 0x%lx\n",*size,order,start); memset_io(start,0,*size); /* never leak data */ /*dump_mem(eni_dev);*/ return start; } static void eni_free_mem(struct eni_dev *eni_dev, void __iomem *start, unsigned long size) { struct eni_free *list; int len,i,order; start += eni_dev->base_diff; list = eni_dev->free_list; len = eni_dev->free_len; for (order = -1; size; order++) size >>= 1; DPRINTK("eni_free_mem: %p+0x%lx (order %d)\n",start,size,order); for (i = 0; i < len; i++) if (((unsigned long) list[i].start) == ((unsigned long)start^(1 << order)) && list[i].order == order) { DPRINTK("match[%d]: 0x%lx/0x%lx(0x%x), %d/%d\n",i, list[i].start,start,1 << order,list[i].order,order); list[i] = list[--len]; start = (void __iomem *) ((unsigned long) start & ~(unsigned long) (1 << order)); order++; i = -1; continue; } if (len >= eni_dev->free_list_size) { printk(KERN_ALERT "eni_free_mem overflow (%p,%d)\n",start, order); return; } list[len].start = start; list[len].order = order; eni_dev->free_len = len+1; /*dump_mem(eni_dev);*/ } /*----------------------------------- RX ------------------------------------*/ #define ENI_VCC_NOS ((struct atm_vcc *) 1) static void rx_ident_err(struct atm_vcc *vcc) { struct atm_dev *dev; struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; dev = vcc->dev; eni_dev = ENI_DEV(dev); /* immediately halt adapter */ eni_out(eni_in(MID_MC_S) & ~(MID_DMA_ENABLE | MID_TX_ENABLE | MID_RX_ENABLE),MID_MC_S); /* dump useful information */ eni_vcc = ENI_VCC(vcc); printk(KERN_ALERT DEV_LABEL "(itf %d): 
driver error - RX ident " "mismatch\n",dev->number); printk(KERN_ALERT " VCI %d, rxing %d, words %ld\n",vcc->vci, eni_vcc->rxing,eni_vcc->words); printk(KERN_ALERT " host descr 0x%lx, rx pos 0x%lx, descr value " "0x%x\n",eni_vcc->descr,eni_vcc->rx_pos, (unsigned) readl(eni_vcc->recv+eni_vcc->descr*4)); printk(KERN_ALERT " last %p, servicing %d\n",eni_vcc->last, eni_vcc->servicing); EVENT("---dump ends here---\n",0,0); printk(KERN_NOTICE "---recent events---\n"); event_dump(); ENI_DEV(dev)->fast = NULL; /* really stop it */ ENI_DEV(dev)->slow = NULL; skb_queue_head_init(&ENI_DEV(dev)->rx_queue); } static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, unsigned long skip,unsigned long size,unsigned long eff) { struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; u32 dma_rd,dma_wr; u32 dma[RX_DMA_BUF*2]; dma_addr_t paddr; unsigned long here; int i,j; eni_dev = ENI_DEV(vcc->dev); eni_vcc = ENI_VCC(vcc); paddr = 0; /* GCC, shut up */ if (skb) { paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len, PCI_DMA_FROMDEVICE); ENI_PRV_PADDR(skb) = paddr; if (paddr & 3) printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d has " "mis-aligned RX data (0x%lx)\n",vcc->dev->number, vcc->vci,(unsigned long) paddr); ENI_PRV_SIZE(skb) = size+skip; /* PDU plus descriptor */ ATM_SKB(skb)->vcc = vcc; } j = 0; if ((eff && skip) || 1) { /* @@@ actually, skip is always == 1 ... */ here = (eni_vcc->descr+skip) & (eni_vcc->words-1); dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; j++; } here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1); if (!eff) size += skip; else { unsigned long words; if (!size) { DPRINTK("strange things happen ...\n"); EVENT("strange things happen ... 
(skip=%ld,eff=%ld)\n", size,eff); } words = eff; if (paddr & 15) { unsigned long init; init = 4-((paddr & 15) >> 2); if (init > words) init = words; dma[j++] = MID_DT_WORD | (init << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += init << 2; words -= init; } #ifdef CONFIG_ATM_ENI_BURST_RX_16W /* may work with some PCI chipsets ... */ if (words & ~15) { dma[j++] = MID_DT_16W | ((words >> 4) << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += (words & ~15) << 2; words &= 15; } #endif #ifdef CONFIG_ATM_ENI_BURST_RX_8W /* works only with *some* PCI chipsets ... */ if (words & ~7) { dma[j++] = MID_DT_8W | ((words >> 3) << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += (words & ~7) << 2; words &= 7; } #endif #ifdef CONFIG_ATM_ENI_BURST_RX_4W /* recommended */ if (words & ~3) { dma[j++] = MID_DT_4W | ((words >> 2) << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += (words & ~3) << 2; words &= 3; } #endif #ifdef CONFIG_ATM_ENI_BURST_RX_2W /* probably useless if RX_4W, RX_8W, ... */ if (words & ~1) { dma[j++] = MID_DT_2W | ((words >> 1) << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += (words & ~1) << 2; words &= 1; } #endif if (words) { dma[j++] = MID_DT_WORD | (words << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; } } if (size != eff) { dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; j++; } if (!j || j > 2*RX_DMA_BUF) { printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n"); goto trouble; } dma[j-2] |= MID_DMA_END; j = j >> 1; dma_wr = eni_in(MID_DMA_WR_RX); dma_rd = eni_in(MID_DMA_RD_RX); /* * Can I move the dma_wr pointer by 2j+1 positions without overwriting * data that hasn't been read (position of dma_rd) yet ? 
*/ if (!NEPMOK(dma_wr,j+j+1,dma_rd,NR_DMA_RX)) { /* @@@ +1 is ugly */ printk(KERN_WARNING DEV_LABEL "(itf %d): RX DMA full\n", vcc->dev->number); goto trouble; } for (i = 0; i < j; i++) { writel(dma[i*2],eni_dev->rx_dma+dma_wr*8); writel(dma[i*2+1],eni_dev->rx_dma+dma_wr*8+4); dma_wr = (dma_wr+1) & (NR_DMA_RX-1); } if (skb) { ENI_PRV_POS(skb) = eni_vcc->descr+size+1; skb_queue_tail(&eni_dev->rx_queue,skb); eni_vcc->last = skb; rx_enqueued++; } eni_vcc->descr = here; eni_out(dma_wr,MID_DMA_WR_RX); return 0; trouble: if (paddr) pci_unmap_single(eni_dev->pci_dev,paddr,skb->len, PCI_DMA_FROMDEVICE); if (skb) dev_kfree_skb_irq(skb); return -1; } static void discard(struct atm_vcc *vcc,unsigned long size) { struct eni_vcc *eni_vcc; eni_vcc = ENI_VCC(vcc); EVENT("discard (size=%ld)\n",size,0); while (do_rx_dma(vcc,NULL,1,size,0)) EVENT("BUSY LOOP",0,0); /* could do a full fallback, but that might be more expensive */ if (eni_vcc->rxing) ENI_PRV_POS(eni_vcc->last) += size+1; else eni_vcc->rx_pos = (eni_vcc->rx_pos+size+1) & (eni_vcc->words-1); } /* * TODO: should check whether direct copies (without DMA setup, dequeuing on * interrupt, etc.) aren't much faster for AAL0 */ static int rx_aal0(struct atm_vcc *vcc) { struct eni_vcc *eni_vcc; unsigned long descr; unsigned long length; struct sk_buff *skb; DPRINTK(">rx_aal0\n"); eni_vcc = ENI_VCC(vcc); descr = readl(eni_vcc->recv+eni_vcc->descr*4); if ((descr & MID_RED_IDEN) != (MID_RED_RX_ID << MID_RED_SHIFT)) { rx_ident_err(vcc); return 1; } if (descr & MID_RED_T) { DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", vcc->dev->number); length = 0; atomic_inc(&vcc->stats->rx_err); } else { length = ATM_CELL_SIZE-1; /* no HEC */ } skb = length ? 
atm_alloc_charge(vcc,length,GFP_ATOMIC) : NULL; if (!skb) { discard(vcc,length >> 2); return 0; } skb_put(skb,length); skb->tstamp = eni_vcc->timestamp; DPRINTK("got len %ld\n",length); if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1; eni_vcc->rxing++; return 0; } static int rx_aal5(struct atm_vcc *vcc) { struct eni_vcc *eni_vcc; unsigned long descr; unsigned long size,eff,length; struct sk_buff *skb; EVENT("rx_aal5\n",0,0); DPRINTK(">rx_aal5\n"); eni_vcc = ENI_VCC(vcc); descr = readl(eni_vcc->recv+eni_vcc->descr*4); if ((descr & MID_RED_IDEN) != (MID_RED_RX_ID << MID_RED_SHIFT)) { rx_ident_err(vcc); return 1; } if (descr & (MID_RED_T | MID_RED_CRC_ERR)) { if (descr & MID_RED_T) { EVENT("empty cell (descr=0x%lx)\n",descr,0); DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", vcc->dev->number); size = 0; } else { static unsigned long silence = 0; if (time_after(jiffies, silence) || silence == 0) { printk(KERN_WARNING DEV_LABEL "(itf %d): " "discarding PDU(s) with CRC error\n", vcc->dev->number); silence = (jiffies+2*HZ)|1; } size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); EVENT("CRC error (descr=0x%lx,size=%ld)\n",descr, size); } eff = length = 0; atomic_inc(&vcc->stats->rx_err); } else { size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); DPRINTK("size=%ld\n",size); length = readl(eni_vcc->recv+(((eni_vcc->descr+size-1) & (eni_vcc->words-1)))*4) & 0xffff; /* -trailer(2)+header(1) */ if (length && length <= (size << 2)-8 && length <= ATM_MAX_AAL5_PDU) eff = (length+3) >> 2; else { /* ^ trailer length (8) */ EVENT("bad PDU (descr=0x08%lx,length=%ld)\n",descr, length); printk(KERN_ERR DEV_LABEL "(itf %d): bad AAL5 PDU " "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", vcc->dev->number,vcc->vci,length,size << 2,descr); length = eff = 0; atomic_inc(&vcc->stats->rx_err); } } skb = eff ? 
atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; if (!skb) { discard(vcc,size); return 0; } skb_put(skb,length); DPRINTK("got len %ld\n",length); if (do_rx_dma(vcc,skb,1,size,eff)) return 1; eni_vcc->rxing++; return 0; } static inline int rx_vcc(struct atm_vcc *vcc) { void __iomem *vci_dsc; unsigned long tmp; struct eni_vcc *eni_vcc; eni_vcc = ENI_VCC(vcc); vci_dsc = ENI_DEV(vcc->dev)->vci+vcc->vci*16; EVENT("rx_vcc(1)\n",0,0); while (eni_vcc->descr != (tmp = (readl(vci_dsc+4) & MID_VCI_DESCR) >> MID_VCI_DESCR_SHIFT)) { EVENT("rx_vcc(2: host dsc=0x%lx, nic dsc=0x%lx)\n", eni_vcc->descr,tmp); DPRINTK("CB_DESCR %ld REG_DESCR %d\n",ENI_VCC(vcc)->descr, (((unsigned) readl(vci_dsc+4) & MID_VCI_DESCR) >> MID_VCI_DESCR_SHIFT)); if (ENI_VCC(vcc)->rx(vcc)) return 1; } /* clear IN_SERVICE flag */ writel(readl(vci_dsc) & ~MID_VCI_IN_SERVICE,vci_dsc); /* * If new data has arrived between evaluating the while condition and * clearing IN_SERVICE, we wouldn't be notified until additional data * follows. So we have to loop again to be sure. 
*/ EVENT("rx_vcc(3)\n",0,0); while (ENI_VCC(vcc)->descr != (tmp = (readl(vci_dsc+4) & MID_VCI_DESCR) >> MID_VCI_DESCR_SHIFT)) { EVENT("rx_vcc(4: host dsc=0x%lx, nic dsc=0x%lx)\n", eni_vcc->descr,tmp); DPRINTK("CB_DESCR %ld REG_DESCR %d\n",ENI_VCC(vcc)->descr, (((unsigned) readl(vci_dsc+4) & MID_VCI_DESCR) >> MID_VCI_DESCR_SHIFT)); if (ENI_VCC(vcc)->rx(vcc)) return 1; } return 0; } static void poll_rx(struct atm_dev *dev) { struct eni_dev *eni_dev; struct atm_vcc *curr; eni_dev = ENI_DEV(dev); while ((curr = eni_dev->fast)) { EVENT("poll_rx.fast\n",0,0); if (rx_vcc(curr)) return; eni_dev->fast = ENI_VCC(curr)->next; ENI_VCC(curr)->next = ENI_VCC_NOS; barrier(); ENI_VCC(curr)->servicing--; } while ((curr = eni_dev->slow)) { EVENT("poll_rx.slow\n",0,0); if (rx_vcc(curr)) return; eni_dev->slow = ENI_VCC(curr)->next; ENI_VCC(curr)->next = ENI_VCC_NOS; barrier(); ENI_VCC(curr)->servicing--; } } static void get_service(struct atm_dev *dev) { struct eni_dev *eni_dev; struct atm_vcc *vcc; unsigned long vci; DPRINTK(">get_service\n"); eni_dev = ENI_DEV(dev); while (eni_in(MID_SERV_WRITE) != eni_dev->serv_read) { vci = readl(eni_dev->service+eni_dev->serv_read*4); eni_dev->serv_read = (eni_dev->serv_read+1) & (NR_SERVICE-1); vcc = eni_dev->rx_map[vci & 1023]; if (!vcc) { printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %ld not " "found\n",dev->number,vci); continue; /* nasty but we try to go on anyway */ /* @@@ nope, doesn't work */ } EVENT("getting from service\n",0,0); if (ENI_VCC(vcc)->next != ENI_VCC_NOS) { EVENT("double service\n",0,0); DPRINTK("Grr, servicing VCC %ld twice\n",vci); continue; } ENI_VCC(vcc)->timestamp = ktime_get_real(); ENI_VCC(vcc)->next = NULL; if (vcc->qos.rxtp.traffic_class == ATM_CBR) { if (eni_dev->fast) ENI_VCC(eni_dev->last_fast)->next = vcc; else eni_dev->fast = vcc; eni_dev->last_fast = vcc; } else { if (eni_dev->slow) ENI_VCC(eni_dev->last_slow)->next = vcc; else eni_dev->slow = vcc; eni_dev->last_slow = vcc; } putting++; ENI_VCC(vcc)->servicing++; 
} } static void dequeue_rx(struct atm_dev *dev) { struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; struct atm_vcc *vcc; struct sk_buff *skb; void __iomem *vci_dsc; int first; eni_dev = ENI_DEV(dev); first = 1; while (1) { skb = skb_dequeue(&eni_dev->rx_queue); if (!skb) { if (first) { DPRINTK(DEV_LABEL "(itf %d): RX but not " "rxing\n",dev->number); EVENT("nothing to dequeue\n",0,0); } break; } EVENT("dequeued (size=%ld,pos=0x%lx)\n",ENI_PRV_SIZE(skb), ENI_PRV_POS(skb)); rx_dequeued++; vcc = ATM_SKB(skb)->vcc; eni_vcc = ENI_VCC(vcc); first = 0; vci_dsc = eni_dev->vci+vcc->vci*16; if (!EEPMOK(eni_vcc->rx_pos,ENI_PRV_SIZE(skb), (readl(vci_dsc+4) & MID_VCI_READ) >> MID_VCI_READ_SHIFT, eni_vcc->words)) { EVENT("requeuing\n",0,0); skb_queue_head(&eni_dev->rx_queue,skb); break; } eni_vcc->rxing--; eni_vcc->rx_pos = ENI_PRV_POS(skb) & (eni_vcc->words-1); pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len, PCI_DMA_TODEVICE); if (!skb->len) dev_kfree_skb_irq(skb); else { EVENT("pushing (len=%ld)\n",skb->len,0); if (vcc->qos.aal == ATM_AAL0) *(unsigned long *) skb->data = ntohl(*(unsigned long *) skb->data); memset(skb->cb,0,sizeof(struct eni_skb_prv)); vcc->push(vcc,skb); pushed++; } atomic_inc(&vcc->stats->rx); } wake_up(&eni_dev->rx_wait); } static int open_rx_first(struct atm_vcc *vcc) { struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; unsigned long size; DPRINTK("open_rx_first\n"); eni_dev = ENI_DEV(vcc->dev); eni_vcc = ENI_VCC(vcc); eni_vcc->rx = NULL; if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0; size = vcc->qos.rxtp.max_sdu*eni_dev->rx_mult/100; if (size > MID_MAX_BUF_SIZE && vcc->qos.rxtp.max_sdu <= MID_MAX_BUF_SIZE) size = MID_MAX_BUF_SIZE; eni_vcc->recv = eni_alloc_mem(eni_dev,&size); DPRINTK("rx at 0x%lx\n",eni_vcc->recv); eni_vcc->words = size >> 2; if (!eni_vcc->recv) return -ENOBUFS; eni_vcc->rx = vcc->qos.aal == ATM_AAL5 ? 
rx_aal5 : rx_aal0; eni_vcc->descr = 0; eni_vcc->rx_pos = 0; eni_vcc->rxing = 0; eni_vcc->servicing = 0; eni_vcc->next = ENI_VCC_NOS; return 0; } static int open_rx_second(struct atm_vcc *vcc) { void __iomem *here; struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; unsigned long size; int order; DPRINTK("open_rx_second\n"); eni_dev = ENI_DEV(vcc->dev); eni_vcc = ENI_VCC(vcc); if (!eni_vcc->rx) return 0; /* set up VCI descriptor */ here = eni_dev->vci+vcc->vci*16; DPRINTK("loc 0x%x\n",(unsigned) (eni_vcc->recv-eni_dev->ram)/4); size = eni_vcc->words >> 8; for (order = -1; size; order++) size >>= 1; writel(0,here+4); /* descr, read = 0 */ writel(0,here+8); /* write, state, count = 0 */ if (eni_dev->rx_map[vcc->vci]) printk(KERN_CRIT DEV_LABEL "(itf %d): BUG - VCI %d already " "in use\n",vcc->dev->number,vcc->vci); eni_dev->rx_map[vcc->vci] = vcc; /* now it counts */ writel(((vcc->qos.aal != ATM_AAL5 ? MID_MODE_RAW : MID_MODE_AAL5) << MID_VCI_MODE_SHIFT) | MID_VCI_PTI_MODE | (((eni_vcc->recv-eni_dev->ram) >> (MID_LOC_SKIP+2)) << MID_VCI_LOCATION_SHIFT) | (order << MID_VCI_SIZE_SHIFT),here); return 0; } static void close_rx(struct atm_vcc *vcc) { DECLARE_WAITQUEUE(wait,current); void __iomem *here; struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; eni_vcc = ENI_VCC(vcc); if (!eni_vcc->rx) return; eni_dev = ENI_DEV(vcc->dev); if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) { here = eni_dev->vci+vcc->vci*16; /* block receiver */ writel((readl(here) & ~MID_VCI_MODE) | (MID_MODE_TRASH << MID_VCI_MODE_SHIFT),here); /* wait for receiver to become idle */ udelay(27); /* discard pending cell */ writel(readl(here) & ~MID_VCI_IN_SERVICE,here); /* don't accept any new ones */ eni_dev->rx_map[vcc->vci] = NULL; /* wait for RX queue to drain */ DPRINTK("eni_close: waiting for RX ...\n"); EVENT("RX closing\n",0,0); add_wait_queue(&eni_dev->rx_wait,&wait); set_current_state(TASK_UNINTERRUPTIBLE); barrier(); for (;;) { /* transition service->rx: rxing++, servicing-- */ if 
(!eni_vcc->servicing) { barrier(); if (!eni_vcc->rxing) break; } EVENT("drain PDUs (rx %ld, serv %ld)\n",eni_vcc->rxing, eni_vcc->servicing); printk(KERN_INFO "%d+%d RX left\n",eni_vcc->servicing, eni_vcc->rxing); schedule(); set_current_state(TASK_UNINTERRUPTIBLE); } for (;;) { int at_end; u32 tmp; tasklet_disable(&eni_dev->task); tmp = readl(eni_dev->vci+vcc->vci*16+4) & MID_VCI_READ; at_end = eni_vcc->rx_pos == tmp >> MID_VCI_READ_SHIFT; tasklet_enable(&eni_dev->task); if (at_end) break; EVENT("drain discard (host 0x%lx, nic 0x%lx)\n", eni_vcc->rx_pos,tmp); printk(KERN_INFO "draining RX: host 0x%lx, nic 0x%x\n", eni_vcc->rx_pos,tmp); schedule(); set_current_state(TASK_UNINTERRUPTIBLE); } set_current_state(TASK_RUNNING); remove_wait_queue(&eni_dev->rx_wait,&wait); } eni_free_mem(eni_dev,eni_vcc->recv,eni_vcc->words << 2); eni_vcc->rx = NULL; } static int start_rx(struct atm_dev *dev) { struct eni_dev *eni_dev; eni_dev = ENI_DEV(dev); eni_dev->rx_map = (struct atm_vcc **) get_zeroed_page(GFP_KERNEL); if (!eni_dev->rx_map) { printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n", dev->number); free_page((unsigned long) eni_dev->free_list); return -ENOMEM; } eni_dev->rx_mult = DEFAULT_RX_MULT; eni_dev->fast = eni_dev->last_fast = NULL; eni_dev->slow = eni_dev->last_slow = NULL; init_waitqueue_head(&eni_dev->rx_wait); skb_queue_head_init(&eni_dev->rx_queue); eni_dev->serv_read = eni_in(MID_SERV_WRITE); eni_out(0,MID_DMA_WR_RX); return 0; } /*----------------------------------- TX ------------------------------------*/ enum enq_res { enq_ok,enq_next,enq_jam }; static inline void put_dma(int chan,u32 *dma,int *j,dma_addr_t paddr, u32 size) { u32 init,words; DPRINTK("put_dma: 0x%lx+0x%x\n",(unsigned long) paddr,size); EVENT("put_dma: 0x%lx+0x%lx\n",(unsigned long) paddr,size); #if 0 /* don't complain anymore */ if (paddr & 3) printk(KERN_ERR "put_dma: unaligned addr (0x%lx)\n",paddr); if (size & 3) printk(KERN_ERR "put_dma: unaligned size (0x%lx)\n",size); 
#endif
	/* leading byte transfer to reach 4-byte alignment */
	if (paddr & 3) {
		init = 4-(paddr & 3);
		if (init > size || size < 7) init = size;
		DPRINTK("put_dma: %lx DMA: %d/%d bytes\n",
		    (unsigned long) paddr,init,size);
		dma[(*j)++] = MID_DT_BYTE | (init << MID_DMA_COUNT_SHIFT) |
		    (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += init;
		size -= init;
	}
	words = size >> 2;
	size &= 3;	/* trailing odd bytes handled at the end */
	/* word transfers to reach 32-byte alignment for burst modes */
	if (words && (paddr & 31)) {
		init = 8-((paddr & 31) >> 2);
		if (init > words) init = words;
		DPRINTK("put_dma: %lx DMA: %d/%d words\n",
		    (unsigned long) paddr,init,words);
		dma[(*j)++] = MID_DT_WORD | (init << MID_DMA_COUNT_SHIFT) |
		    (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += init << 2;
		words -= init;
	}
#ifdef CONFIG_ATM_ENI_BURST_TX_16W /* may work with some PCI chipsets ... */
	if (words & ~15) {
		DPRINTK("put_dma: %lx DMA: %d*16/%d words\n",
		    (unsigned long) paddr,words >> 4,words);
		dma[(*j)++] = MID_DT_16W | ((words >> 4) << MID_DMA_COUNT_SHIFT)
		    | (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += (words & ~15) << 2;
		words &= 15;
	}
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_8W /* recommended */
	if (words & ~7) {
		DPRINTK("put_dma: %lx DMA: %d*8/%d words\n",
		    (unsigned long) paddr,words >> 3,words);
		dma[(*j)++] = MID_DT_8W | ((words >> 3) << MID_DMA_COUNT_SHIFT)
		    | (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += (words & ~7) << 2;
		words &= 7;
	}
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_4W /* probably useless if TX_8W or TX_16W */
	if (words & ~3) {
		DPRINTK("put_dma: %lx DMA: %d*4/%d words\n",
		    (unsigned long) paddr,words >> 2,words);
		dma[(*j)++] = MID_DT_4W | ((words >> 2) << MID_DMA_COUNT_SHIFT)
		    | (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += (words & ~3) << 2;
		words &= 3;
	}
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_2W /* probably useless if TX_4W, TX_8W, ...
*/
	if (words & ~1) {
		DPRINTK("put_dma: %lx DMA: %d*2/%d words\n",
		    (unsigned long) paddr,words >> 1,words);
		dma[(*j)++] = MID_DT_2W | ((words >> 1) << MID_DMA_COUNT_SHIFT)
		    | (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += (words & ~1) << 2;
		words &= 1;
	}
#endif
	/* remaining single words */
	if (words) {
		DPRINTK("put_dma: %lx DMA: %d words\n",(unsigned long) paddr,
		    words);
		dma[(*j)++] = MID_DT_WORD | (words << MID_DMA_COUNT_SHIFT) |
		    (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += words << 2;
	}
	/* trailing odd bytes */
	if (size) {
		DPRINTK("put_dma: %lx DMA: %d bytes\n",(unsigned long) paddr,
		    size);
		dma[(*j)++] = MID_DT_BYTE | (size << MID_DMA_COUNT_SHIFT) |
		    (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
	}
}


/*
 * Try to transmit one skb on its VCC's TX channel: reserve space in the
 * on-board segmentation buffer, build the DMA descriptor list, write the
 * segmentation header, and kick the TX DMA engine.
 */
static enum enq_res do_tx(struct sk_buff *skb)
{
	struct atm_vcc *vcc;
	struct eni_dev *eni_dev;
	struct eni_vcc *eni_vcc;
	struct eni_tx *tx;
	dma_addr_t paddr;
	u32 dma_rd,dma_wr;
	u32 size; /* in words */
	int aal5,dma_size,i,j;

	DPRINTK(">do_tx\n");
	NULLCHECK(skb);
	EVENT("do_tx: skb=0x%lx, %ld bytes\n",(unsigned long) skb,skb->len);
	vcc = ATM_SKB(skb)->vcc;
	NULLCHECK(vcc);
	eni_dev = ENI_DEV(vcc->dev);
	NULLCHECK(eni_dev);
	eni_vcc = ENI_VCC(vcc);
	tx = eni_vcc->tx;
	NULLCHECK(tx);
#if 0 /* Enable this for testing with the "align" program */
	{
		unsigned int hack = *((char *) skb->data)-'0';

		if (hack < 8) {
			skb->data += hack;
			skb->len -= hack;
		}
	}
#endif
#if 0 /* should work now */
	if ((unsigned long) skb->data & 3)
		printk(KERN_ERR DEV_LABEL "(itf %d): VCI %d has mis-aligned "
		    "TX data\n",vcc->dev->number,vcc->vci);
#endif
	/*
	 * Potential future IP speedup: make hard_header big enough to put
	 * segmentation descriptor directly into PDU.
Saves: 4 slave writes,
	 * 1 DMA xfer & 2 DMA'ed bytes (protocol layering is for wimps :-)
	 */
	aal5 = vcc->qos.aal == ATM_AAL5;
	/* check space in buffer */
	if (!aal5)
		size = (ATM_CELL_PAYLOAD >> 2)+TX_DESCR_SIZE;
		/* cell without HEC plus segmentation header (includes
		   four-byte cell header) */
	else {
		size = skb->len+4*AAL5_TRAILER+ATM_CELL_PAYLOAD-1;
		/* add AAL5 trailer */
		size = ((size-(size % ATM_CELL_PAYLOAD)) >> 2)+TX_DESCR_SIZE;
		/* add segmentation header */
	}
	/*
	 * Can I move tx_pos by size bytes without getting closer than TX_GAP
	 * to the read pointer ? TX_GAP means to leave some space for what
	 * the manual calls "too close".
	 */
	if (!NEPMOK(tx->tx_pos,size+TX_GAP,
	    eni_in(MID_TX_RDPTR(tx->index)),tx->words)) {
		DPRINTK(DEV_LABEL "(itf %d): TX full (size %d)\n",
		    vcc->dev->number,size);
		return enq_next;
	}
	/* check DMA */
	dma_wr = eni_in(MID_DMA_WR_TX);
	dma_rd = eni_in(MID_DMA_RD_TX);
	dma_size = 3; /* JK for descriptor and final fill, plus final size
			 mis-alignment fix */
	DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
	/* up to 5 DMA entries per contiguous region (see put_dma) */
	if (!skb_shinfo(skb)->nr_frags) dma_size += 5;
	else dma_size += 5*(skb_shinfo(skb)->nr_frags+1);
	if (dma_size > TX_DMA_BUF) {
		printk(KERN_CRIT DEV_LABEL "(itf %d): needs %d DMA entries "
		    "(got only %d)\n",vcc->dev->number,dma_size,TX_DMA_BUF);
	}
	DPRINTK("dma_wr is %d, tx_pos is %ld\n",dma_wr,tx->tx_pos);
	/* would the new entries overrun the ring read pointer ? */
	if (dma_wr != dma_rd && ((dma_rd+NR_DMA_TX-dma_wr) & (NR_DMA_TX-1)) <
	     dma_size) {
		printk(KERN_WARNING DEV_LABEL "(itf %d): TX DMA full\n",
		    vcc->dev->number);
		return enq_jam;
	}
	paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
	    PCI_DMA_TODEVICE);
	ENI_PRV_PADDR(skb) = paddr;	/* remembered for unmap in dequeue_tx */
	/* prepare DMA queue entries */
	j = 0;
	/* leading JK entry skips over the segmentation descriptor */
	eni_dev->dma[j++] = (((tx->tx_pos+TX_DESCR_SIZE) & (tx->words-1)) <<
	     MID_DMA_COUNT_SHIFT) | (tx->index << MID_DMA_CHAN_SHIFT) |
	     MID_DT_JK;
	j++;
	if (!skb_shinfo(skb)->nr_frags)
		if (aal5) put_dma(tx->index,eni_dev->dma,&j,paddr,skb->len);
		else put_dma(tx->index,eni_dev->dma,&j,paddr+4,skb->len-4);
	else {
		DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
		/* NOTE(review): this path hands kernel virtual addresses to
		   put_dma instead of DMA-mapped bus addresses - as the
		   comment above admits, fragmented skbs are broken here */
		for (i = -1; i < skb_shinfo(skb)->nr_frags; i++)
			if (i == -1)
				put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
				    skb->data,
				    skb_headlen(skb));
			else
				put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
				    skb_frag_page(&skb_shinfo(skb)->frags[i]) +
					skb_shinfo(skb)->frags[i].page_offset,
				    skb_frag_size(&skb_shinfo(skb)->frags[i]));
	}
	/* pad to a word boundary with zeroes from the shared zero page */
	if (skb->len & 3) {
		put_dma(tx->index, eni_dev->dma, &j, eni_dev->zero.dma,
			4 - (skb->len & 3));
	}
	/* JK for AAL5 trailer - AAL0 doesn't need it, but who cares ... */
	eni_dev->dma[j++] = (((tx->tx_pos+size) & (tx->words-1)) <<
	    MID_DMA_COUNT_SHIFT) | (tx->index << MID_DMA_CHAN_SHIFT) |
	    MID_DMA_END | MID_DT_JK;
	j++;
	DPRINTK("DMA at end: %d\n",j);
	/* store frame */
	writel((MID_SEG_TX_ID << MID_SEG_ID_SHIFT) |
	    (aal5 ? MID_SEG_AAL5 : 0) | (tx->prescaler << MID_SEG_PR_SHIFT) |
	    (tx->resolution << MID_SEG_RATE_SHIFT) |
	    (size/(ATM_CELL_PAYLOAD/4)),tx->send+tx->tx_pos*4);
/*printk("dsc = 0x%08lx\n",(unsigned long) readl(tx->send+tx->tx_pos*4));*/
	writel((vcc->vci << MID_SEG_VCI_SHIFT) |
	    (aal5 ? 0 : (skb->data[3] & 0xf)) |
	    (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? MID_SEG_CLP : 0),
	    tx->send+((tx->tx_pos+1) & (tx->words-1))*4);
	DPRINTK("size: %d, len:%d\n",size,skb->len);
	if (aal5)
		writel(skb->len,tx->send+
		    ((tx->tx_pos+size-AAL5_TRAILER) & (tx->words-1))*4);
	/* copy descriptor pairs into the on-board TX DMA ring */
	j = j >> 1;
	for (i = 0; i < j; i++) {
		writel(eni_dev->dma[i*2],eni_dev->tx_dma+dma_wr*8);
		writel(eni_dev->dma[i*2+1],eni_dev->tx_dma+dma_wr*8+4);
		dma_wr = (dma_wr+1) & (NR_DMA_TX-1);
	}
	ENI_PRV_POS(skb) = tx->tx_pos;
	ENI_PRV_SIZE(skb) = size;
	ENI_VCC(vcc)->txing += size;
	tx->tx_pos = (tx->tx_pos+size) & (tx->words-1);
	DPRINTK("dma_wr set to %d, tx_pos is now %ld\n",dma_wr,tx->tx_pos);
	eni_out(dma_wr,MID_DMA_WR_TX);	/* go! */
	skb_queue_tail(&eni_dev->tx_queue,skb);
	queued++;
	return enq_ok;
}


/*
 * Retry the backlog of every active TX channel, highest channel first;
 * stops early if the shared TX DMA ring jams.
 */
static void poll_tx(struct atm_dev *dev)
{
	struct eni_tx *tx;
	struct sk_buff *skb;
	enum enq_res res;
	int i;

	DPRINTK(">poll_tx\n");
	for (i = NR_CHAN-1; i >= 0; i--) {
		tx = &ENI_DEV(dev)->tx[i];
		if (tx->send)
			while ((skb = skb_dequeue(&tx->backlog))) {
				res = do_tx(skb);
				if (res == enq_ok) continue;
				DPRINTK("re-queuing TX PDU\n");
				skb_queue_head(&tx->backlog,skb);
				requeued++;
				if (res == enq_jam) return;
				break;	/* enq_next: try the next channel */
			}
	}
}


/*
 * Reap completed transmissions: pop skbs whose data the hardware has
 * consumed (read pointer passed them), unmap and release them.
 */
static void dequeue_tx(struct atm_dev *dev)
{
	struct eni_dev *eni_dev;
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	struct eni_tx *tx;

	NULLCHECK(dev);
	eni_dev = ENI_DEV(dev);
	NULLCHECK(eni_dev);
	while ((skb = skb_dequeue(&eni_dev->tx_queue))) {
		vcc = ATM_SKB(skb)->vcc;
		NULLCHECK(vcc);
		tx = ENI_VCC(vcc)->tx;
		NULLCHECK(ENI_VCC(vcc)->tx);
		DPRINTK("dequeue_tx: next 0x%lx curr 0x%x\n",ENI_PRV_POS(skb),
		    (unsigned) eni_in(MID_TX_DESCRSTART(tx->index)));
		/* hardware hasn't started this PDU yet: put it back, stop */
		if (ENI_VCC(vcc)->txing < tx->words && ENI_PRV_POS(skb) ==
		    eni_in(MID_TX_DESCRSTART(tx->index))) {
			skb_queue_head(&eni_dev->tx_queue,skb);
			break;
		}
		ENI_VCC(vcc)->txing -= ENI_PRV_SIZE(skb);
		pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
				 PCI_DMA_TODEVICE);
		if (vcc->pop) vcc->pop(vcc,skb);
		else dev_kfree_skb_irq(skb);
		atomic_inc(&vcc->stats->tx);
		wake_up(&eni_dev->tx_wait);
		dma_complete++;
	}
}


static struct eni_tx *alloc_tx(struct
eni_dev *eni_dev,int ubr)
{
	int i;

	/* channel 0 is reserved for the shared UBR channel */
	for (i = !ubr; i < NR_CHAN; i++)
		if (!eni_dev->tx[i].send) return eni_dev->tx+i;
	return NULL;
}


/*
 * Translate a peak cell rate (*pcr, cells/s; <=0 means "at most -pcr",
 * 0 means "whatever is left") into the hardware prescaler/resolution pair
 * (*pre,*res) and write back the rate actually achieved. Always returns 0.
 */
static int comp_tx(struct eni_dev *eni_dev,int *pcr,int reserved,int *pre,
    int *res,int unlimited)
{
	static const int pre_div[] = { 4,16,128,2048 };
	    /* 2^(((x+2)^2-(x+2))/2+1) */

	if (unlimited) *pre = *res = 0;
	else {
		if (*pcr > 0) {
			int div;

			/* smallest prescaler that can reach at most *pcr */
			for (*pre = 0; *pre < 3; (*pre)++)
				if (TS_CLOCK/pre_div[*pre]/64 <= *pcr) break;
			div = pre_div[*pre]**pcr;
			DPRINTK("min div %d\n",div);
			*res = TS_CLOCK/div-1;
		}
		else {
			int div;

			if (!*pcr) *pcr = eni_dev->tx_bw+reserved;
			/* negative pcr: rate is an upper bound; round down */
			for (*pre = 3; *pre >= 0; (*pre)--)
				if (TS_CLOCK/pre_div[*pre]/64 > -*pcr) break;
			if (*pre < 3) (*pre)++; /* else fail later */
			div = pre_div[*pre]*-*pcr;
			DPRINTK("max div %d\n",div);
			*res = DIV_ROUND_UP(TS_CLOCK, div)-1;
		}
		if (*res < 0) *res = 0;
		if (*res > MID_SEG_MAX_RATE) *res = MID_SEG_MAX_RATE;
	}
	*pcr = TS_CLOCK/pre_div[*pre]/(*res+1);
	DPRINTK("out pcr: %d (%d:%d)\n",*pcr,*pre,*res);
	return 0;
}


/*
 * Reserve bandwidth (set_rsv) and/or program shaping (set_shp) for a VCC's
 * TX side, allocating a TX channel and on-board buffer on first use.
 * Unlimited-rate UBR VCCs share the single pre-existing UBR channel.
 */
static int reserve_or_set_tx(struct atm_vcc *vcc,struct atm_trafprm *txtp,
    int set_rsv,int set_shp)
{
	struct eni_dev *eni_dev = ENI_DEV(vcc->dev);
	struct eni_vcc *eni_vcc = ENI_VCC(vcc);
	struct eni_tx *tx;
	unsigned long size;
	void __iomem *mem;
	int rate,ubr,unlimited,new_tx;
	int pre,res,order;
	int error;

	rate = atm_pcr_goal(txtp);
	ubr = txtp->traffic_class == ATM_UBR;
	unlimited = ubr && (!rate || rate <= -ATM_OC3_PCR ||
	    rate >= ATM_OC3_PCR);
	if (!unlimited) {
		size = txtp->max_sdu*eni_dev->tx_mult/100;
		if (size > MID_MAX_BUF_SIZE && txtp->max_sdu <=
		    MID_MAX_BUF_SIZE)
			size = MID_MAX_BUF_SIZE;
	}
	else {
		/* share the existing UBR channel if there is one */
		if (eni_dev->ubr) {
			eni_vcc->tx = eni_dev->ubr;
			txtp->pcr = ATM_OC3_PCR;
			return 0;
		}
		size = UBR_BUFFER;
	}
	new_tx = !eni_vcc->tx;
	mem = NULL; /* for gcc */
	if (!new_tx) tx = eni_vcc->tx;
	else {
		mem = eni_alloc_mem(eni_dev,&size);
		if (!mem) return -ENOBUFS;
		tx = alloc_tx(eni_dev,unlimited);
		if (!tx) {
			eni_free_mem(eni_dev,mem,size);
			return -EBUSY;
		}
		DPRINTK("got chan %d\n",tx->index);
		tx->reserved = tx->shaping = 0;
		tx->send = mem;
		tx->words = size >> 2;
		skb_queue_head_init(&tx->backlog);
		/* order = log2 of buffer size in 1kB units */
		for (order = 0; size > (1 << (order+10)); order++);
		eni_out((order << MID_SIZE_SHIFT) |
		    ((tx->send-eni_dev->ram) >> (MID_LOC_SKIP+2)),
		    MID_TX_PLACE(tx->index));
		tx->tx_pos = eni_in(MID_TX_DESCRSTART(tx->index)) &
		    MID_DESCR_START;
	}
	error = comp_tx(eni_dev,&rate,tx->reserved,&pre,&res,unlimited);
	/* validate the achieved rate against the traffic contract */
	if (!error && txtp->min_pcr > rate) error = -EINVAL;
	if (!error && txtp->max_pcr && txtp->max_pcr != ATM_MAX_PCR &&
	    txtp->max_pcr < rate) error = -EINVAL;
	if (!error && !ubr && rate > eni_dev->tx_bw+tx->reserved)
		error = -EINVAL;
	if (!error && set_rsv && !set_shp && rate < tx->shaping)
		error = -EINVAL;
	if (!error && !set_rsv && rate > tx->reserved && !ubr)
		error = -EINVAL;
	if (error) {
		if (new_tx) {
			tx->send = NULL;	/* release the channel */
			eni_free_mem(eni_dev,mem,size);
		}
		return error;
	}
	txtp->pcr = rate;
	if (set_rsv && !ubr) {
		/* move the reservation: give back old, take new */
		eni_dev->tx_bw += tx->reserved;
		tx->reserved = rate;
		eni_dev->tx_bw -= rate;
	}
	if (set_shp || (unlimited && new_tx)) {
		if (unlimited && new_tx) eni_dev->ubr = tx;
		tx->prescaler = pre;
		tx->resolution = res;
		tx->shaping = rate;
	}
	if (set_shp) eni_vcc->tx = tx;
	DPRINTK("rsv %d shp %d\n",tx->reserved,tx->shaping);
	return 0;
}


/* First stage of TX open: reserve bandwidth and shaping per the QoS spec. */
static int open_tx_first(struct atm_vcc *vcc)
{
	ENI_VCC(vcc)->tx = NULL;
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	ENI_VCC(vcc)->txing = 0;
	return reserve_or_set_tx(vcc,&vcc->qos.txtp,1,1);
}


static int open_tx_second(struct atm_vcc *vcc)
{
	return 0; /* nothing to do */
}


/*
 * Tear down the TX side of a VCC: wait for its backlog and in-flight PDUs
 * to drain, then (unless it is the shared UBR channel) poll until the
 * hardware is idle, free the on-board buffer and return the bandwidth.
 */
static void close_tx(struct atm_vcc *vcc)
{
	DECLARE_WAITQUEUE(wait,current);
	struct eni_dev *eni_dev;
	struct eni_vcc *eni_vcc;

	eni_vcc = ENI_VCC(vcc);
	if (!eni_vcc->tx) return;
	eni_dev = ENI_DEV(vcc->dev);
	/* wait for TX queue to drain */
	DPRINTK("eni_close: waiting for TX ...\n");
	add_wait_queue(&eni_dev->tx_wait,&wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	for (;;) {
		int txing;

		/* tasklet must not requeue while we sample the state */
		tasklet_disable(&eni_dev->task);
		txing = skb_peek(&eni_vcc->tx->backlog) ||
eni_vcc->txing;
		tasklet_enable(&eni_dev->task);
		if (!txing) break;
		DPRINTK("%d TX left\n",eni_vcc->txing);
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&eni_dev->tx_wait,&wait);
	if (eni_vcc->tx != eni_dev->ubr) {
		/*
		 * Looping a few times in here is probably far cheaper than
		 * keeping track of TX completions all the time, so let's poll
		 * a bit ...
		 */
		while (eni_in(MID_TX_RDPTR(eni_vcc->tx->index)) !=
		    eni_in(MID_TX_DESCRSTART(eni_vcc->tx->index)))
			schedule();
		eni_free_mem(eni_dev,eni_vcc->tx->send,eni_vcc->tx->words << 2);
		eni_vcc->tx->send = NULL;	/* channel is free again */
		eni_dev->tx_bw += eni_vcc->tx->reserved;
	}
	eni_vcc->tx = NULL;
}


/*
 * One-time TX initialisation at device start: reset bandwidth accounting,
 * queues and the per-channel state, and clear the TX DMA write pointer.
 */
static int start_tx(struct atm_dev *dev)
{
	struct eni_dev *eni_dev;
	int i;

	eni_dev = ENI_DEV(dev);
	eni_dev->lost = 0;
	eni_dev->tx_bw = ATM_OC3_PCR;
	eni_dev->tx_mult = DEFAULT_TX_MULT;
	init_waitqueue_head(&eni_dev->tx_wait);
	eni_dev->ubr = NULL;
	skb_queue_head_init(&eni_dev->tx_queue);
	eni_out(0,MID_DMA_WR_TX);
	for (i = 0; i < NR_CHAN; i++) {
		eni_dev->tx[i].send = NULL;
		eni_dev->tx[i].index = i;
	}
	return 0;
}


/*--------------------------------- common ----------------------------------*/


#if 0 /* may become useful again when tuning things */

static void foo(void)
{
	printk(KERN_INFO
	    "tx_complete=%d,dma_complete=%d,queued=%d,requeued=%d,sub=%d,\n"
	    "backlogged=%d,rx_enqueued=%d,rx_dequeued=%d,putting=%d,pushed=%d\n",
	    tx_complete,dma_complete,queued,requeued,submitted,backlogged,
	    rx_enqueued,rx_dequeued,putting,pushed);
	if (eni_boards) printk(KERN_INFO "loss: %ld\n",ENI_DEV(eni_boards)->lost);
}

#endif


/* Report "should never happen" interrupt causes and dump recent events. */
static void bug_int(struct atm_dev *dev,unsigned long reason)
{
	DPRINTK(">bug_int\n");
	if (reason & MID_DMA_ERR_ACK)
		printk(KERN_CRIT DEV_LABEL "(itf %d): driver error - DMA "
		    "error\n",dev->number);
	if (reason & MID_TX_IDENT_MISM)
		printk(KERN_CRIT DEV_LABEL "(itf %d): driver error - ident "
		    "mismatch\n",dev->number);
	if (reason & MID_TX_DMA_OVFL)
		printk(KERN_CRIT DEV_LABEL "(itf %d): driver error - DMA "
		    "overflow\n",dev->number);
	EVENT("---dump ends here---\n",0,0);
	printk(KERN_NOTICE "---recent events---\n");
	event_dump();
}


/*
 * Interrupt handler: read the cause register, handle the two causes that
 * re-fire until serviced (stat overflow, PHY interrupt) immediately, and
 * defer everything else to the tasklet via eni_dev->events.
 */
static irqreturn_t eni_int(int irq,void *dev_id)
{
	struct atm_dev *dev;
	struct eni_dev *eni_dev;
	u32 reason;

	DPRINTK(">eni_int\n");
	dev = dev_id;
	eni_dev = ENI_DEV(dev);
	reason = eni_in(MID_ISA);
	DPRINTK(DEV_LABEL ": int 0x%lx\n",(unsigned long) reason);
	/*
	 * Must handle these two right now, because reading ISA doesn't clear
	 * them, so they re-occur and we never make it to the tasklet. Since
	 * they're rare, we don't mind the occasional invocation of eni_tasklet
	 * with eni_dev->events == 0.
	 */
	if (reason & MID_STAT_OVFL) {
		EVENT("stat overflow\n",0,0);
		eni_dev->lost += eni_in(MID_STAT) & MID_OVFL_TRASH;
	}
	if (reason & MID_SUNI_INT) {
		EVENT("SUNI int\n",0,0);
		dev->phy->interrupt(dev);
#if 0
		foo();
#endif
	}
	spin_lock(&eni_dev->lock);
	eni_dev->events |= reason;
	spin_unlock(&eni_dev->lock);
	tasklet_schedule(&eni_dev->task);
	return IRQ_HANDLED;
}


/*
 * Bottom half: consume the accumulated event mask and run the RX/TX
 * processing that is too heavy for hard-interrupt context.
 */
static void eni_tasklet(unsigned long data)
{
	struct atm_dev *dev = (struct atm_dev *) data;
	struct eni_dev *eni_dev = ENI_DEV(dev);
	unsigned long flags;
	u32 events;

	DPRINTK("eni_tasklet (dev %p)\n",dev);
	spin_lock_irqsave(&eni_dev->lock,flags);
	events = xchg(&eni_dev->events,0);	/* grab-and-clear atomically */
	spin_unlock_irqrestore(&eni_dev->lock,flags);
	if (events & MID_RX_DMA_COMPLETE) {
		EVENT("INT: RX DMA complete, starting dequeue_rx\n",0,0);
		dequeue_rx(dev);
		EVENT("dequeue_rx done, starting poll_rx\n",0,0);
		poll_rx(dev);
		EVENT("poll_rx done\n",0,0);
		/* poll_tx ? */
	}
	if (events & MID_SERVICE) {
		EVENT("INT: service, starting get_service\n",0,0);
		get_service(dev);
		EVENT("get_service done, starting poll_rx\n",0,0);
		poll_rx(dev);
		EVENT("poll_rx done\n",0,0);
	}
	if (events & MID_TX_DMA_COMPLETE) {
		EVENT("INT: TX DMA COMPLETE\n",0,0);
		dequeue_tx(dev);
	}
	if (events & MID_TX_COMPLETE) {
		EVENT("INT: TX COMPLETE\n",0,0);
		tx_complete++;
		wake_up(&eni_dev->tx_wait);
		/* poll_rx ?
		 */
	}
	if (events & (MID_DMA_ERR_ACK | MID_TX_IDENT_MISM | MID_TX_DMA_OVFL)) {
		EVENT("bug interrupt\n",0,0);
		bug_int(dev,events);
	}
	poll_tx(dev);
}


/*--------------------------------- entries ---------------------------------*/


/* media type names indexed by the DAUGTHER_ID field of MID_RES_ID_MCON */
static const char *media_name[] __devinitdata = {
    "MMF", "SMF", "MMF", "03?",		/*  0- 3 */
    "UTP", "05?", "06?", "07?",		/*  4- 7 */
    "TAXI","09?", "10?", "11?",		/*  8-11 */
    "12?", "13?", "14?", "15?",		/* 12-15 */
    "MMF", "SMF", "18?", "19?",		/* 16-19 */
    "UTP", "21?", "22?", "23?",		/* 20-23 */
    "24?", "25?", "26?", "27?",		/* 24-27 */
    "28?", "29?", "30?", "31?"		/* 28-31 */
};

/* bit-bang one step of the serial EEPROM protocol via PCI config space;
   both macros stop as soon as a driver or PCI error has been recorded */
#define SET_SEPROM \
  ({ if (!error && !pci_error) { \
    pci_error = pci_write_config_byte(eni_dev->pci_dev,PCI_TONGA_CTRL,tonga); \
    udelay(10); /* 10 usecs */ \
  } })
#define GET_SEPROM \
  ({ if (!error && !pci_error) { \
    pci_error = pci_read_config_byte(eni_dev->pci_dev,PCI_TONGA_CTRL,&tonga); \
    udelay(10); /* 10 usecs */ \
  } })


/*
 * Read the ESI (MAC address) of an ASIC board by bit-banging the serial
 * EEPROM through the Tonga control register. Returns 0 or -EIO.
 */
static int __devinit get_esi_asic(struct atm_dev *dev)
{
	struct eni_dev *eni_dev;
	unsigned char tonga;
	int error,failed,pci_error;
	int address,i,j;

	eni_dev = ENI_DEV(dev);
	error = pci_error = 0;
	tonga = SEPROM_MAGIC | SEPROM_DATA | SEPROM_CLK;
	SET_SEPROM;
	for (i = 0; i < ESI_LEN && !error && !pci_error; i++) {
		/* start operation */
		tonga |= SEPROM_DATA;
		SET_SEPROM;
		tonga |= SEPROM_CLK;
		SET_SEPROM;
		tonga &= ~SEPROM_DATA;
		SET_SEPROM;
		tonga &= ~SEPROM_CLK;
		SET_SEPROM;
		/* send address */
		address = ((i+SEPROM_ESI_BASE) << 1)+1;	/* read command */
		for (j = 7; j >= 0; j--) {
			tonga = (address >> j) & 1 ?
tonga | SEPROM_DATA : tonga & ~SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; tonga &= ~SEPROM_CLK; SET_SEPROM; } /* get ack */ tonga |= SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; GET_SEPROM; failed = tonga & SEPROM_DATA; tonga &= ~SEPROM_CLK; SET_SEPROM; tonga |= SEPROM_DATA; SET_SEPROM; if (failed) error = -EIO; else { dev->esi[i] = 0; for (j = 7; j >= 0; j--) { dev->esi[i] <<= 1; tonga |= SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; GET_SEPROM; if (tonga & SEPROM_DATA) dev->esi[i] |= 1; tonga &= ~SEPROM_CLK; SET_SEPROM; tonga |= SEPROM_DATA; SET_SEPROM; } /* get ack */ tonga |= SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; GET_SEPROM; if (!(tonga & SEPROM_DATA)) error = -EIO; tonga &= ~SEPROM_CLK; SET_SEPROM; tonga |= SEPROM_DATA; SET_SEPROM; } /* stop operation */ tonga &= ~SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; tonga |= SEPROM_DATA; SET_SEPROM; } if (pci_error) { printk(KERN_ERR DEV_LABEL "(itf %d): error reading ESI " "(0x%02x)\n",dev->number,pci_error); error = -EIO; } return error; } #undef SET_SEPROM #undef GET_SEPROM static int __devinit get_esi_fpga(struct atm_dev *dev, void __iomem *base) { void __iomem *mac_base; int i; mac_base = base+EPROM_SIZE-sizeof(struct midway_eprom); for (i = 0; i < ESI_LEN; i++) dev->esi[i] = readb(mac_base+(i^3)); return 0; } static int __devinit eni_do_init(struct atm_dev *dev) { struct midway_eprom __iomem *eprom; struct eni_dev *eni_dev; struct pci_dev *pci_dev; unsigned long real_base; void __iomem *base; int error,i,last; DPRINTK(">eni_init\n"); dev->ci_range.vpi_bits = 0; dev->ci_range.vci_bits = NR_VCI_LD; dev->link_rate = ATM_OC3_PCR; eni_dev = ENI_DEV(dev); pci_dev = eni_dev->pci_dev; real_base = pci_resource_start(pci_dev, 0); eni_dev->irq = pci_dev->irq; if ((error = pci_write_config_word(pci_dev,PCI_COMMAND, PCI_COMMAND_MEMORY | (eni_dev->asic ? 
PCI_COMMAND_PARITY | PCI_COMMAND_SERR : 0)))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory " "(0x%02x)\n",dev->number,error); return -EIO; } printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%lx,irq=%d,", dev->number,pci_dev->revision,real_base,eni_dev->irq); if (!(base = ioremap_nocache(real_base,MAP_MAX_SIZE))) { printk("\n"); printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page " "mapping\n",dev->number); return error; } eni_dev->ioaddr = base; eni_dev->base_diff = real_base - (unsigned long) base; /* id may not be present in ASIC Tonga boards - check this @@@ */ if (!eni_dev->asic) { eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom)); if (readl(&eprom->magic) != ENI155_MAGIC) { printk("\n"); printk(KERN_ERR DEV_LABEL "(itf %d): bad magic - expected 0x%x, got 0x%x\n", dev->number, ENI155_MAGIC, (unsigned)readl(&eprom->magic)); error = -EINVAL; goto unmap; } } eni_dev->phy = base+PHY_BASE; eni_dev->reg = base+REG_BASE; eni_dev->ram = base+RAM_BASE; last = MAP_MAX_SIZE-RAM_BASE; for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) { writel(0x55555555,eni_dev->ram+i); if (readl(eni_dev->ram+i) != 0x55555555) last = i; else { writel(0xAAAAAAAA,eni_dev->ram+i); if (readl(eni_dev->ram+i) != 0xAAAAAAAA) last = i; else writel(i,eni_dev->ram+i); } } for (i = 0; i < last; i += RAM_INCREMENT) if (readl(eni_dev->ram+i) != i) break; eni_dev->mem = i; memset_io(eni_dev->ram,0,eni_dev->mem); /* TODO: should shrink allocation now */ printk("mem=%dkB (",eni_dev->mem >> 10); /* TODO: check for non-SUNI, check for TAXI ? */ if (!(eni_in(MID_RES_ID_MCON) & 0x200) != !eni_dev->asic) { printk(")\n"); printk(KERN_ERR DEV_LABEL "(itf %d): ERROR - wrong id 0x%x\n", dev->number,(unsigned) eni_in(MID_RES_ID_MCON)); error = -EINVAL; goto unmap; } error = eni_dev->asic ? get_esi_asic(dev) : get_esi_fpga(dev,base); if (error) goto unmap; for (i = 0; i < ESI_LEN; i++) printk("%s%02X",i ? 
"-" : "",dev->esi[i]); printk(")\n"); printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number, eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA", media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]); error = suni_init(dev); if (error) goto unmap; out: return error; unmap: iounmap(base); goto out; } static void eni_do_release(struct atm_dev *dev) { struct eni_dev *ed = ENI_DEV(dev); dev->phy->stop(dev); dev->phy = NULL; iounmap(ed->ioaddr); } static int __devinit eni_start(struct atm_dev *dev) { struct eni_dev *eni_dev; void __iomem *buf; unsigned long buffer_mem; int error; DPRINTK(">eni_start\n"); eni_dev = ENI_DEV(dev); if (request_irq(eni_dev->irq,&eni_int,IRQF_SHARED,DEV_LABEL,dev)) { printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n", dev->number,eni_dev->irq); error = -EAGAIN; goto out; } pci_set_master(eni_dev->pci_dev); if ((error = pci_write_config_word(eni_dev->pci_dev,PCI_COMMAND, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | (eni_dev->asic ? PCI_COMMAND_PARITY | PCI_COMMAND_SERR : 0)))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+" "master (0x%02x)\n",dev->number,error); goto free_irq; } if ((error = pci_write_config_byte(eni_dev->pci_dev,PCI_TONGA_CTRL, END_SWAP_DMA))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't set endian swap " "(0x%02x)\n",dev->number,error); goto free_irq; } /* determine addresses of internal tables */ eni_dev->vci = eni_dev->ram; eni_dev->rx_dma = eni_dev->ram+NR_VCI*16; eni_dev->tx_dma = eni_dev->rx_dma+NR_DMA_RX*8; eni_dev->service = eni_dev->tx_dma+NR_DMA_TX*8; buf = eni_dev->service+NR_SERVICE*4; DPRINTK("vci 0x%lx,rx 0x%lx, tx 0x%lx,srv 0x%lx,buf 0x%lx\n", eni_dev->vci,eni_dev->rx_dma,eni_dev->tx_dma, eni_dev->service,buf); spin_lock_init(&eni_dev->lock); tasklet_init(&eni_dev->task,eni_tasklet,(unsigned long) dev); eni_dev->events = 0; /* initialize memory management */ buffer_mem = eni_dev->mem - (buf - eni_dev->ram); eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2; eni_dev->free_list = 
kmalloc(
	    sizeof(struct eni_free)*(eni_dev->free_list_size+1),GFP_KERNEL);
	if (!eni_dev->free_list) {
		printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n",
		    dev->number);
		error = -ENOMEM;
		goto free_irq;
	}
	eni_dev->free_len = 0;
	eni_put_free(eni_dev,buf,buffer_mem);
	memset_io(eni_dev->vci,0,16*NR_VCI); /* clear VCI table */
	/*
	 * byte_addr  free (k)
	 * 0x00000000     512  VCI table
	 * 0x00004000     496  RX DMA
	 * 0x00005000     492  TX DMA
	 * 0x00006000     488  service list
	 * 0x00007000     484  buffers
	 * 0x00080000       0  end (512kB)
	 */
	eni_out(0xffffffff,MID_IE);	/* enable all interrupt sources */
	error = start_tx(dev);
	if (error) goto free_list;
	error = start_rx(dev);
	if (error) goto free_list;
	error = dev->phy->start(dev);
	if (error) goto free_list;
	eni_out(eni_in(MID_MC_S) | (1 << MID_INT_SEL_SHIFT) |
	    MID_TX_LOCK_MODE | MID_DMA_ENABLE | MID_TX_ENABLE | MID_RX_ENABLE,
	    MID_MC_S);
	    /* Tonga uses SBus INTReq1 */
	(void) eni_in(MID_ISA); /* clear Midway interrupts */
	return 0;

free_list:
	kfree(eni_dev->free_list);

free_irq:
	free_irq(eni_dev->irq, dev);

out:
	return error;
}


/* atmdev_ops.close: drain both directions, then free the per-VCC state. */
static void eni_close(struct atm_vcc *vcc)
{
	DPRINTK(">eni_close\n");
	if (!ENI_VCC(vcc)) return;
	clear_bit(ATM_VF_READY,&vcc->flags);
	close_rx(vcc);
	close_tx(vcc);
	DPRINTK("eni_close: done waiting\n");
	/* deallocate memory */
	kfree(ENI_VCC(vcc));
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR,&vcc->flags);
	/*foo();*/
}


/*
 * atmdev_ops.open: allocate per-VCC state and run the two-stage RX/TX
 * open; the second stage is skipped while vpi/vci are still unspecified.
 */
static int eni_open(struct atm_vcc *vcc)
{
	struct eni_vcc *eni_vcc;
	int error;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	DPRINTK(">eni_open\n");
	EVENT("eni_open\n",0,0);
	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
		vcc->dev_data = NULL;
	/* NOTE(review): vci is compared against ATM_VPI_UNSPEC and vpi
	   against ATM_VCI_UNSPEC - looks transposed; harmless only if both
	   macros have the same value. Verify against linux/atm.h. */
	if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
		set_bit(ATM_VF_ADDR,&vcc->flags);
	if (vcc->qos.aal != ATM_AAL0 && vcc->qos.aal != ATM_AAL5)
		return -EINVAL;
	DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
	    vcc->vci);
	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
		eni_vcc = kmalloc(sizeof(struct eni_vcc),GFP_KERNEL);
		if (!eni_vcc) return -ENOMEM;
		vcc->dev_data = eni_vcc;
		eni_vcc->tx = NULL; /* for eni_close after open_rx */
		if ((error = open_rx_first(vcc))) {
			eni_close(vcc);
			return error;
		}
		if ((error = open_tx_first(vcc))) {
			eni_close(vcc);
			return error;
		}
	}
	if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0;
	if ((error = open_rx_second(vcc))) {
		eni_close(vcc);
		return error;
	}
	if ((error = open_tx_second(vcc))) {
		eni_close(vcc);
		return error;
	}
	set_bit(ATM_VF_READY,&vcc->flags);
	/* should power down SUNI while !ref_count @@@ */
	return 0;
}


/*
 * atmdev_ops.change_qos: re-negotiate reservation/shaping for the TX side
 * and, if requested (ATM_MF_IMMED), patch the new rate into all PDUs of
 * this VCC that are already queued in the segmentation buffer.
 */
static int eni_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flgs)
{
	struct eni_dev *eni_dev = ENI_DEV(vcc->dev);
	struct eni_tx *tx = ENI_VCC(vcc)->tx;
	struct sk_buff *skb;
	int error,rate,rsv,shp;

	if (qos->txtp.traffic_class == ATM_NONE) return 0;
	if (tx == eni_dev->ubr) return -EBADFD;	/* shared channel: no QoS */
	rate = atm_pcr_goal(&qos->txtp);
	if (rate < 0) rate = -rate;
	rsv = shp = 0;
	if ((flgs & ATM_MF_DEC_RSV) && rate && rate < tx->reserved) rsv = 1;
	if ((flgs & ATM_MF_INC_RSV) && (!rate || rate > tx->reserved)) rsv = 1;
	if ((flgs & ATM_MF_DEC_SHP) && rate && rate < tx->shaping) shp = 1;
	if ((flgs & ATM_MF_INC_SHP) && (!rate || rate > tx->shaping)) shp = 1;
	if (!rsv && !shp) return 0;
	error = reserve_or_set_tx(vcc,&qos->txtp,rsv,shp);
	if (error) return error;
	if (shp && !(flgs & ATM_MF_IMMED)) return 0;
	/*
	 * Walk through the send buffer and patch the rate information in all
	 * segmentation buffer descriptors of this VCC.
	 */
	tasklet_disable(&eni_dev->task);
	skb_queue_walk(&eni_dev->tx_queue, skb) {
		void __iomem *dsc;

		if (ATM_SKB(skb)->vcc != vcc) continue;
		dsc = tx->send+ENI_PRV_POS(skb)*4;
		writel((readl(dsc) & ~(MID_SEG_RATE | MID_SEG_PR)) |
		    (tx->prescaler << MID_SEG_PR_SHIFT) |
		    (tx->resolution << MID_SEG_RATE_SHIFT), dsc);
	}
	tasklet_enable(&eni_dev->task);
	return 0;
}


/*
 * atmdev_ops.ioctl: handles the driver-private ENI_MEMDUMP and ENI_SETMULT
 * commands plus ATM_SETCIRANGE; everything else goes to the PHY driver.
 */
static int eni_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
{
	struct eni_dev *eni_dev = ENI_DEV(dev);

	if (cmd == ENI_MEMDUMP) {
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		printk(KERN_WARNING "Please use /proc/atm/" DEV_LABEL ":%d "
		    "instead of obsolete ioctl ENI_MEMDUMP\n",dev->number);
		dump(dev);
		return 0;
	}
	if (cmd == ENI_SETMULT) {
		struct eni_multipliers mult;

		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (copy_from_user(&mult, arg,
			    sizeof(struct eni_multipliers)))
			return -EFAULT;
		/* multipliers are percentages: must be >100 and <=65536 */
		if ((mult.tx && mult.tx <= 100) || (mult.rx &&mult.rx <= 100) ||
		    mult.tx > 65536 || mult.rx > 65536) return -EINVAL;
		if (mult.tx) eni_dev->tx_mult = mult.tx;
		if (mult.rx) eni_dev->rx_mult = mult.rx;
		return 0;
	}
	if (cmd == ATM_SETCIRANGE) {
		struct atm_cirange ci;

		if (copy_from_user(&ci, arg,sizeof(struct atm_cirange)))
			return -EFAULT;
		/* only the hardware's fixed range can be "set" */
		if ((ci.vpi_bits == 0 || ci.vpi_bits == ATM_CI_MAX) &&
		    (ci.vci_bits == NR_VCI_LD || ci.vpi_bits == ATM_CI_MAX))
			return 0;
		return -EINVAL;
	}
	if (!dev->phy->ioctl) return -ENOIOCTLCMD;
	return dev->phy->ioctl(dev,cmd,arg);
}


/* No socket options are supported in either direction. */
static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname,
    void __user *optval,int optlen)
{
	return -EINVAL;
}


static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname,
    void __user *optval,unsigned int optlen)
{
	return -EINVAL;
}


/*
 * atmdev_ops.send: validate the skb, byte-swap the AAL0 cell header, then
 * try an immediate do_tx(); on failure the skb is backlogged and the
 * tasklet will retry via poll_tx().
 */
static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
	enum enq_res res;

	DPRINTK(">eni_send\n");
	if (!ENI_VCC(vcc)->tx) {
		if (vcc->pop) vcc->pop(vcc,skb);
		else dev_kfree_skb(skb);
		return -EINVAL;
	}
	/* NOTE(review): this NULL check comes after skb may already have
	   been passed to pop/dev_kfree_skb above - ordering looks suspect,
	   though in practice callers never pass a NULL skb */
	if (!skb) {
		printk(KERN_CRIT "!skb in eni_send ?\n");
		if (vcc->pop) vcc->pop(vcc,skb);
		return -EINVAL;
	}
	if (vcc->qos.aal == ATM_AAL0) {
		if (skb->len != ATM_CELL_SIZE-1) {	/* cell without HEC */
			if (vcc->pop) vcc->pop(vcc,skb);
			else dev_kfree_skb(skb);
			return -EINVAL;
		}
		*(u32 *) skb->data = htonl(*(u32 *) skb->data);
	}
	submitted++;
	ATM_SKB(skb)->vcc = vcc;
	tasklet_disable(&ENI_DEV(vcc->dev)->task);
	res = do_tx(skb);
	tasklet_enable(&ENI_DEV(vcc->dev)->task);
	if (res == enq_ok) return 0;
	skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb);
	backlogged++;
	tasklet_schedule(&ENI_DEV(vcc->dev)->task);
	return 0;
}


/* PHY register accessors: registers are word-spaced in the mapped window. */
static void eni_phy_put(struct atm_dev *dev,unsigned char value,
    unsigned long addr)
{
	writel(value,ENI_DEV(dev)->phy+addr*4);
}


static unsigned char eni_phy_get(struct atm_dev *dev,unsigned long addr)
{
	return readl(ENI_DEV(dev)->phy+addr*4);
}


/*
 * /proc/atm read-out: emits one line per call, selected by *pos - device
 * summary, burst configuration, multipliers, per-channel and per-VCC
 * state, and finally the free-memory list.
 */
static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
{
	struct hlist_node *node;
	struct sock *s;
	static const char *signal[] = { "LOST","unknown","okay" };
	struct eni_dev *eni_dev = ENI_DEV(dev);
	struct atm_vcc *vcc;
	int left,i;

	left = *pos;
	if (!left)
		return sprintf(page,DEV_LABEL "(itf %d) signal %s, %dkB, "
		    "%d cps remaining\n",dev->number,signal[(int) dev->signal],
		    eni_dev->mem >> 10,eni_dev->tx_bw);
	if (!--left)
		return sprintf(page,"%4sBursts: TX"
#if !defined(CONFIG_ATM_ENI_BURST_TX_16W) && \
    !defined(CONFIG_ATM_ENI_BURST_TX_8W) && \
    !defined(CONFIG_ATM_ENI_BURST_TX_4W) && \
    !defined(CONFIG_ATM_ENI_BURST_TX_2W)
		    " none"
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_16W
		    " 16W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_8W
		    " 8W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_4W
		    " 4W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_2W
		    " 2W"
#endif
		    ", RX"
#if !defined(CONFIG_ATM_ENI_BURST_RX_16W) && \
    !defined(CONFIG_ATM_ENI_BURST_RX_8W) && \
    !defined(CONFIG_ATM_ENI_BURST_RX_4W) && \
    !defined(CONFIG_ATM_ENI_BURST_RX_2W)
		    " none"
#endif
#ifdef CONFIG_ATM_ENI_BURST_RX_16W
		    " 16W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_RX_8W
		    " 8W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_RX_4W
		    " 4W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_RX_2W
		    " 2W"
#endif
#ifndef CONFIG_ATM_ENI_TUNE_BURST
		    " (default)"
#endif
		    "\n","");
if (!--left) return sprintf(page,"%4sBuffer multipliers: tx %d%%, rx %d%%\n", "",eni_dev->tx_mult,eni_dev->rx_mult); for (i = 0; i < NR_CHAN; i++) { struct eni_tx *tx = eni_dev->tx+i; if (!tx->send) continue; if (!--left) { return sprintf(page,"tx[%d]: 0x%ld-0x%ld " "(%6ld bytes), rsv %d cps, shp %d cps%s\n",i, (unsigned long) (tx->send - eni_dev->ram), tx->send-eni_dev->ram+tx->words*4-1,tx->words*4, tx->reserved,tx->shaping, tx == eni_dev->ubr ? " (UBR)" : ""); } if (--left) continue; return sprintf(page,"%10sbacklog %u packets\n","", skb_queue_len(&tx->backlog)); } read_lock(&vcc_sklist_lock); for(i = 0; i < VCC_HTABLE_SIZE; ++i) { struct hlist_head *head = &vcc_hash[i]; sk_for_each(s, node, head) { struct eni_vcc *eni_vcc; int length; vcc = atm_sk(s); if (vcc->dev != dev) continue; eni_vcc = ENI_VCC(vcc); if (--left) continue; length = sprintf(page,"vcc %4d: ",vcc->vci); if (eni_vcc->rx) { length += sprintf(page+length,"0x%ld-0x%ld " "(%6ld bytes)", (unsigned long) (eni_vcc->recv - eni_dev->ram), eni_vcc->recv-eni_dev->ram+eni_vcc->words*4-1, eni_vcc->words*4); if (eni_vcc->tx) length += sprintf(page+length,", "); } if (eni_vcc->tx) length += sprintf(page+length,"tx[%d], txing %d bytes", eni_vcc->tx->index,eni_vcc->txing); page[length] = '\n'; read_unlock(&vcc_sklist_lock); return length+1; } } read_unlock(&vcc_sklist_lock); for (i = 0; i < eni_dev->free_len; i++) { struct eni_free *fe = eni_dev->free_list+i; unsigned long offset; if (--left) continue; offset = (unsigned long) eni_dev->ram+eni_dev->base_diff; return sprintf(page,"free %p-%p (%6d bytes)\n", fe->start-offset,fe->start-offset+(1 << fe->order)-1, 1 << fe->order); } return 0; } static const struct atmdev_ops ops = { .open = eni_open, .close = eni_close, .ioctl = eni_ioctl, .getsockopt = eni_getsockopt, .setsockopt = eni_setsockopt, .send = eni_send, .phy_put = eni_phy_put, .phy_get = eni_phy_get, .change_qos = eni_change_qos, .proc_read = eni_proc_read }; static int __devinit eni_init_one(struct 
pci_dev *pci_dev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	struct eni_dev *eni_dev;
	struct eni_zero *zero;
	int rc;

	/* PCI probe: enable the device, allocate per-device state and the
	 * DMA "zeroes" scratch area, register with the ATM core, then bring
	 * the adapter up.  Unwinds in reverse order on any failure. */
	rc = pci_enable_device(pci_dev);
	if (rc < 0)
		goto out;

	rc = -ENOMEM;
	eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
	if (!eni_dev)
		goto err_disable;

	zero = &eni_dev->zero;
	zero->addr = pci_alloc_consistent(pci_dev, ENI_ZEROES_SIZE, &zero->dma);
	if (!zero->addr)
		goto err_kfree;

	dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
	if (!dev)
		goto err_free_consistent;

	dev->dev_data = eni_dev;
	pci_set_drvdata(pci_dev, dev);
	eni_dev->pci_dev = pci_dev;
	/* driver_data from the id table: 0 = FPGA board, 1 = ASIC board */
	eni_dev->asic = ent->driver_data;

	rc = eni_do_init(dev);
	if (rc < 0)
		goto err_unregister;

	rc = eni_start(dev);
	if (rc < 0)
		goto err_eni_release;

	/* link the new board into the driver-global list */
	eni_dev->more = eni_boards;
	eni_boards = dev;
out:
	return rc;

err_eni_release:
	eni_do_release(dev);
err_unregister:
	atm_dev_deregister(dev);
err_free_consistent:
	pci_free_consistent(pci_dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
err_kfree:
	kfree(eni_dev);
err_disable:
	pci_disable_device(pci_dev);
	goto out;
}

/* PCI IDs; driver_data selects the FPGA vs. ASIC register layout */
static struct pci_device_id eni_pci_tbl[] = {
	{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_FPGA), 0 /* FPGA */ },
	{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_ASIC), 1 /* ASIC */ },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,eni_pci_tbl);

/* PCI remove: tear down in reverse probe order.
 * NOTE(review): the device is not unlinked from the eni_boards list here;
 * confirm that list is never walked after a hot-remove. */
static void __devexit eni_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	struct eni_dev *ed = ENI_DEV(dev);
	struct eni_zero *zero = &ed->zero;

	eni_do_release(dev);
	atm_dev_deregister(dev);
	pci_free_consistent(pdev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
	kfree(ed);
	pci_disable_device(pdev);
}

static struct pci_driver eni_driver = {
	.name		= DEV_LABEL,
	.id_table	= eni_pci_tbl,
	.probe		= eni_init_one,
	.remove		= __devexit_p(eni_remove_one),
};

/* Module init: verify at run time that the driver's per-skb private data
 * fits into skb->cb before registering the PCI driver. */
static int __init eni_init(void)
{
	struct sk_buff *skb; /* dummy for sizeof */

	if (sizeof(skb->cb) < sizeof(struct eni_skb_prv)) {
		printk(KERN_ERR "eni_detect: skb->cb is too small (%Zd < %Zd)\n",
		    sizeof(skb->cb),sizeof(struct eni_skb_prv));
		return -EIO;
	}
	return pci_register_driver(&eni_driver);
}

module_init(eni_init);
/* @@@ since exit routine not defined, this module can not be unloaded */
MODULE_LICENSE("GPL");
gpl-2.0
stedman420/android_kernel_zte_hera
drivers/atm/eni.c
4922
62738
/* drivers/atm/eni.c - Efficient Networks ENI155P device driver */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/skbuff.h> #include <linux/time.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/init.h> #include <linux/atm_eni.h> #include <linux/bitops.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/uaccess.h> #include <asm/string.h> #include <asm/byteorder.h> #include "tonga.h" #include "midway.h" #include "suni.h" #include "eni.h" #if !defined(__i386__) && !defined(__x86_64__) #ifndef ioremap_nocache #define ioremap_nocache(X,Y) ioremap(X,Y) #endif #endif /* * TODO: * * Show stoppers * none * * Minor * - OAM support * - fix bugs listed below */ /* * KNOWN BUGS: * * - may run into JK-JK bug and deadlock * - should allocate UBR channel first * - buffer space allocation algorithm is stupid * (RX: should be maxSDU+maxdelay*rate * TX: should be maxSDU+min(maxSDU,maxdelay*rate) ) * - doesn't support OAM cells * - eni_put_free may hang if not putting memory fragments that _complete_ * 2^n block (never happens in real life, though) */ #if 0 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) #else #define DPRINTK(format,args...) #endif #ifndef CONFIG_ATM_ENI_TUNE_BURST #define CONFIG_ATM_ENI_BURST_TX_8W #define CONFIG_ATM_ENI_BURST_RX_4W #endif #ifndef CONFIG_ATM_ENI_DEBUG #define NULLCHECK(x) #define EVENT(s,a,b) static void event_dump(void) { } #else /* * NULL pointer checking */ #define NULLCHECK(x) \ if ((unsigned long) (x) < 0x30) \ printk(KERN_CRIT #x "==0x%lx\n",(unsigned long) (x)) /* * Very extensive activity logging. Greatly improves bug detection speed but * costs a few Mbps if enabled. 
*/ #define EV 64 static const char *ev[EV]; static unsigned long ev_a[EV],ev_b[EV]; static int ec = 0; static void EVENT(const char *s,unsigned long a,unsigned long b) { ev[ec] = s; ev_a[ec] = a; ev_b[ec] = b; ec = (ec+1) % EV; } static void event_dump(void) { int n,i; for (n = 0; n < EV; n++) { i = (ec+n) % EV; printk(KERN_NOTICE); printk(ev[i] ? ev[i] : "(null)",ev_a[i],ev_b[i]); } } #endif /* CONFIG_ATM_ENI_DEBUG */ /* * NExx must not be equal at end * EExx may be equal at end * xxPJOK verify validity of pointer jumps * xxPMOK operating on a circular buffer of "c" words */ #define NEPJOK(a0,a1,b) \ ((a0) < (a1) ? (b) <= (a0) || (b) > (a1) : (b) <= (a0) && (b) > (a1)) #define EEPJOK(a0,a1,b) \ ((a0) < (a1) ? (b) < (a0) || (b) >= (a1) : (b) < (a0) && (b) >= (a1)) #define NEPMOK(a0,d,b,c) NEPJOK(a0,(a0+d) & (c-1),b) #define EEPMOK(a0,d,b,c) EEPJOK(a0,(a0+d) & (c-1),b) static int tx_complete = 0,dma_complete = 0,queued = 0,requeued = 0, backlogged = 0,rx_enqueued = 0,rx_dequeued = 0,pushed = 0,submitted = 0, putting = 0; static struct atm_dev *eni_boards = NULL; /* Read/write registers on card */ #define eni_in(r) readl(eni_dev->reg+(r)*4) #define eni_out(v,r) writel((v),eni_dev->reg+(r)*4) /*-------------------------------- utilities --------------------------------*/ static void dump_mem(struct eni_dev *eni_dev) { int i; for (i = 0; i < eni_dev->free_len; i++) printk(KERN_DEBUG " %d: %p %d\n",i, eni_dev->free_list[i].start, 1 << eni_dev->free_list[i].order); } static void dump(struct atm_dev *dev) { struct eni_dev *eni_dev; int i; eni_dev = ENI_DEV(dev); printk(KERN_NOTICE "Free memory\n"); dump_mem(eni_dev); printk(KERN_NOTICE "TX buffers\n"); for (i = 0; i < NR_CHAN; i++) if (eni_dev->tx[i].send) printk(KERN_NOTICE " TX %d @ %p: %ld\n",i, eni_dev->tx[i].send,eni_dev->tx[i].words*4); printk(KERN_NOTICE "RX buffers\n"); for (i = 0; i < 1024; i++) if (eni_dev->rx_map[i] && ENI_VCC(eni_dev->rx_map[i])->rx) printk(KERN_NOTICE " RX %d @ %p: %ld\n",i, 
ENI_VCC(eni_dev->rx_map[i])->recv, ENI_VCC(eni_dev->rx_map[i])->words*4); printk(KERN_NOTICE "----\n"); } static void eni_put_free(struct eni_dev *eni_dev, void __iomem *start, unsigned long size) { struct eni_free *list; int len,order; DPRINTK("init 0x%lx+%ld(0x%lx)\n",start,size,size); start += eni_dev->base_diff; list = eni_dev->free_list; len = eni_dev->free_len; while (size) { if (len >= eni_dev->free_list_size) { printk(KERN_CRIT "eni_put_free overflow (%p,%ld)\n", start,size); break; } for (order = 0; !(((unsigned long)start | size) & (1 << order)); order++); if (MID_MIN_BUF_SIZE > (1 << order)) { printk(KERN_CRIT "eni_put_free: order %d too small\n", order); break; } list[len].start = (void __iomem *) start; list[len].order = order; len++; start += 1 << order; size -= 1 << order; } eni_dev->free_len = len; /*dump_mem(eni_dev);*/ } static void __iomem *eni_alloc_mem(struct eni_dev *eni_dev, unsigned long *size) { struct eni_free *list; void __iomem *start; int len,i,order,best_order,index; list = eni_dev->free_list; len = eni_dev->free_len; if (*size < MID_MIN_BUF_SIZE) *size = MID_MIN_BUF_SIZE; if (*size > MID_MAX_BUF_SIZE) return NULL; for (order = 0; (1 << order) < *size; order++); DPRINTK("trying: %ld->%d\n",*size,order); best_order = 65; /* we don't have more than 2^64 of anything ... 
*/ index = 0; /* silence GCC */ for (i = 0; i < len; i++) if (list[i].order == order) { best_order = order; index = i; break; } else if (best_order > list[i].order && list[i].order > order) { best_order = list[i].order; index = i; } if (best_order == 65) return NULL; start = list[index].start-eni_dev->base_diff; list[index] = list[--len]; eni_dev->free_len = len; *size = 1 << order; eni_put_free(eni_dev,start+*size,(1 << best_order)-*size); DPRINTK("%ld bytes (order %d) at 0x%lx\n",*size,order,start); memset_io(start,0,*size); /* never leak data */ /*dump_mem(eni_dev);*/ return start; } static void eni_free_mem(struct eni_dev *eni_dev, void __iomem *start, unsigned long size) { struct eni_free *list; int len,i,order; start += eni_dev->base_diff; list = eni_dev->free_list; len = eni_dev->free_len; for (order = -1; size; order++) size >>= 1; DPRINTK("eni_free_mem: %p+0x%lx (order %d)\n",start,size,order); for (i = 0; i < len; i++) if (((unsigned long) list[i].start) == ((unsigned long)start^(1 << order)) && list[i].order == order) { DPRINTK("match[%d]: 0x%lx/0x%lx(0x%x), %d/%d\n",i, list[i].start,start,1 << order,list[i].order,order); list[i] = list[--len]; start = (void __iomem *) ((unsigned long) start & ~(unsigned long) (1 << order)); order++; i = -1; continue; } if (len >= eni_dev->free_list_size) { printk(KERN_ALERT "eni_free_mem overflow (%p,%d)\n",start, order); return; } list[len].start = start; list[len].order = order; eni_dev->free_len = len+1; /*dump_mem(eni_dev);*/ } /*----------------------------------- RX ------------------------------------*/ #define ENI_VCC_NOS ((struct atm_vcc *) 1) static void rx_ident_err(struct atm_vcc *vcc) { struct atm_dev *dev; struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; dev = vcc->dev; eni_dev = ENI_DEV(dev); /* immediately halt adapter */ eni_out(eni_in(MID_MC_S) & ~(MID_DMA_ENABLE | MID_TX_ENABLE | MID_RX_ENABLE),MID_MC_S); /* dump useful information */ eni_vcc = ENI_VCC(vcc); printk(KERN_ALERT DEV_LABEL "(itf %d): 
driver error - RX ident " "mismatch\n",dev->number); printk(KERN_ALERT " VCI %d, rxing %d, words %ld\n",vcc->vci, eni_vcc->rxing,eni_vcc->words); printk(KERN_ALERT " host descr 0x%lx, rx pos 0x%lx, descr value " "0x%x\n",eni_vcc->descr,eni_vcc->rx_pos, (unsigned) readl(eni_vcc->recv+eni_vcc->descr*4)); printk(KERN_ALERT " last %p, servicing %d\n",eni_vcc->last, eni_vcc->servicing); EVENT("---dump ends here---\n",0,0); printk(KERN_NOTICE "---recent events---\n"); event_dump(); ENI_DEV(dev)->fast = NULL; /* really stop it */ ENI_DEV(dev)->slow = NULL; skb_queue_head_init(&ENI_DEV(dev)->rx_queue); } static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, unsigned long skip,unsigned long size,unsigned long eff) { struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; u32 dma_rd,dma_wr; u32 dma[RX_DMA_BUF*2]; dma_addr_t paddr; unsigned long here; int i,j; eni_dev = ENI_DEV(vcc->dev); eni_vcc = ENI_VCC(vcc); paddr = 0; /* GCC, shut up */ if (skb) { paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len, PCI_DMA_FROMDEVICE); ENI_PRV_PADDR(skb) = paddr; if (paddr & 3) printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d has " "mis-aligned RX data (0x%lx)\n",vcc->dev->number, vcc->vci,(unsigned long) paddr); ENI_PRV_SIZE(skb) = size+skip; /* PDU plus descriptor */ ATM_SKB(skb)->vcc = vcc; } j = 0; if ((eff && skip) || 1) { /* @@@ actually, skip is always == 1 ... */ here = (eni_vcc->descr+skip) & (eni_vcc->words-1); dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; j++; } here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1); if (!eff) size += skip; else { unsigned long words; if (!size) { DPRINTK("strange things happen ...\n"); EVENT("strange things happen ... 
(skip=%ld,eff=%ld)\n", size,eff); } words = eff; if (paddr & 15) { unsigned long init; init = 4-((paddr & 15) >> 2); if (init > words) init = words; dma[j++] = MID_DT_WORD | (init << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += init << 2; words -= init; } #ifdef CONFIG_ATM_ENI_BURST_RX_16W /* may work with some PCI chipsets ... */ if (words & ~15) { dma[j++] = MID_DT_16W | ((words >> 4) << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += (words & ~15) << 2; words &= 15; } #endif #ifdef CONFIG_ATM_ENI_BURST_RX_8W /* works only with *some* PCI chipsets ... */ if (words & ~7) { dma[j++] = MID_DT_8W | ((words >> 3) << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += (words & ~7) << 2; words &= 7; } #endif #ifdef CONFIG_ATM_ENI_BURST_RX_4W /* recommended */ if (words & ~3) { dma[j++] = MID_DT_4W | ((words >> 2) << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += (words & ~3) << 2; words &= 3; } #endif #ifdef CONFIG_ATM_ENI_BURST_RX_2W /* probably useless if RX_4W, RX_8W, ... */ if (words & ~1) { dma[j++] = MID_DT_2W | ((words >> 1) << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; paddr += (words & ~1) << 2; words &= 1; } #endif if (words) { dma[j++] = MID_DT_WORD | (words << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT); dma[j++] = paddr; } } if (size != eff) { dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; j++; } if (!j || j > 2*RX_DMA_BUF) { printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n"); goto trouble; } dma[j-2] |= MID_DMA_END; j = j >> 1; dma_wr = eni_in(MID_DMA_WR_RX); dma_rd = eni_in(MID_DMA_RD_RX); /* * Can I move the dma_wr pointer by 2j+1 positions without overwriting * data that hasn't been read (position of dma_rd) yet ? 
*/ if (!NEPMOK(dma_wr,j+j+1,dma_rd,NR_DMA_RX)) { /* @@@ +1 is ugly */ printk(KERN_WARNING DEV_LABEL "(itf %d): RX DMA full\n", vcc->dev->number); goto trouble; } for (i = 0; i < j; i++) { writel(dma[i*2],eni_dev->rx_dma+dma_wr*8); writel(dma[i*2+1],eni_dev->rx_dma+dma_wr*8+4); dma_wr = (dma_wr+1) & (NR_DMA_RX-1); } if (skb) { ENI_PRV_POS(skb) = eni_vcc->descr+size+1; skb_queue_tail(&eni_dev->rx_queue,skb); eni_vcc->last = skb; rx_enqueued++; } eni_vcc->descr = here; eni_out(dma_wr,MID_DMA_WR_RX); return 0; trouble: if (paddr) pci_unmap_single(eni_dev->pci_dev,paddr,skb->len, PCI_DMA_FROMDEVICE); if (skb) dev_kfree_skb_irq(skb); return -1; } static void discard(struct atm_vcc *vcc,unsigned long size) { struct eni_vcc *eni_vcc; eni_vcc = ENI_VCC(vcc); EVENT("discard (size=%ld)\n",size,0); while (do_rx_dma(vcc,NULL,1,size,0)) EVENT("BUSY LOOP",0,0); /* could do a full fallback, but that might be more expensive */ if (eni_vcc->rxing) ENI_PRV_POS(eni_vcc->last) += size+1; else eni_vcc->rx_pos = (eni_vcc->rx_pos+size+1) & (eni_vcc->words-1); } /* * TODO: should check whether direct copies (without DMA setup, dequeuing on * interrupt, etc.) aren't much faster for AAL0 */ static int rx_aal0(struct atm_vcc *vcc) { struct eni_vcc *eni_vcc; unsigned long descr; unsigned long length; struct sk_buff *skb; DPRINTK(">rx_aal0\n"); eni_vcc = ENI_VCC(vcc); descr = readl(eni_vcc->recv+eni_vcc->descr*4); if ((descr & MID_RED_IDEN) != (MID_RED_RX_ID << MID_RED_SHIFT)) { rx_ident_err(vcc); return 1; } if (descr & MID_RED_T) { DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", vcc->dev->number); length = 0; atomic_inc(&vcc->stats->rx_err); } else { length = ATM_CELL_SIZE-1; /* no HEC */ } skb = length ? 
atm_alloc_charge(vcc,length,GFP_ATOMIC) : NULL; if (!skb) { discard(vcc,length >> 2); return 0; } skb_put(skb,length); skb->tstamp = eni_vcc->timestamp; DPRINTK("got len %ld\n",length); if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1; eni_vcc->rxing++; return 0; } static int rx_aal5(struct atm_vcc *vcc) { struct eni_vcc *eni_vcc; unsigned long descr; unsigned long size,eff,length; struct sk_buff *skb; EVENT("rx_aal5\n",0,0); DPRINTK(">rx_aal5\n"); eni_vcc = ENI_VCC(vcc); descr = readl(eni_vcc->recv+eni_vcc->descr*4); if ((descr & MID_RED_IDEN) != (MID_RED_RX_ID << MID_RED_SHIFT)) { rx_ident_err(vcc); return 1; } if (descr & (MID_RED_T | MID_RED_CRC_ERR)) { if (descr & MID_RED_T) { EVENT("empty cell (descr=0x%lx)\n",descr,0); DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", vcc->dev->number); size = 0; } else { static unsigned long silence = 0; if (time_after(jiffies, silence) || silence == 0) { printk(KERN_WARNING DEV_LABEL "(itf %d): " "discarding PDU(s) with CRC error\n", vcc->dev->number); silence = (jiffies+2*HZ)|1; } size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); EVENT("CRC error (descr=0x%lx,size=%ld)\n",descr, size); } eff = length = 0; atomic_inc(&vcc->stats->rx_err); } else { size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); DPRINTK("size=%ld\n",size); length = readl(eni_vcc->recv+(((eni_vcc->descr+size-1) & (eni_vcc->words-1)))*4) & 0xffff; /* -trailer(2)+header(1) */ if (length && length <= (size << 2)-8 && length <= ATM_MAX_AAL5_PDU) eff = (length+3) >> 2; else { /* ^ trailer length (8) */ EVENT("bad PDU (descr=0x08%lx,length=%ld)\n",descr, length); printk(KERN_ERR DEV_LABEL "(itf %d): bad AAL5 PDU " "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", vcc->dev->number,vcc->vci,length,size << 2,descr); length = eff = 0; atomic_inc(&vcc->stats->rx_err); } } skb = eff ? 
atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; if (!skb) { discard(vcc,size); return 0; } skb_put(skb,length); DPRINTK("got len %ld\n",length); if (do_rx_dma(vcc,skb,1,size,eff)) return 1; eni_vcc->rxing++; return 0; } static inline int rx_vcc(struct atm_vcc *vcc) { void __iomem *vci_dsc; unsigned long tmp; struct eni_vcc *eni_vcc; eni_vcc = ENI_VCC(vcc); vci_dsc = ENI_DEV(vcc->dev)->vci+vcc->vci*16; EVENT("rx_vcc(1)\n",0,0); while (eni_vcc->descr != (tmp = (readl(vci_dsc+4) & MID_VCI_DESCR) >> MID_VCI_DESCR_SHIFT)) { EVENT("rx_vcc(2: host dsc=0x%lx, nic dsc=0x%lx)\n", eni_vcc->descr,tmp); DPRINTK("CB_DESCR %ld REG_DESCR %d\n",ENI_VCC(vcc)->descr, (((unsigned) readl(vci_dsc+4) & MID_VCI_DESCR) >> MID_VCI_DESCR_SHIFT)); if (ENI_VCC(vcc)->rx(vcc)) return 1; } /* clear IN_SERVICE flag */ writel(readl(vci_dsc) & ~MID_VCI_IN_SERVICE,vci_dsc); /* * If new data has arrived between evaluating the while condition and * clearing IN_SERVICE, we wouldn't be notified until additional data * follows. So we have to loop again to be sure. 
*/ EVENT("rx_vcc(3)\n",0,0); while (ENI_VCC(vcc)->descr != (tmp = (readl(vci_dsc+4) & MID_VCI_DESCR) >> MID_VCI_DESCR_SHIFT)) { EVENT("rx_vcc(4: host dsc=0x%lx, nic dsc=0x%lx)\n", eni_vcc->descr,tmp); DPRINTK("CB_DESCR %ld REG_DESCR %d\n",ENI_VCC(vcc)->descr, (((unsigned) readl(vci_dsc+4) & MID_VCI_DESCR) >> MID_VCI_DESCR_SHIFT)); if (ENI_VCC(vcc)->rx(vcc)) return 1; } return 0; } static void poll_rx(struct atm_dev *dev) { struct eni_dev *eni_dev; struct atm_vcc *curr; eni_dev = ENI_DEV(dev); while ((curr = eni_dev->fast)) { EVENT("poll_rx.fast\n",0,0); if (rx_vcc(curr)) return; eni_dev->fast = ENI_VCC(curr)->next; ENI_VCC(curr)->next = ENI_VCC_NOS; barrier(); ENI_VCC(curr)->servicing--; } while ((curr = eni_dev->slow)) { EVENT("poll_rx.slow\n",0,0); if (rx_vcc(curr)) return; eni_dev->slow = ENI_VCC(curr)->next; ENI_VCC(curr)->next = ENI_VCC_NOS; barrier(); ENI_VCC(curr)->servicing--; } } static void get_service(struct atm_dev *dev) { struct eni_dev *eni_dev; struct atm_vcc *vcc; unsigned long vci; DPRINTK(">get_service\n"); eni_dev = ENI_DEV(dev); while (eni_in(MID_SERV_WRITE) != eni_dev->serv_read) { vci = readl(eni_dev->service+eni_dev->serv_read*4); eni_dev->serv_read = (eni_dev->serv_read+1) & (NR_SERVICE-1); vcc = eni_dev->rx_map[vci & 1023]; if (!vcc) { printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %ld not " "found\n",dev->number,vci); continue; /* nasty but we try to go on anyway */ /* @@@ nope, doesn't work */ } EVENT("getting from service\n",0,0); if (ENI_VCC(vcc)->next != ENI_VCC_NOS) { EVENT("double service\n",0,0); DPRINTK("Grr, servicing VCC %ld twice\n",vci); continue; } ENI_VCC(vcc)->timestamp = ktime_get_real(); ENI_VCC(vcc)->next = NULL; if (vcc->qos.rxtp.traffic_class == ATM_CBR) { if (eni_dev->fast) ENI_VCC(eni_dev->last_fast)->next = vcc; else eni_dev->fast = vcc; eni_dev->last_fast = vcc; } else { if (eni_dev->slow) ENI_VCC(eni_dev->last_slow)->next = vcc; else eni_dev->slow = vcc; eni_dev->last_slow = vcc; } putting++; ENI_VCC(vcc)->servicing++; 
} } static void dequeue_rx(struct atm_dev *dev) { struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; struct atm_vcc *vcc; struct sk_buff *skb; void __iomem *vci_dsc; int first; eni_dev = ENI_DEV(dev); first = 1; while (1) { skb = skb_dequeue(&eni_dev->rx_queue); if (!skb) { if (first) { DPRINTK(DEV_LABEL "(itf %d): RX but not " "rxing\n",dev->number); EVENT("nothing to dequeue\n",0,0); } break; } EVENT("dequeued (size=%ld,pos=0x%lx)\n",ENI_PRV_SIZE(skb), ENI_PRV_POS(skb)); rx_dequeued++; vcc = ATM_SKB(skb)->vcc; eni_vcc = ENI_VCC(vcc); first = 0; vci_dsc = eni_dev->vci+vcc->vci*16; if (!EEPMOK(eni_vcc->rx_pos,ENI_PRV_SIZE(skb), (readl(vci_dsc+4) & MID_VCI_READ) >> MID_VCI_READ_SHIFT, eni_vcc->words)) { EVENT("requeuing\n",0,0); skb_queue_head(&eni_dev->rx_queue,skb); break; } eni_vcc->rxing--; eni_vcc->rx_pos = ENI_PRV_POS(skb) & (eni_vcc->words-1); pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len, PCI_DMA_TODEVICE); if (!skb->len) dev_kfree_skb_irq(skb); else { EVENT("pushing (len=%ld)\n",skb->len,0); if (vcc->qos.aal == ATM_AAL0) *(unsigned long *) skb->data = ntohl(*(unsigned long *) skb->data); memset(skb->cb,0,sizeof(struct eni_skb_prv)); vcc->push(vcc,skb); pushed++; } atomic_inc(&vcc->stats->rx); } wake_up(&eni_dev->rx_wait); } static int open_rx_first(struct atm_vcc *vcc) { struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; unsigned long size; DPRINTK("open_rx_first\n"); eni_dev = ENI_DEV(vcc->dev); eni_vcc = ENI_VCC(vcc); eni_vcc->rx = NULL; if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0; size = vcc->qos.rxtp.max_sdu*eni_dev->rx_mult/100; if (size > MID_MAX_BUF_SIZE && vcc->qos.rxtp.max_sdu <= MID_MAX_BUF_SIZE) size = MID_MAX_BUF_SIZE; eni_vcc->recv = eni_alloc_mem(eni_dev,&size); DPRINTK("rx at 0x%lx\n",eni_vcc->recv); eni_vcc->words = size >> 2; if (!eni_vcc->recv) return -ENOBUFS; eni_vcc->rx = vcc->qos.aal == ATM_AAL5 ? 
rx_aal5 : rx_aal0; eni_vcc->descr = 0; eni_vcc->rx_pos = 0; eni_vcc->rxing = 0; eni_vcc->servicing = 0; eni_vcc->next = ENI_VCC_NOS; return 0; } static int open_rx_second(struct atm_vcc *vcc) { void __iomem *here; struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; unsigned long size; int order; DPRINTK("open_rx_second\n"); eni_dev = ENI_DEV(vcc->dev); eni_vcc = ENI_VCC(vcc); if (!eni_vcc->rx) return 0; /* set up VCI descriptor */ here = eni_dev->vci+vcc->vci*16; DPRINTK("loc 0x%x\n",(unsigned) (eni_vcc->recv-eni_dev->ram)/4); size = eni_vcc->words >> 8; for (order = -1; size; order++) size >>= 1; writel(0,here+4); /* descr, read = 0 */ writel(0,here+8); /* write, state, count = 0 */ if (eni_dev->rx_map[vcc->vci]) printk(KERN_CRIT DEV_LABEL "(itf %d): BUG - VCI %d already " "in use\n",vcc->dev->number,vcc->vci); eni_dev->rx_map[vcc->vci] = vcc; /* now it counts */ writel(((vcc->qos.aal != ATM_AAL5 ? MID_MODE_RAW : MID_MODE_AAL5) << MID_VCI_MODE_SHIFT) | MID_VCI_PTI_MODE | (((eni_vcc->recv-eni_dev->ram) >> (MID_LOC_SKIP+2)) << MID_VCI_LOCATION_SHIFT) | (order << MID_VCI_SIZE_SHIFT),here); return 0; } static void close_rx(struct atm_vcc *vcc) { DECLARE_WAITQUEUE(wait,current); void __iomem *here; struct eni_dev *eni_dev; struct eni_vcc *eni_vcc; eni_vcc = ENI_VCC(vcc); if (!eni_vcc->rx) return; eni_dev = ENI_DEV(vcc->dev); if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) { here = eni_dev->vci+vcc->vci*16; /* block receiver */ writel((readl(here) & ~MID_VCI_MODE) | (MID_MODE_TRASH << MID_VCI_MODE_SHIFT),here); /* wait for receiver to become idle */ udelay(27); /* discard pending cell */ writel(readl(here) & ~MID_VCI_IN_SERVICE,here); /* don't accept any new ones */ eni_dev->rx_map[vcc->vci] = NULL; /* wait for RX queue to drain */ DPRINTK("eni_close: waiting for RX ...\n"); EVENT("RX closing\n",0,0); add_wait_queue(&eni_dev->rx_wait,&wait); set_current_state(TASK_UNINTERRUPTIBLE); barrier(); for (;;) { /* transition service->rx: rxing++, servicing-- */ if 
(!eni_vcc->servicing) { barrier(); if (!eni_vcc->rxing) break; } EVENT("drain PDUs (rx %ld, serv %ld)\n",eni_vcc->rxing, eni_vcc->servicing); printk(KERN_INFO "%d+%d RX left\n",eni_vcc->servicing, eni_vcc->rxing); schedule(); set_current_state(TASK_UNINTERRUPTIBLE); } for (;;) { int at_end; u32 tmp; tasklet_disable(&eni_dev->task); tmp = readl(eni_dev->vci+vcc->vci*16+4) & MID_VCI_READ; at_end = eni_vcc->rx_pos == tmp >> MID_VCI_READ_SHIFT; tasklet_enable(&eni_dev->task); if (at_end) break; EVENT("drain discard (host 0x%lx, nic 0x%lx)\n", eni_vcc->rx_pos,tmp); printk(KERN_INFO "draining RX: host 0x%lx, nic 0x%x\n", eni_vcc->rx_pos,tmp); schedule(); set_current_state(TASK_UNINTERRUPTIBLE); } set_current_state(TASK_RUNNING); remove_wait_queue(&eni_dev->rx_wait,&wait); } eni_free_mem(eni_dev,eni_vcc->recv,eni_vcc->words << 2); eni_vcc->rx = NULL; } static int start_rx(struct atm_dev *dev) { struct eni_dev *eni_dev; eni_dev = ENI_DEV(dev); eni_dev->rx_map = (struct atm_vcc **) get_zeroed_page(GFP_KERNEL); if (!eni_dev->rx_map) { printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n", dev->number); free_page((unsigned long) eni_dev->free_list); return -ENOMEM; } eni_dev->rx_mult = DEFAULT_RX_MULT; eni_dev->fast = eni_dev->last_fast = NULL; eni_dev->slow = eni_dev->last_slow = NULL; init_waitqueue_head(&eni_dev->rx_wait); skb_queue_head_init(&eni_dev->rx_queue); eni_dev->serv_read = eni_in(MID_SERV_WRITE); eni_out(0,MID_DMA_WR_RX); return 0; } /*----------------------------------- TX ------------------------------------*/ enum enq_res { enq_ok,enq_next,enq_jam }; static inline void put_dma(int chan,u32 *dma,int *j,dma_addr_t paddr, u32 size) { u32 init,words; DPRINTK("put_dma: 0x%lx+0x%x\n",(unsigned long) paddr,size); EVENT("put_dma: 0x%lx+0x%lx\n",(unsigned long) paddr,size); #if 0 /* don't complain anymore */ if (paddr & 3) printk(KERN_ERR "put_dma: unaligned addr (0x%lx)\n",paddr); if (size & 3) printk(KERN_ERR "put_dma: unaligned size (0x%lx)\n",size); 
#endif
	/* leading byte transfer to reach 32-bit alignment */
	if (paddr & 3) {
		init = 4-(paddr & 3);
		if (init > size || size < 7) init = size;
		DPRINTK("put_dma: %lx DMA: %d/%d bytes\n",
		    (unsigned long) paddr,init,size);
		dma[(*j)++] = MID_DT_BYTE | (init << MID_DMA_COUNT_SHIFT) |
		    (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += init;
		size -= init;
	}
	words = size >> 2;
	size &= 3;
	/* word transfers up to the next 32-byte boundary */
	if (words && (paddr & 31)) {
		init = 8-((paddr & 31) >> 2);
		if (init > words) init = words;
		DPRINTK("put_dma: %lx DMA: %d/%d words\n",
		    (unsigned long) paddr,init,words);
		dma[(*j)++] = MID_DT_WORD | (init << MID_DMA_COUNT_SHIFT) |
		    (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += init << 2;
		words -= init;
	}
#ifdef CONFIG_ATM_ENI_BURST_TX_16W /* may work with some PCI chipsets ... */
	if (words & ~15) {
		DPRINTK("put_dma: %lx DMA: %d*16/%d words\n",
		    (unsigned long) paddr,words >> 4,words);
		dma[(*j)++] = MID_DT_16W | ((words >> 4) << MID_DMA_COUNT_SHIFT)
		    | (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += (words & ~15) << 2;
		words &= 15;
	}
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_8W /* recommended */
	if (words & ~7) {
		DPRINTK("put_dma: %lx DMA: %d*8/%d words\n",
		    (unsigned long) paddr,words >> 3,words);
		dma[(*j)++] = MID_DT_8W | ((words >> 3) << MID_DMA_COUNT_SHIFT)
		    | (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += (words & ~7) << 2;
		words &= 7;
	}
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_4W /* probably useless if TX_8W or TX_16W */
	if (words & ~3) {
		DPRINTK("put_dma: %lx DMA: %d*4/%d words\n",
		    (unsigned long) paddr,words >> 2,words);
		dma[(*j)++] = MID_DT_4W | ((words >> 2) << MID_DMA_COUNT_SHIFT)
		    | (chan << MID_DMA_CHAN_SHIFT);
		dma[(*j)++] = paddr;
		paddr += (words & ~3) << 2;
		words &= 3;
	}
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_2W /* probably useless if TX_4W, TX_8W, ...
Saves: 4 slave writes,
	 * 1 DMA xfer & 2 DMA'ed bytes (protocol layering is for wimps :-)
	 */

	aal5 = vcc->qos.aal == ATM_AAL5;
	/* check space in buffer */
	if (!aal5)
		size = (ATM_CELL_PAYLOAD >> 2)+TX_DESCR_SIZE;
		/* cell without HEC plus segmentation header (includes
		   four-byte cell header) */
	else {
		size = skb->len+4*AAL5_TRAILER+ATM_CELL_PAYLOAD-1;
		/* add AAL5 trailer */
		size = ((size-(size % ATM_CELL_PAYLOAD)) >> 2)+TX_DESCR_SIZE;
		/* add segmentation header */
	}
	/*
	 * Can I move tx_pos by size bytes without getting closer than TX_GAP
	 * to the read pointer ? TX_GAP means to leave some space for what
	 * the manual calls "too close".
	 */
	if (!NEPMOK(tx->tx_pos,size+TX_GAP,
	    eni_in(MID_TX_RDPTR(tx->index)),tx->words)) {
		DPRINTK(DEV_LABEL "(itf %d): TX full (size %d)\n",
		    vcc->dev->number,size);
		return enq_next;
	}
	/* check DMA */
	dma_wr = eni_in(MID_DMA_WR_TX);
	dma_rd = eni_in(MID_DMA_RD_TX);
	dma_size = 3; /* JK for descriptor and final fill, plus final size
			 mis-alignment fix */
	DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
	if (!skb_shinfo(skb)->nr_frags) dma_size += 5;
	else dma_size += 5*(skb_shinfo(skb)->nr_frags+1);
	if (dma_size > TX_DMA_BUF) {
		printk(KERN_CRIT DEV_LABEL "(itf %d): needs %d DMA entries "
		    "(got only %d)\n",vcc->dev->number,dma_size,TX_DMA_BUF);
	}
	DPRINTK("dma_wr is %d, tx_pos is %ld\n",dma_wr,tx->tx_pos);
	if (dma_wr != dma_rd && ((dma_rd+NR_DMA_TX-dma_wr) & (NR_DMA_TX-1)) <
	     dma_size) {
		printk(KERN_WARNING DEV_LABEL "(itf %d): TX DMA full\n",
		    vcc->dev->number);
		return enq_jam;
	}
	paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
	    PCI_DMA_TODEVICE);
	ENI_PRV_PADDR(skb) = paddr;
	/* prepare DMA queue entries */
	j = 0;
	/* leading JK entry skips the segmentation descriptor in TX memory */
	eni_dev->dma[j++] = (((tx->tx_pos+TX_DESCR_SIZE) & (tx->words-1)) <<
	     MID_DMA_COUNT_SHIFT) | (tx->index << MID_DMA_CHAN_SHIFT) |
	     MID_DT_JK;
	j++;
	if (!skb_shinfo(skb)->nr_frags)
		if (aal5) put_dma(tx->index,eni_dev->dma,&j,paddr,skb->len);
		else put_dma(tx->index,eni_dev->dma,&j,paddr+4,skb->len-4);
	else {
		DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
		/* NOTE(review): fragments use virtual addresses here, not
		 * DMA-mapped ones - the comment above admits this path is
		 * broken; confirm before relying on scatter-gather TX. */
		for (i = -1; i < skb_shinfo(skb)->nr_frags; i++)
			if (i == -1)
				put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
				    skb->data,
				    skb_headlen(skb));
			else
				put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
				    skb_frag_page(&skb_shinfo(skb)->frags[i]) +
					skb_shinfo(skb)->frags[i].page_offset,
				    skb_frag_size(&skb_shinfo(skb)->frags[i]));
	}
	/* pad odd-length PDUs to a word boundary from the zero page */
	if (skb->len & 3) {
		put_dma(tx->index, eni_dev->dma, &j, eni_dev->zero.dma,
			4 - (skb->len & 3));
	}
	/* JK for AAL5 trailer - AAL0 doesn't need it, but who cares ... */
	eni_dev->dma[j++] = (((tx->tx_pos+size) & (tx->words-1)) <<
	    MID_DMA_COUNT_SHIFT) | (tx->index << MID_DMA_CHAN_SHIFT) |
	    MID_DMA_END | MID_DT_JK;
	j++;
	DPRINTK("DMA at end: %d\n",j);
	/* store frame */
	writel((MID_SEG_TX_ID << MID_SEG_ID_SHIFT) |
	    (aal5 ? MID_SEG_AAL5 : 0) | (tx->prescaler << MID_SEG_PR_SHIFT) |
	    (tx->resolution << MID_SEG_RATE_SHIFT) |
	    (size/(ATM_CELL_PAYLOAD/4)),tx->send+tx->tx_pos*4);
/*printk("dsc = 0x%08lx\n",(unsigned long) readl(tx->send+tx->tx_pos*4));*/
	writel((vcc->vci << MID_SEG_VCI_SHIFT) |
	    (aal5 ? 0 : (skb->data[3] & 0xf)) |
	    (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? MID_SEG_CLP : 0),
	    tx->send+((tx->tx_pos+1) & (tx->words-1))*4);
	DPRINTK("size: %d, len:%d\n",size,skb->len);
	if (aal5)
		writel(skb->len,tx->send+
		    ((tx->tx_pos+size-AAL5_TRAILER) & (tx->words-1))*4);
	/* copy descriptor pairs into the on-card TX DMA queue */
	j = j >> 1;
	for (i = 0; i < j; i++) {
		writel(eni_dev->dma[i*2],eni_dev->tx_dma+dma_wr*8);
		writel(eni_dev->dma[i*2+1],eni_dev->tx_dma+dma_wr*8+4);
		dma_wr = (dma_wr+1) & (NR_DMA_TX-1);
	}
	ENI_PRV_POS(skb) = tx->tx_pos;
	ENI_PRV_SIZE(skb) = size;
	ENI_VCC(vcc)->txing += size;
	tx->tx_pos = (tx->tx_pos+size) & (tx->words-1);
	DPRINTK("dma_wr set to %d, tx_pos is now %ld\n",dma_wr,tx->tx_pos);
	/* kicking the write pointer starts the transfer */
	eni_out(dma_wr,MID_DMA_WR_TX);
	skb_queue_tail(&eni_dev->tx_queue,skb);
	queued++;
	return enq_ok;
}


/*
 * Retry backlogged TX PDUs on all channels, highest channel first.
 * Stops early if the TX DMA queue jams (enq_jam).
 */
static void poll_tx(struct atm_dev *dev)
{
	struct eni_tx *tx;
	struct sk_buff *skb;
	enum enq_res res;
	int i;

	DPRINTK(">poll_tx\n");
	for (i = NR_CHAN-1; i >= 0; i--) {
		tx = &ENI_DEV(dev)->tx[i];
		if (tx->send)
			while ((skb = skb_dequeue(&tx->backlog))) {
				res = do_tx(skb);
				if (res == enq_ok) continue;
				DPRINTK("re-queuing TX PDU\n");
				skb_queue_head(&tx->backlog,skb);
				requeued++;
				if (res == enq_jam) return;
				break;
			}
	}
}


/*
 * Complete transmitted PDUs: pop skbs off tx_queue once the hardware's
 * descriptor-start pointer has moved past them, unmap and free them.
 * Runs from the tasklet (hence dev_kfree_skb_irq).
 */
static void dequeue_tx(struct atm_dev *dev)
{
	struct eni_dev *eni_dev;
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	struct eni_tx *tx;

	NULLCHECK(dev);
	eni_dev = ENI_DEV(dev);
	NULLCHECK(eni_dev);
	while ((skb = skb_dequeue(&eni_dev->tx_queue))) {
		vcc = ATM_SKB(skb)->vcc;
		NULLCHECK(vcc);
		tx = ENI_VCC(vcc)->tx;
		NULLCHECK(ENI_VCC(vcc)->tx);
		DPRINTK("dequeue_tx: next 0x%lx curr 0x%x\n",ENI_PRV_POS(skb),
		    (unsigned) eni_in(MID_TX_DESCRSTART(tx->index)));
		/* still being sent? put it back and stop */
		if (ENI_VCC(vcc)->txing < tx->words && ENI_PRV_POS(skb) ==
		    eni_in(MID_TX_DESCRSTART(tx->index))) {
			skb_queue_head(&eni_dev->tx_queue,skb);
			break;
		}
		ENI_VCC(vcc)->txing -= ENI_PRV_SIZE(skb);
		pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
				 PCI_DMA_TODEVICE);
		if (vcc->pop) vcc->pop(vcc,skb);
		else dev_kfree_skb_irq(skb);
		atomic_inc(&vcc->stats->tx);
		wake_up(&eni_dev->tx_wait);
		dma_complete++;
	}
}


/* Find a free TX channel; channel 0 is reserved for unshaped (UBR) use. */
static struct eni_tx *alloc_tx(struct
eni_dev *eni_dev,int ubr)
{
	int i;

	for (i = !ubr; i < NR_CHAN; i++)
		if (!eni_dev->tx[i].send) return eni_dev->tx+i;
	return NULL;
}


/*
 * Compute the prescaler (*pre) and rate resolution (*res) for a target
 * peak cell rate *pcr, writing the actually achievable rate back into
 * *pcr.  Negative *pcr means "at most", zero means "whatever is left"
 * (tx_bw + reserved).  Always returns 0.
 */
static int comp_tx(struct eni_dev *eni_dev,int *pcr,int reserved,int *pre,
    int *res,int unlimited)
{
	static const int pre_div[] = { 4,16,128,2048 };
	    /* 2^(((x+2)^2-(x+2))/2+1) */

	if (unlimited) *pre = *res = 0;
	else {
		if (*pcr > 0) {
			int div;

			/* smallest prescaler that can still reach *pcr */
			for (*pre = 0; *pre < 3; (*pre)++)
				if (TS_CLOCK/pre_div[*pre]/64 <= *pcr) break;
			div = pre_div[*pre]**pcr;
			DPRINTK("min div %d\n",div);
			*res = TS_CLOCK/div-1;
		}
		else {
			int div;

			if (!*pcr) *pcr = eni_dev->tx_bw+reserved;
			/* largest prescaler that stays above -*pcr */
			for (*pre = 3; *pre >= 0; (*pre)--)
				if (TS_CLOCK/pre_div[*pre]/64 > -*pcr) break;
			if (*pre < 3) (*pre)++; /* else fail later */
			div = pre_div[*pre]*-*pcr;
			DPRINTK("max div %d\n",div);
			*res = DIV_ROUND_UP(TS_CLOCK, div)-1;
		}
		if (*res < 0) *res = 0;
		if (*res > MID_SEG_MAX_RATE) *res = MID_SEG_MAX_RATE;
	}
	*pcr = TS_CLOCK/pre_div[*pre]/(*res+1);
	DPRINTK("out pcr: %d (%d:%d)\n",*pcr,*pre,*res);
	return 0;
}


/*
 * Reserve bandwidth (set_rsv) and/or program shaping (set_shp) for a
 * VCC's TX side, allocating a TX channel and on-card buffer on first
 * use.  Unlimited UBR VCCs all share the single eni_dev->ubr channel.
 */
static int reserve_or_set_tx(struct atm_vcc *vcc,struct atm_trafprm *txtp,
    int set_rsv,int set_shp)
{
	struct eni_dev *eni_dev = ENI_DEV(vcc->dev);
	struct eni_vcc *eni_vcc = ENI_VCC(vcc);
	struct eni_tx *tx;
	unsigned long size;
	void __iomem *mem;
	int rate,ubr,unlimited,new_tx;
	int pre,res,order;
	int error;

	rate = atm_pcr_goal(txtp);
	ubr = txtp->traffic_class == ATM_UBR;
	unlimited = ubr && (!rate || rate <= -ATM_OC3_PCR ||
	    rate >= ATM_OC3_PCR);
	if (!unlimited) {
		size = txtp->max_sdu*eni_dev->tx_mult/100;
		if (size > MID_MAX_BUF_SIZE && txtp->max_sdu <=
		    MID_MAX_BUF_SIZE)
			size = MID_MAX_BUF_SIZE;
	}
	else {
		/* unlimited UBR: share the existing UBR channel if any */
		if (eni_dev->ubr) {
			eni_vcc->tx = eni_dev->ubr;
			txtp->pcr = ATM_OC3_PCR;
			return 0;
		}
		size = UBR_BUFFER;
	}
	new_tx = !eni_vcc->tx;
	mem = NULL; /* for gcc */
	if (!new_tx) tx = eni_vcc->tx;
	else {
		mem = eni_alloc_mem(eni_dev,&size);
		if (!mem) return -ENOBUFS;
		tx = alloc_tx(eni_dev,unlimited);
		if (!tx) {
			eni_free_mem(eni_dev,mem,size);
			return -EBUSY;
		}
		DPRINTK("got chan %d\n",tx->index);
		tx->reserved = tx->shaping = 0;
		tx->send = mem;
		tx->words = size >> 2;
		skb_queue_head_init(&tx->backlog);
		/* size is encoded as a power of two (1kB << order) */
		for (order = 0; size > (1 << (order+10)); order++);
		eni_out((order << MID_SIZE_SHIFT) |
		    ((tx->send-eni_dev->ram) >> (MID_LOC_SKIP+2)),
		    MID_TX_PLACE(tx->index));
		tx->tx_pos = eni_in(MID_TX_DESCRSTART(tx->index)) &
		    MID_DESCR_START;
	}
	error = comp_tx(eni_dev,&rate,tx->reserved,&pre,&res,unlimited);
	/* validate the achievable rate against the traffic contract */
	if (!error && txtp->min_pcr > rate) error = -EINVAL;
	if (!error && txtp->max_pcr && txtp->max_pcr != ATM_MAX_PCR &&
	    txtp->max_pcr < rate) error = -EINVAL;
	if (!error && !ubr && rate > eni_dev->tx_bw+tx->reserved)
		error = -EINVAL;
	if (!error && set_rsv && !set_shp && rate < tx->shaping)
		error = -EINVAL;
	if (!error && !set_rsv && rate > tx->reserved && !ubr)
		error = -EINVAL;
	if (error) {
		if (new_tx) {
			tx->send = NULL;
			eni_free_mem(eni_dev,mem,size);
		}
		return error;
	}
	txtp->pcr = rate;
	if (set_rsv && !ubr) {
		/* move the reservation: give back the old, take the new */
		eni_dev->tx_bw += tx->reserved;
		tx->reserved = rate;
		eni_dev->tx_bw -= rate;
	}
	if (set_shp || (unlimited && new_tx)) {
		if (unlimited && new_tx) eni_dev->ubr = tx;
		tx->prescaler = pre;
		tx->resolution = res;
		tx->shaping = rate;
	}
	if (set_shp) eni_vcc->tx = tx;
	DPRINTK("rsv %d shp %d\n",tx->reserved,tx->shaping);
	return 0;
}


/* First-stage TX open: reserve and shape according to the VCC's QoS. */
static int open_tx_first(struct atm_vcc *vcc)
{
	ENI_VCC(vcc)->tx = NULL;
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	ENI_VCC(vcc)->txing = 0;
	return reserve_or_set_tx(vcc,&vcc->qos.txtp,1,1);
}


static int open_tx_second(struct atm_vcc *vcc)
{
	return 0; /* nothing to do */
}


/*
 * Tear down the transmit side of a VCC: wait for the backlog and all
 * in-flight PDUs to drain, then (unless it is the shared UBR channel)
 * wait for the hardware to go idle and release the channel's buffer.
 * May sleep (uses tx_wait).
 */
static void close_tx(struct atm_vcc *vcc)
{
	DECLARE_WAITQUEUE(wait,current);
	struct eni_dev *eni_dev;
	struct eni_vcc *eni_vcc;

	eni_vcc = ENI_VCC(vcc);
	if (!eni_vcc->tx) return;
	eni_dev = ENI_DEV(vcc->dev);
	/* wait for TX queue to drain */
	DPRINTK("eni_close: waiting for TX ...\n");
	add_wait_queue(&eni_dev->tx_wait,&wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	for (;;) {
		int txing;

		tasklet_disable(&eni_dev->task);
		txing = skb_peek(&eni_vcc->tx->backlog) ||
eni_vcc->txing;
		tasklet_enable(&eni_dev->task);
		if (!txing) break;
		DPRINTK("%d TX left\n",eni_vcc->txing);
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&eni_dev->tx_wait,&wait);
	if (eni_vcc->tx != eni_dev->ubr) {
		/*
		 * Looping a few times in here is probably far cheaper than
		 * keeping track of TX completions all the time, so let's poll
		 * a bit ...
		 */
		while (eni_in(MID_TX_RDPTR(eni_vcc->tx->index)) !=
		    eni_in(MID_TX_DESCRSTART(eni_vcc->tx->index)))
			schedule();
		eni_free_mem(eni_dev,eni_vcc->tx->send,eni_vcc->tx->words << 2);
		eni_vcc->tx->send = NULL;
		eni_dev->tx_bw += eni_vcc->tx->reserved;
	}
	eni_vcc->tx = NULL;
}


/* Per-device TX initialization: reset bandwidth accounting and channels. */
static int start_tx(struct atm_dev *dev)
{
	struct eni_dev *eni_dev;
	int i;

	eni_dev = ENI_DEV(dev);
	eni_dev->lost = 0;
	eni_dev->tx_bw = ATM_OC3_PCR;
	eni_dev->tx_mult = DEFAULT_TX_MULT;
	init_waitqueue_head(&eni_dev->tx_wait);
	eni_dev->ubr = NULL;
	skb_queue_head_init(&eni_dev->tx_queue);
	eni_out(0,MID_DMA_WR_TX);
	for (i = 0; i < NR_CHAN; i++) {
		eni_dev->tx[i].send = NULL;
		eni_dev->tx[i].index = i;
	}
	return 0;
}


/*--------------------------------- common ----------------------------------*/


#if 0 /* may become useful again when tuning things */

static void foo(void)
{
printk(KERN_INFO
  "tx_complete=%d,dma_complete=%d,queued=%d,requeued=%d,sub=%d,\n"
  "backlogged=%d,rx_enqueued=%d,rx_dequeued=%d,putting=%d,pushed=%d\n",
  tx_complete,dma_complete,queued,requeued,submitted,backlogged,
  rx_enqueued,rx_dequeued,putting,pushed);
if (eni_boards) printk(KERN_INFO "loss: %ld\n",ENI_DEV(eni_boards)->lost);
}

#endif


/* Report driver-error interrupt causes and dump recent events. */
static void bug_int(struct atm_dev *dev,unsigned long reason)
{
	DPRINTK(">bug_int\n");
	if (reason & MID_DMA_ERR_ACK)
		printk(KERN_CRIT DEV_LABEL "(itf %d): driver error - DMA "
		    "error\n",dev->number);
	if (reason & MID_TX_IDENT_MISM)
		printk(KERN_CRIT DEV_LABEL "(itf %d): driver error - ident "
		    "mismatch\n",dev->number);
	if (reason & MID_TX_DMA_OVFL)
		printk(KERN_CRIT DEV_LABEL "(itf %d): driver error - DMA "
		    "overflow\n",dev->number);
	EVENT("---dump ends here---\n",0,0);
	printk(KERN_NOTICE "---recent events---\n");
	event_dump();
}


/*
 * Interrupt handler: acknowledge the causes that reading ISA does not
 * clear, accumulate the rest in eni_dev->events and defer the real work
 * to the tasklet.
 */
static irqreturn_t eni_int(int irq,void *dev_id)
{
	struct atm_dev *dev;
	struct eni_dev *eni_dev;
	u32 reason;

	DPRINTK(">eni_int\n");
	dev = dev_id;
	eni_dev = ENI_DEV(dev);
	reason = eni_in(MID_ISA);
	DPRINTK(DEV_LABEL ": int 0x%lx\n",(unsigned long) reason);
	/*
	 * Must handle these two right now, because reading ISA doesn't clear
	 * them, so they re-occur and we never make it to the tasklet. Since
	 * they're rare, we don't mind the occasional invocation of eni_tasklet
	 * with eni_dev->events == 0.
	 */
	if (reason & MID_STAT_OVFL) {
		EVENT("stat overflow\n",0,0);
		eni_dev->lost += eni_in(MID_STAT) & MID_OVFL_TRASH;
	}
	if (reason & MID_SUNI_INT) {
		EVENT("SUNI int\n",0,0);
		dev->phy->interrupt(dev);
#if 0
		foo();
#endif
	}
	spin_lock(&eni_dev->lock);
	eni_dev->events |= reason;
	spin_unlock(&eni_dev->lock);
	tasklet_schedule(&eni_dev->task);
	return IRQ_HANDLED;
}


/*
 * Deferred interrupt work: dispatch on the accumulated event mask to
 * the RX/TX completion and service routines, then retry backlogged TX.
 */
static void eni_tasklet(unsigned long data)
{
	struct atm_dev *dev = (struct atm_dev *) data;
	struct eni_dev *eni_dev = ENI_DEV(dev);
	unsigned long flags;
	u32 events;

	DPRINTK("eni_tasklet (dev %p)\n",dev);
	spin_lock_irqsave(&eni_dev->lock,flags);
	events = xchg(&eni_dev->events,0);
	spin_unlock_irqrestore(&eni_dev->lock,flags);
	if (events & MID_RX_DMA_COMPLETE) {
		EVENT("INT: RX DMA complete, starting dequeue_rx\n",0,0);
		dequeue_rx(dev);
		EVENT("dequeue_rx done, starting poll_rx\n",0,0);
		poll_rx(dev);
		EVENT("poll_rx done\n",0,0);
		/* poll_tx ? */
	}
	if (events & MID_SERVICE) {
		EVENT("INT: service, starting get_service\n",0,0);
		get_service(dev);
		EVENT("get_service done, starting poll_rx\n",0,0);
		poll_rx(dev);
		EVENT("poll_rx done\n",0,0);
	}
	if (events & MID_TX_DMA_COMPLETE) {
		EVENT("INT: TX DMA COMPLETE\n",0,0);
		dequeue_tx(dev);
	}
	if (events & MID_TX_COMPLETE) {
		EVENT("INT: TX COMPLETE\n",0,0);
		tx_complete++;
		wake_up(&eni_dev->tx_wait);
		/* poll_rx ?
tonga | SEPROM_DATA : tonga & ~SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; tonga &= ~SEPROM_CLK; SET_SEPROM; } /* get ack */ tonga |= SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; GET_SEPROM; failed = tonga & SEPROM_DATA; tonga &= ~SEPROM_CLK; SET_SEPROM; tonga |= SEPROM_DATA; SET_SEPROM; if (failed) error = -EIO; else { dev->esi[i] = 0; for (j = 7; j >= 0; j--) { dev->esi[i] <<= 1; tonga |= SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; GET_SEPROM; if (tonga & SEPROM_DATA) dev->esi[i] |= 1; tonga &= ~SEPROM_CLK; SET_SEPROM; tonga |= SEPROM_DATA; SET_SEPROM; } /* get ack */ tonga |= SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; GET_SEPROM; if (!(tonga & SEPROM_DATA)) error = -EIO; tonga &= ~SEPROM_CLK; SET_SEPROM; tonga |= SEPROM_DATA; SET_SEPROM; } /* stop operation */ tonga &= ~SEPROM_DATA; SET_SEPROM; tonga |= SEPROM_CLK; SET_SEPROM; tonga |= SEPROM_DATA; SET_SEPROM; } if (pci_error) { printk(KERN_ERR DEV_LABEL "(itf %d): error reading ESI " "(0x%02x)\n",dev->number,pci_error); error = -EIO; } return error; } #undef SET_SEPROM #undef GET_SEPROM static int __devinit get_esi_fpga(struct atm_dev *dev, void __iomem *base) { void __iomem *mac_base; int i; mac_base = base+EPROM_SIZE-sizeof(struct midway_eprom); for (i = 0; i < ESI_LEN; i++) dev->esi[i] = readb(mac_base+(i^3)); return 0; } static int __devinit eni_do_init(struct atm_dev *dev) { struct midway_eprom __iomem *eprom; struct eni_dev *eni_dev; struct pci_dev *pci_dev; unsigned long real_base; void __iomem *base; int error,i,last; DPRINTK(">eni_init\n"); dev->ci_range.vpi_bits = 0; dev->ci_range.vci_bits = NR_VCI_LD; dev->link_rate = ATM_OC3_PCR; eni_dev = ENI_DEV(dev); pci_dev = eni_dev->pci_dev; real_base = pci_resource_start(pci_dev, 0); eni_dev->irq = pci_dev->irq; if ((error = pci_write_config_word(pci_dev,PCI_COMMAND, PCI_COMMAND_MEMORY | (eni_dev->asic ? 
PCI_COMMAND_PARITY | PCI_COMMAND_SERR : 0)))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory " "(0x%02x)\n",dev->number,error); return -EIO; } printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%lx,irq=%d,", dev->number,pci_dev->revision,real_base,eni_dev->irq); if (!(base = ioremap_nocache(real_base,MAP_MAX_SIZE))) { printk("\n"); printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page " "mapping\n",dev->number); return error; } eni_dev->ioaddr = base; eni_dev->base_diff = real_base - (unsigned long) base; /* id may not be present in ASIC Tonga boards - check this @@@ */ if (!eni_dev->asic) { eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom)); if (readl(&eprom->magic) != ENI155_MAGIC) { printk("\n"); printk(KERN_ERR DEV_LABEL "(itf %d): bad magic - expected 0x%x, got 0x%x\n", dev->number, ENI155_MAGIC, (unsigned)readl(&eprom->magic)); error = -EINVAL; goto unmap; } } eni_dev->phy = base+PHY_BASE; eni_dev->reg = base+REG_BASE; eni_dev->ram = base+RAM_BASE; last = MAP_MAX_SIZE-RAM_BASE; for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) { writel(0x55555555,eni_dev->ram+i); if (readl(eni_dev->ram+i) != 0x55555555) last = i; else { writel(0xAAAAAAAA,eni_dev->ram+i); if (readl(eni_dev->ram+i) != 0xAAAAAAAA) last = i; else writel(i,eni_dev->ram+i); } } for (i = 0; i < last; i += RAM_INCREMENT) if (readl(eni_dev->ram+i) != i) break; eni_dev->mem = i; memset_io(eni_dev->ram,0,eni_dev->mem); /* TODO: should shrink allocation now */ printk("mem=%dkB (",eni_dev->mem >> 10); /* TODO: check for non-SUNI, check for TAXI ? */ if (!(eni_in(MID_RES_ID_MCON) & 0x200) != !eni_dev->asic) { printk(")\n"); printk(KERN_ERR DEV_LABEL "(itf %d): ERROR - wrong id 0x%x\n", dev->number,(unsigned) eni_in(MID_RES_ID_MCON)); error = -EINVAL; goto unmap; } error = eni_dev->asic ? get_esi_asic(dev) : get_esi_fpga(dev,base); if (error) goto unmap; for (i = 0; i < ESI_LEN; i++) printk("%s%02X",i ? 
"-" : "",dev->esi[i]); printk(")\n"); printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number, eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA", media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]); error = suni_init(dev); if (error) goto unmap; out: return error; unmap: iounmap(base); goto out; } static void eni_do_release(struct atm_dev *dev) { struct eni_dev *ed = ENI_DEV(dev); dev->phy->stop(dev); dev->phy = NULL; iounmap(ed->ioaddr); } static int __devinit eni_start(struct atm_dev *dev) { struct eni_dev *eni_dev; void __iomem *buf; unsigned long buffer_mem; int error; DPRINTK(">eni_start\n"); eni_dev = ENI_DEV(dev); if (request_irq(eni_dev->irq,&eni_int,IRQF_SHARED,DEV_LABEL,dev)) { printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n", dev->number,eni_dev->irq); error = -EAGAIN; goto out; } pci_set_master(eni_dev->pci_dev); if ((error = pci_write_config_word(eni_dev->pci_dev,PCI_COMMAND, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | (eni_dev->asic ? PCI_COMMAND_PARITY | PCI_COMMAND_SERR : 0)))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+" "master (0x%02x)\n",dev->number,error); goto free_irq; } if ((error = pci_write_config_byte(eni_dev->pci_dev,PCI_TONGA_CTRL, END_SWAP_DMA))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't set endian swap " "(0x%02x)\n",dev->number,error); goto free_irq; } /* determine addresses of internal tables */ eni_dev->vci = eni_dev->ram; eni_dev->rx_dma = eni_dev->ram+NR_VCI*16; eni_dev->tx_dma = eni_dev->rx_dma+NR_DMA_RX*8; eni_dev->service = eni_dev->tx_dma+NR_DMA_TX*8; buf = eni_dev->service+NR_SERVICE*4; DPRINTK("vci 0x%lx,rx 0x%lx, tx 0x%lx,srv 0x%lx,buf 0x%lx\n", eni_dev->vci,eni_dev->rx_dma,eni_dev->tx_dma, eni_dev->service,buf); spin_lock_init(&eni_dev->lock); tasklet_init(&eni_dev->task,eni_tasklet,(unsigned long) dev); eni_dev->events = 0; /* initialize memory management */ buffer_mem = eni_dev->mem - (buf - eni_dev->ram); eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2; eni_dev->free_list = 
kmalloc( sizeof(struct eni_free)*(eni_dev->free_list_size+1),GFP_KERNEL); if (!eni_dev->free_list) { printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n", dev->number); error = -ENOMEM; goto free_irq; } eni_dev->free_len = 0; eni_put_free(eni_dev,buf,buffer_mem); memset_io(eni_dev->vci,0,16*NR_VCI); /* clear VCI table */ /* * byte_addr free (k) * 0x00000000 512 VCI table * 0x00004000 496 RX DMA * 0x00005000 492 TX DMA * 0x00006000 488 service list * 0x00007000 484 buffers * 0x00080000 0 end (512kB) */ eni_out(0xffffffff,MID_IE); error = start_tx(dev); if (error) goto free_list; error = start_rx(dev); if (error) goto free_list; error = dev->phy->start(dev); if (error) goto free_list; eni_out(eni_in(MID_MC_S) | (1 << MID_INT_SEL_SHIFT) | MID_TX_LOCK_MODE | MID_DMA_ENABLE | MID_TX_ENABLE | MID_RX_ENABLE, MID_MC_S); /* Tonga uses SBus INTReq1 */ (void) eni_in(MID_ISA); /* clear Midway interrupts */ return 0; free_list: kfree(eni_dev->free_list); free_irq: free_irq(eni_dev->irq, dev); out: return error; } static void eni_close(struct atm_vcc *vcc) { DPRINTK(">eni_close\n"); if (!ENI_VCC(vcc)) return; clear_bit(ATM_VF_READY,&vcc->flags); close_rx(vcc); close_tx(vcc); DPRINTK("eni_close: done waiting\n"); /* deallocate memory */ kfree(ENI_VCC(vcc)); vcc->dev_data = NULL; clear_bit(ATM_VF_ADDR,&vcc->flags); /*foo();*/ } static int eni_open(struct atm_vcc *vcc) { struct eni_vcc *eni_vcc; int error; short vpi = vcc->vpi; int vci = vcc->vci; DPRINTK(">eni_open\n"); EVENT("eni_open\n",0,0); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) vcc->dev_data = NULL; if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC) set_bit(ATM_VF_ADDR,&vcc->flags); if (vcc->qos.aal != ATM_AAL0 && vcc->qos.aal != ATM_AAL5) return -EINVAL; DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi, vcc->vci); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) { eni_vcc = kmalloc(sizeof(struct eni_vcc),GFP_KERNEL); if (!eni_vcc) return -ENOMEM; vcc->dev_data = eni_vcc; eni_vcc->tx = NULL; /* 
for eni_close after open_rx */ if ((error = open_rx_first(vcc))) { eni_close(vcc); return error; } if ((error = open_tx_first(vcc))) { eni_close(vcc); return error; } } if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0; if ((error = open_rx_second(vcc))) { eni_close(vcc); return error; } if ((error = open_tx_second(vcc))) { eni_close(vcc); return error; } set_bit(ATM_VF_READY,&vcc->flags); /* should power down SUNI while !ref_count @@@ */ return 0; } static int eni_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flgs) { struct eni_dev *eni_dev = ENI_DEV(vcc->dev); struct eni_tx *tx = ENI_VCC(vcc)->tx; struct sk_buff *skb; int error,rate,rsv,shp; if (qos->txtp.traffic_class == ATM_NONE) return 0; if (tx == eni_dev->ubr) return -EBADFD; rate = atm_pcr_goal(&qos->txtp); if (rate < 0) rate = -rate; rsv = shp = 0; if ((flgs & ATM_MF_DEC_RSV) && rate && rate < tx->reserved) rsv = 1; if ((flgs & ATM_MF_INC_RSV) && (!rate || rate > tx->reserved)) rsv = 1; if ((flgs & ATM_MF_DEC_SHP) && rate && rate < tx->shaping) shp = 1; if ((flgs & ATM_MF_INC_SHP) && (!rate || rate > tx->shaping)) shp = 1; if (!rsv && !shp) return 0; error = reserve_or_set_tx(vcc,&qos->txtp,rsv,shp); if (error) return error; if (shp && !(flgs & ATM_MF_IMMED)) return 0; /* * Walk through the send buffer and patch the rate information in all * segmentation buffer descriptors of this VCC. 
*/
	tasklet_disable(&eni_dev->task);
	skb_queue_walk(&eni_dev->tx_queue, skb) {
		void __iomem *dsc;

		if (ATM_SKB(skb)->vcc != vcc) continue;
		dsc = tx->send+ENI_PRV_POS(skb)*4;
		writel((readl(dsc) & ~(MID_SEG_RATE | MID_SEG_PR)) |
		    (tx->prescaler << MID_SEG_PR_SHIFT) |
		    (tx->resolution << MID_SEG_RATE_SHIFT), dsc);
	}
	tasklet_enable(&eni_dev->task);
	return 0;
}


/*
 * Device ioctl: ENI_MEMDUMP (deprecated debug dump), ENI_SETMULT
 * (buffer multipliers, percentages > 100), ATM_SETCIRANGE; everything
 * else is forwarded to the PHY driver.
 */
static int eni_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
{
	struct eni_dev *eni_dev = ENI_DEV(dev);

	if (cmd == ENI_MEMDUMP) {
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		printk(KERN_WARNING "Please use /proc/atm/" DEV_LABEL ":%d "
		    "instead of obsolete ioctl ENI_MEMDUMP\n",dev->number);
		dump(dev);
		return 0;
	}
	if (cmd == ENI_SETMULT) {
		struct eni_multipliers mult;

		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (copy_from_user(&mult, arg,
		    sizeof(struct eni_multipliers)))
			return -EFAULT;
		/* multipliers are percentages and must exceed 100 */
		if ((mult.tx && mult.tx <= 100) || (mult.rx &&mult.rx <= 100) ||
		    mult.tx > 65536 || mult.rx > 65536)
			return -EINVAL;
		if (mult.tx) eni_dev->tx_mult = mult.tx;
		if (mult.rx) eni_dev->rx_mult = mult.rx;
		return 0;
	}
	if (cmd == ATM_SETCIRANGE) {
		struct atm_cirange ci;

		if (copy_from_user(&ci, arg,sizeof(struct atm_cirange)))
			return -EFAULT;
		if ((ci.vpi_bits == 0 || ci.vpi_bits == ATM_CI_MAX) &&
		    (ci.vci_bits == NR_VCI_LD || ci.vpi_bits == ATM_CI_MAX))
			return 0;
		return -EINVAL;
	}
	if (!dev->phy->ioctl) return -ENOIOCTLCMD;
	return dev->phy->ioctl(dev,cmd,arg);
}


static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname,
    void __user *optval,int optlen)
{
	return -EINVAL;
}


static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname,
    void __user *optval,unsigned int optlen)
{
	return -EINVAL;
}


/*
 * Transmit entry point: validate, byte-swap the AAL0 cell header, then
 * try an immediate do_tx(); on "TX full" queue the skb on the channel
 * backlog for the tasklet to retry.
 */
static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
	enum enq_res res;

	DPRINTK(">eni_send\n");
	if (!ENI_VCC(vcc)->tx) {
		if (vcc->pop) vcc->pop(vcc,skb);
		else dev_kfree_skb(skb);
		return -EINVAL;
	}
	/* NOTE(review): if skb really were NULL, the pop() above would
	 * already have seen it; callers appear to guarantee skb != NULL -
	 * confirm against the ATM core. */
	if (!skb) {
		printk(KERN_CRIT "!skb in eni_send ?\n");
		if (vcc->pop) vcc->pop(vcc,skb);
		return -EINVAL;
	}
	if (vcc->qos.aal == ATM_AAL0) {
		if (skb->len != ATM_CELL_SIZE-1) {
			if (vcc->pop) vcc->pop(vcc,skb);
			else dev_kfree_skb(skb);
			return -EINVAL;
		}
		*(u32 *) skb->data = htonl(*(u32 *) skb->data);
	}
	submitted++;
	ATM_SKB(skb)->vcc = vcc;
	tasklet_disable(&ENI_DEV(vcc->dev)->task);
	res = do_tx(skb);
	tasklet_enable(&ENI_DEV(vcc->dev)->task);
	if (res == enq_ok) return 0;
	skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb);
	backlogged++;
	tasklet_schedule(&ENI_DEV(vcc->dev)->task);
	return 0;
}


/* PHY register accessors: registers are word-spaced in the PHY window. */
static void eni_phy_put(struct atm_dev *dev,unsigned char value,
    unsigned long addr)
{
	writel(value,ENI_DEV(dev)->phy+addr*4);
}


static unsigned char eni_phy_get(struct atm_dev *dev,unsigned long addr)
{
	return readl(ENI_DEV(dev)->phy+addr*4);
}


/*
 * /proc/atm read-out: one line per *pos - device summary, configured
 * burst sizes, buffer multipliers, per-channel and per-VCC buffer
 * usage, and the free-list.
 */
static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
{
	struct hlist_node *node;
	struct sock *s;
	static const char *signal[] = { "LOST","unknown","okay" };
	struct eni_dev *eni_dev = ENI_DEV(dev);
	struct atm_vcc *vcc;
	int left,i;

	left = *pos;
	if (!left)
		return sprintf(page,DEV_LABEL "(itf %d) signal %s, %dkB, "
		    "%d cps remaining\n",dev->number,signal[(int) dev->signal],
		    eni_dev->mem >> 10,eni_dev->tx_bw);
	if (!--left)
		return sprintf(page,"%4sBursts: TX"
#if !defined(CONFIG_ATM_ENI_BURST_TX_16W) && \
    !defined(CONFIG_ATM_ENI_BURST_TX_8W) && \
    !defined(CONFIG_ATM_ENI_BURST_TX_4W) && \
    !defined(CONFIG_ATM_ENI_BURST_TX_2W)
		    " none"
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_16W
		    " 16W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_8W
		    " 8W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_4W
		    " 4W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_TX_2W
		    " 2W"
#endif
		    ", RX"
#if !defined(CONFIG_ATM_ENI_BURST_RX_16W) && \
    !defined(CONFIG_ATM_ENI_BURST_RX_8W) && \
    !defined(CONFIG_ATM_ENI_BURST_RX_4W) && \
    !defined(CONFIG_ATM_ENI_BURST_RX_2W)
		    " none"
#endif
#ifdef CONFIG_ATM_ENI_BURST_RX_16W
		    " 16W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_RX_8W
		    " 8W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_RX_4W
		    " 4W"
#endif
#ifdef CONFIG_ATM_ENI_BURST_RX_2W
		    " 2W"
#endif
#ifndef CONFIG_ATM_ENI_TUNE_BURST
		    " (default)"
#endif
		    "\n","");
if (!--left) return sprintf(page,"%4sBuffer multipliers: tx %d%%, rx %d%%\n", "",eni_dev->tx_mult,eni_dev->rx_mult); for (i = 0; i < NR_CHAN; i++) { struct eni_tx *tx = eni_dev->tx+i; if (!tx->send) continue; if (!--left) { return sprintf(page,"tx[%d]: 0x%ld-0x%ld " "(%6ld bytes), rsv %d cps, shp %d cps%s\n",i, (unsigned long) (tx->send - eni_dev->ram), tx->send-eni_dev->ram+tx->words*4-1,tx->words*4, tx->reserved,tx->shaping, tx == eni_dev->ubr ? " (UBR)" : ""); } if (--left) continue; return sprintf(page,"%10sbacklog %u packets\n","", skb_queue_len(&tx->backlog)); } read_lock(&vcc_sklist_lock); for(i = 0; i < VCC_HTABLE_SIZE; ++i) { struct hlist_head *head = &vcc_hash[i]; sk_for_each(s, node, head) { struct eni_vcc *eni_vcc; int length; vcc = atm_sk(s); if (vcc->dev != dev) continue; eni_vcc = ENI_VCC(vcc); if (--left) continue; length = sprintf(page,"vcc %4d: ",vcc->vci); if (eni_vcc->rx) { length += sprintf(page+length,"0x%ld-0x%ld " "(%6ld bytes)", (unsigned long) (eni_vcc->recv - eni_dev->ram), eni_vcc->recv-eni_dev->ram+eni_vcc->words*4-1, eni_vcc->words*4); if (eni_vcc->tx) length += sprintf(page+length,", "); } if (eni_vcc->tx) length += sprintf(page+length,"tx[%d], txing %d bytes", eni_vcc->tx->index,eni_vcc->txing); page[length] = '\n'; read_unlock(&vcc_sklist_lock); return length+1; } } read_unlock(&vcc_sklist_lock); for (i = 0; i < eni_dev->free_len; i++) { struct eni_free *fe = eni_dev->free_list+i; unsigned long offset; if (--left) continue; offset = (unsigned long) eni_dev->ram+eni_dev->base_diff; return sprintf(page,"free %p-%p (%6d bytes)\n", fe->start-offset,fe->start-offset+(1 << fe->order)-1, 1 << fe->order); } return 0; } static const struct atmdev_ops ops = { .open = eni_open, .close = eni_close, .ioctl = eni_ioctl, .getsockopt = eni_getsockopt, .setsockopt = eni_setsockopt, .send = eni_send, .phy_put = eni_phy_put, .phy_get = eni_phy_get, .change_qos = eni_change_qos, .proc_read = eni_proc_read }; static int __devinit eni_init_one(struct 
pci_dev *pci_dev, const struct pci_device_id *ent) { struct atm_dev *dev; struct eni_dev *eni_dev; struct eni_zero *zero; int rc; rc = pci_enable_device(pci_dev); if (rc < 0) goto out; rc = -ENOMEM; eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL); if (!eni_dev) goto err_disable; zero = &eni_dev->zero; zero->addr = pci_alloc_consistent(pci_dev, ENI_ZEROES_SIZE, &zero->dma); if (!zero->addr) goto err_kfree; dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); if (!dev) goto err_free_consistent; dev->dev_data = eni_dev; pci_set_drvdata(pci_dev, dev); eni_dev->pci_dev = pci_dev; eni_dev->asic = ent->driver_data; rc = eni_do_init(dev); if (rc < 0) goto err_unregister; rc = eni_start(dev); if (rc < 0) goto err_eni_release; eni_dev->more = eni_boards; eni_boards = dev; out: return rc; err_eni_release: eni_do_release(dev); err_unregister: atm_dev_deregister(dev); err_free_consistent: pci_free_consistent(pci_dev, ENI_ZEROES_SIZE, zero->addr, zero->dma); err_kfree: kfree(eni_dev); err_disable: pci_disable_device(pci_dev); goto out; } static struct pci_device_id eni_pci_tbl[] = { { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_FPGA), 0 /* FPGA */ }, { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_ASIC), 1 /* ASIC */ }, { 0, } }; MODULE_DEVICE_TABLE(pci,eni_pci_tbl); static void __devexit eni_remove_one(struct pci_dev *pdev) { struct atm_dev *dev = pci_get_drvdata(pdev); struct eni_dev *ed = ENI_DEV(dev); struct eni_zero *zero = &ed->zero; eni_do_release(dev); atm_dev_deregister(dev); pci_free_consistent(pdev, ENI_ZEROES_SIZE, zero->addr, zero->dma); kfree(ed); pci_disable_device(pdev); } static struct pci_driver eni_driver = { .name = DEV_LABEL, .id_table = eni_pci_tbl, .probe = eni_init_one, .remove = __devexit_p(eni_remove_one), }; static int __init eni_init(void) { struct sk_buff *skb; /* dummy for sizeof */ if (sizeof(skb->cb) < sizeof(struct eni_skb_prv)) { printk(KERN_ERR "eni_detect: skb->cb is too small (%Zd < %Zd)\n", sizeof(skb->cb),sizeof(struct eni_skb_prv)); 
		/* skb->cb is too small to hold struct eni_skb_prv, so the
		 * driver cannot stash its per-skb state; refuse to load. */
		return -EIO;
	}
	/* Size check passed: register with the PCI core, which will probe
	 * any matching adapters via eni_init_one(). */
	return pci_register_driver(&eni_driver);
}

module_init(eni_init);

/* @@@ since exit routine not defined, this module can not be unloaded */
MODULE_LICENSE("GPL");
gpl-2.0
beboom/a10s-linux
fs/ocfs2/cluster/netdebug.c
7994
14560
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * netdebug.c * * debug functionality for o2net * * Copyright (C) 2005, 2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #ifdef CONFIG_DEBUG_FS #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/idr.h> #include <linux/kref.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include "tcp.h" #include "nodemanager.h" #define MLOG_MASK_PREFIX ML_TCP #include "masklog.h" #include "tcp_internal.h" #define O2NET_DEBUG_DIR "o2net" #define SC_DEBUG_NAME "sock_containers" #define NST_DEBUG_NAME "send_tracking" #define STATS_DEBUG_NAME "stats" #define NODES_DEBUG_NAME "connected_nodes" #define SHOW_SOCK_CONTAINERS 0 #define SHOW_SOCK_STATS 1 static struct dentry *o2net_dentry; static struct dentry *sc_dentry; static struct dentry *nst_dentry; static struct dentry *stats_dentry; static struct dentry *nodes_dentry; static DEFINE_SPINLOCK(o2net_debug_lock); static LIST_HEAD(sock_containers); static LIST_HEAD(send_tracking); void o2net_debug_add_nst(struct o2net_send_tracking *nst) { spin_lock(&o2net_debug_lock); list_add(&nst->st_net_debug_item, &send_tracking); spin_unlock(&o2net_debug_lock); } void o2net_debug_del_nst(struct o2net_send_tracking 
				*nst)
{
	spin_lock(&o2net_debug_lock);
	/* An nst that was never added (or was already removed) has an
	 * empty list head; only unlink entries actually on the list. */
	if (!list_empty(&nst->st_net_debug_item))
		list_del_init(&nst->st_net_debug_item);
	spin_unlock(&o2net_debug_lock);
}

/*
 * Return the next real send-tracking entry after @nst_start, or NULL when
 * the walk wraps around to the global send_tracking list head.
 *
 * Caller must hold o2net_debug_lock.  seq_file iteration keeps a dummy
 * element (st_task == NULL) on the list as a cursor; dummies are skipped.
 */
static struct o2net_send_tracking
			*next_nst(struct o2net_send_tracking *nst_start)
{
	struct o2net_send_tracking *nst, *ret = NULL;

	assert_spin_locked(&o2net_debug_lock);

	list_for_each_entry(nst, &nst_start->st_net_debug_item,
			    st_net_debug_item) {
		/* discover the head of the list */
		if (&nst->st_net_debug_item == &send_tracking)
			break;
		/* use st_task to detect real nsts in the list */
		if (nst->st_task != NULL) {
			ret = nst;
			break;
		}
	}

	return ret;
}

/* seq_file .start: find the first real nst after this file's dummy cursor. */
static void *nst_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct o2net_send_tracking *nst, *dummy_nst = seq->private;

	spin_lock(&o2net_debug_lock);
	nst = next_nst(dummy_nst);
	spin_unlock(&o2net_debug_lock);

	return nst;
}

/* seq_file .next: advance the dummy cursor to just after the next real
 * nst, so iteration resumes there even if entries come and go between
 * calls.  The cursor is unlinked first and re-inserted only when there
 * is a successor. */
static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct o2net_send_tracking *nst, *dummy_nst = seq->private;

	spin_lock(&o2net_debug_lock);
	nst = next_nst(dummy_nst);
	list_del_init(&dummy_nst->st_net_debug_item);
	if (nst)
		list_add(&dummy_nst->st_net_debug_item,
			 &nst->st_net_debug_item);
	spin_unlock(&o2net_debug_lock);

	return nst; /* unused, just needs to be null when done */
}

/* seq_file .show: print the nst at the cursor position, with the ages of
 * its socket-acquire, send-start and status-wait timestamps in usecs. */
static int nst_seq_show(struct seq_file *seq, void *v)
{
	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
	ktime_t now;
	s64 sock, send, status;

	spin_lock(&o2net_debug_lock);
	nst = next_nst(dummy_nst);
	if (!nst)
		goto out;

	now = ktime_get();
	sock = ktime_to_us(ktime_sub(now, nst->st_sock_time));
	send = ktime_to_us(ktime_sub(now, nst->st_send_time));
	status = ktime_to_us(ktime_sub(now, nst->st_status_time));

	/* get_task_comm isn't exported. oh well.
*/ seq_printf(seq, "%p:\n" " pid: %lu\n" " tgid: %lu\n" " process name: %s\n" " node: %u\n" " sc: %p\n" " message id: %d\n" " message type: %u\n" " message key: 0x%08x\n" " sock acquiry: %lld usecs ago\n" " send start: %lld usecs ago\n" " wait start: %lld usecs ago\n", nst, (unsigned long)task_pid_nr(nst->st_task), (unsigned long)nst->st_task->tgid, nst->st_task->comm, nst->st_node, nst->st_sc, nst->st_id, nst->st_msg_type, nst->st_msg_key, (long long)sock, (long long)send, (long long)status); out: spin_unlock(&o2net_debug_lock); return 0; } static void nst_seq_stop(struct seq_file *seq, void *v) { } static const struct seq_operations nst_seq_ops = { .start = nst_seq_start, .next = nst_seq_next, .stop = nst_seq_stop, .show = nst_seq_show, }; static int nst_fop_open(struct inode *inode, struct file *file) { struct o2net_send_tracking *dummy_nst; struct seq_file *seq; int ret; dummy_nst = kmalloc(sizeof(struct o2net_send_tracking), GFP_KERNEL); if (dummy_nst == NULL) { ret = -ENOMEM; goto out; } dummy_nst->st_task = NULL; ret = seq_open(file, &nst_seq_ops); if (ret) goto out; seq = file->private_data; seq->private = dummy_nst; o2net_debug_add_nst(dummy_nst); dummy_nst = NULL; out: kfree(dummy_nst); return ret; } static int nst_fop_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct o2net_send_tracking *dummy_nst = seq->private; o2net_debug_del_nst(dummy_nst); return seq_release_private(inode, file); } static const struct file_operations nst_seq_fops = { .open = nst_fop_open, .read = seq_read, .llseek = seq_lseek, .release = nst_fop_release, }; void o2net_debug_add_sc(struct o2net_sock_container *sc) { spin_lock(&o2net_debug_lock); list_add(&sc->sc_net_debug_item, &sock_containers); spin_unlock(&o2net_debug_lock); } void o2net_debug_del_sc(struct o2net_sock_container *sc) { spin_lock(&o2net_debug_lock); list_del_init(&sc->sc_net_debug_item); spin_unlock(&o2net_debug_lock); } struct o2net_sock_debug { int dbg_ctxt; struct 
o2net_sock_container *dbg_sock; }; static struct o2net_sock_container *next_sc(struct o2net_sock_container *sc_start) { struct o2net_sock_container *sc, *ret = NULL; assert_spin_locked(&o2net_debug_lock); list_for_each_entry(sc, &sc_start->sc_net_debug_item, sc_net_debug_item) { /* discover the head of the list miscast as a sc */ if (&sc->sc_net_debug_item == &sock_containers) break; /* use sc_page to detect real scs in the list */ if (sc->sc_page != NULL) { ret = sc; break; } } return ret; } static void *sc_seq_start(struct seq_file *seq, loff_t *pos) { struct o2net_sock_debug *sd = seq->private; struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; spin_lock(&o2net_debug_lock); sc = next_sc(dummy_sc); spin_unlock(&o2net_debug_lock); return sc; } static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct o2net_sock_debug *sd = seq->private; struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; spin_lock(&o2net_debug_lock); sc = next_sc(dummy_sc); list_del_init(&dummy_sc->sc_net_debug_item); if (sc) list_add(&dummy_sc->sc_net_debug_item, &sc->sc_net_debug_item); spin_unlock(&o2net_debug_lock); return sc; /* unused, just needs to be null when done */ } #ifdef CONFIG_OCFS2_FS_STATS # define sc_send_count(_s) ((_s)->sc_send_count) # define sc_recv_count(_s) ((_s)->sc_recv_count) # define sc_tv_acquiry_total_ns(_s) (ktime_to_ns((_s)->sc_tv_acquiry_total)) # define sc_tv_send_total_ns(_s) (ktime_to_ns((_s)->sc_tv_send_total)) # define sc_tv_status_total_ns(_s) (ktime_to_ns((_s)->sc_tv_status_total)) # define sc_tv_process_total_ns(_s) (ktime_to_ns((_s)->sc_tv_process_total)) #else # define sc_send_count(_s) (0U) # define sc_recv_count(_s) (0U) # define sc_tv_acquiry_total_ns(_s) (0LL) # define sc_tv_send_total_ns(_s) (0LL) # define sc_tv_status_total_ns(_s) (0LL) # define sc_tv_process_total_ns(_s) (0LL) #endif /* So that debugfs.ocfs2 can determine which format is being used */ #define O2NET_STATS_STR_VERSION 1 static void 
sc_show_sock_stats(struct seq_file *seq, struct o2net_sock_container *sc) { if (!sc) return; seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION, sc->sc_node->nd_num, (unsigned long)sc_send_count(sc), (long long)sc_tv_acquiry_total_ns(sc), (long long)sc_tv_send_total_ns(sc), (long long)sc_tv_status_total_ns(sc), (unsigned long)sc_recv_count(sc), (long long)sc_tv_process_total_ns(sc)); } static void sc_show_sock_container(struct seq_file *seq, struct o2net_sock_container *sc) { struct inet_sock *inet = NULL; __be32 saddr = 0, daddr = 0; __be16 sport = 0, dport = 0; if (!sc) return; if (sc->sc_sock) { inet = inet_sk(sc->sc_sock->sk); /* the stack's structs aren't sparse endian clean */ saddr = (__force __be32)inet->inet_saddr; daddr = (__force __be32)inet->inet_daddr; sport = (__force __be16)inet->inet_sport; dport = (__force __be16)inet->inet_dport; } /* XXX sigh, inet-> doesn't have sparse annotation so any * use of it here generates a warning with -Wbitwise */ seq_printf(seq, "%p:\n" " krefs: %d\n" " sock: %pI4:%u -> " "%pI4:%u\n" " remote node: %s\n" " page off: %zu\n" " handshake ok: %u\n" " timer: %lld usecs\n" " data ready: %lld usecs\n" " advance start: %lld usecs\n" " advance stop: %lld usecs\n" " func start: %lld usecs\n" " func stop: %lld usecs\n" " func key: 0x%08x\n" " func type: %u\n", sc, atomic_read(&sc->sc_kref.refcount), &saddr, inet ? ntohs(sport) : 0, &daddr, inet ? 
ntohs(dport) : 0, sc->sc_node->nd_name, sc->sc_page_off, sc->sc_handshake_ok, (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(sc->sc_tv_data_ready), (long long)ktime_to_us(sc->sc_tv_advance_start), (long long)ktime_to_us(sc->sc_tv_advance_stop), (long long)ktime_to_us(sc->sc_tv_func_start), (long long)ktime_to_us(sc->sc_tv_func_stop), sc->sc_msg_key, sc->sc_msg_type); } static int sc_seq_show(struct seq_file *seq, void *v) { struct o2net_sock_debug *sd = seq->private; struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; spin_lock(&o2net_debug_lock); sc = next_sc(dummy_sc); if (sc) { if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS) sc_show_sock_container(seq, sc); else sc_show_sock_stats(seq, sc); } spin_unlock(&o2net_debug_lock); return 0; } static void sc_seq_stop(struct seq_file *seq, void *v) { } static const struct seq_operations sc_seq_ops = { .start = sc_seq_start, .next = sc_seq_next, .stop = sc_seq_stop, .show = sc_seq_show, }; static int sc_common_open(struct file *file, struct o2net_sock_debug *sd) { struct o2net_sock_container *dummy_sc; struct seq_file *seq; int ret; dummy_sc = kmalloc(sizeof(struct o2net_sock_container), GFP_KERNEL); if (dummy_sc == NULL) { ret = -ENOMEM; goto out; } dummy_sc->sc_page = NULL; ret = seq_open(file, &sc_seq_ops); if (ret) goto out; seq = file->private_data; seq->private = sd; sd->dbg_sock = dummy_sc; o2net_debug_add_sc(dummy_sc); dummy_sc = NULL; out: kfree(dummy_sc); return ret; } static int sc_fop_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct o2net_sock_debug *sd = seq->private; struct o2net_sock_container *dummy_sc = sd->dbg_sock; o2net_debug_del_sc(dummy_sc); return seq_release_private(inode, file); } static int stats_fop_open(struct inode *inode, struct file *file) { struct o2net_sock_debug *sd; sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL); if (sd == NULL) return -ENOMEM; sd->dbg_ctxt = SHOW_SOCK_STATS; sd->dbg_sock = NULL; return 
sc_common_open(file, sd); } static const struct file_operations stats_seq_fops = { .open = stats_fop_open, .read = seq_read, .llseek = seq_lseek, .release = sc_fop_release, }; static int sc_fop_open(struct inode *inode, struct file *file) { struct o2net_sock_debug *sd; sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL); if (sd == NULL) return -ENOMEM; sd->dbg_ctxt = SHOW_SOCK_CONTAINERS; sd->dbg_sock = NULL; return sc_common_open(file, sd); } static const struct file_operations sc_seq_fops = { .open = sc_fop_open, .read = seq_read, .llseek = seq_lseek, .release = sc_fop_release, }; static int o2net_fill_bitmap(char *buf, int len) { unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; int i = -1, out = 0; o2net_fill_node_map(map, sizeof(map)); while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i); out += snprintf(buf + out, PAGE_SIZE - out, "\n"); return out; } static int nodes_fop_open(struct inode *inode, struct file *file) { char *buf; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE)); file->private_data = buf; return 0; } static int o2net_debug_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } static ssize_t o2net_debug_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, i_size_read(file->f_mapping->host)); } static const struct file_operations nodes_fops = { .open = nodes_fop_open, .release = o2net_debug_release, .read = o2net_debug_read, .llseek = generic_file_llseek, }; void o2net_debugfs_exit(void) { debugfs_remove(nodes_dentry); debugfs_remove(stats_dentry); debugfs_remove(sc_dentry); debugfs_remove(nst_dentry); debugfs_remove(o2net_dentry); } int o2net_debugfs_init(void) { umode_t mode = S_IFREG|S_IRUSR; o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); if (o2net_dentry) 
		/* Each file is only created when its predecessor succeeded;
		 * a NULL dentry falls through to the common cleanup below. */
		nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
						 o2net_dentry, NULL,
						 &nst_seq_fops);
	if (nst_dentry)
		sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
						o2net_dentry, NULL,
						&sc_seq_fops);
	if (sc_dentry)
		stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
						   o2net_dentry, NULL,
						   &stats_seq_fops);
	if (stats_dentry)
		nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
						   o2net_dentry, NULL,
						   &nodes_fops);
	if (nodes_dentry)
		return 0;

	/* At least one entry was not created: o2net_debugfs_exit() removes
	 * whatever subset did get created (debugfs_remove(NULL) is a no-op). */
	o2net_debugfs_exit();
	mlog_errno(-ENOMEM);
	return -ENOMEM;
}

#endif	/* CONFIG_DEBUG_FS */
gpl-2.0
skinner12/SkeRneL
fs/ntfs/lcnalloc.c
14394
33171
/* * lcnalloc.c - Cluster (de)allocation code. Part of the Linux-NTFS project. * * Copyright (c) 2004-2005 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef NTFS_RW #include <linux/pagemap.h> #include "lcnalloc.h" #include "debug.h" #include "bitmap.h" #include "inode.h" #include "volume.h" #include "attrib.h" #include "malloc.h" #include "aops.h" #include "ntfs.h" /** * ntfs_cluster_free_from_rl_nolock - free clusters from runlist * @vol: mounted ntfs volume on which to free the clusters * @rl: runlist describing the clusters to free * * Free all the clusters described by the runlist @rl on the volume @vol. In * the case of an error being returned, at least some of the clusters were not * freed. * * Return 0 on success and -errno on error. * * Locking: - The volume lcn bitmap must be locked for writing on entry and is * left locked on return. 
*/ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol, const runlist_element *rl) { struct inode *lcnbmp_vi = vol->lcnbmp_ino; int ret = 0; ntfs_debug("Entering."); if (!rl) return 0; for (; rl->length; rl++) { int err; if (rl->lcn < 0) continue; err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length); if (unlikely(err && (!ret || ret == -ENOMEM) && ret != err)) ret = err; } ntfs_debug("Done."); return ret; } /** * ntfs_cluster_alloc - allocate clusters on an ntfs volume * @vol: mounted ntfs volume on which to allocate the clusters * @start_vcn: vcn to use for the first allocated cluster * @count: number of clusters to allocate * @start_lcn: starting lcn at which to allocate the clusters (or -1 if none) * @zone: zone from which to allocate the clusters * @is_extension: if 'true', this is an attribute extension * * Allocate @count clusters preferably starting at cluster @start_lcn or at the * current allocator position if @start_lcn is -1, on the mounted ntfs volume * @vol. @zone is either DATA_ZONE for allocation of normal clusters or * MFT_ZONE for allocation of clusters for the master file table, i.e. the * $MFT/$DATA attribute. * * @start_vcn specifies the vcn of the first allocated cluster. This makes * merging the resulting runlist with the old runlist easier. * * If @is_extension is 'true', the caller is allocating clusters to extend an * attribute and if it is 'false', the caller is allocating clusters to fill a * hole in an attribute. Practically the difference is that if @is_extension * is 'true' the returned runlist will be terminated with LCN_ENOENT and if * @is_extension is 'false' the runlist will be terminated with * LCN_RL_NOT_MAPPED. * * You need to check the return value with IS_ERR(). If this is false, the * function was successful and the return value is a runlist describing the * allocated cluster(s). If IS_ERR() is true, the function failed and * PTR_ERR() gives you the error code. 
* * Notes on the allocation algorithm * ================================= * * There are two data zones. First is the area between the end of the mft zone * and the end of the volume, and second is the area between the start of the * volume and the start of the mft zone. On unmodified/standard NTFS 1.x * volumes, the second data zone does not exist due to the mft zone being * expanded to cover the start of the volume in order to reserve space for the * mft bitmap attribute. * * This is not the prettiest function but the complexity stems from the need of * implementing the mft vs data zoned approach and from the fact that we have * access to the lcn bitmap in portions of up to 8192 bytes at a time, so we * need to cope with crossing over boundaries of two buffers. Further, the * fact that the allocator allows for caller supplied hints as to the location * of where allocation should begin and the fact that the allocator keeps track * of where in the data zones the next natural allocation should occur, * contribute to the complexity of the function. But it should all be * worthwhile, because this allocator should: 1) be a full implementation of * the MFT zone approach used by Windows NT, 2) cause reduction in * fragmentation, and 3) be speedy in allocations (the code is not optimized * for speed, but the algorithm is, so further speed improvements are probably * possible). * * FIXME: We should be monitoring cluster allocation and increment the MFT zone * size dynamically but this is something for the future. We will just cause * heavier fragmentation by not doing it and I am not even sure Windows would * grow the MFT zone dynamically, so it might even be correct not to do this. * The overhead in doing dynamic MFT zone expansion would be very large and * unlikely worth the effort. (AIA) * * TODO: I have added in double the required zone position pointer wrap around * logic which can be optimized to having only one of the two logic sets. 
* However, having the double logic will work fine, but if we have only one of * the sets and we get it wrong somewhere, then we get into trouble, so * removing the duplicate logic requires _very_ careful consideration of _all_ * possible code paths. So at least for now, I am leaving the double logic - * better safe than sorry... (AIA) * * Locking: - The volume lcn bitmap must be unlocked on entry and is unlocked * on return. * - This function takes the volume lcn bitmap lock for writing and * modifies the bitmap contents. */ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn, const s64 count, const LCN start_lcn, const NTFS_CLUSTER_ALLOCATION_ZONES zone, const bool is_extension) { LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn; LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size; s64 clusters; loff_t i_size; struct inode *lcnbmp_vi; runlist_element *rl = NULL; struct address_space *mapping; struct page *page = NULL; u8 *buf, *byte; int err = 0, rlpos, rlsize, buf_size; u8 pass, done_zones, search_zone, need_writeback = 0, bit; ntfs_debug("Entering for start_vcn 0x%llx, count 0x%llx, start_lcn " "0x%llx, zone %s_ZONE.", (unsigned long long)start_vcn, (unsigned long long)count, (unsigned long long)start_lcn, zone == MFT_ZONE ? "MFT" : "DATA"); BUG_ON(!vol); lcnbmp_vi = vol->lcnbmp_ino; BUG_ON(!lcnbmp_vi); BUG_ON(start_vcn < 0); BUG_ON(count < 0); BUG_ON(start_lcn < -1); BUG_ON(zone < FIRST_ZONE); BUG_ON(zone > LAST_ZONE); /* Return NULL if @count is zero. */ if (!count) return NULL; /* Take the lcnbmp lock for writing. */ down_write(&vol->lcnbmp_lock); /* * If no specific @start_lcn was requested, use the current data zone * position, otherwise use the requested @start_lcn but make sure it * lies outside the mft zone. Also set done_zones to 0 (no zones done) * and pass depending on whether we are starting inside a zone (1) or * at the beginning of a zone (2). 
If requesting from the MFT_ZONE, * we either start at the current position within the mft zone or at * the specified position. If the latter is out of bounds then we start * at the beginning of the MFT_ZONE. */ done_zones = 0; pass = 1; /* * zone_start and zone_end are the current search range. search_zone * is 1 for mft zone, 2 for data zone 1 (end of mft zone till end of * volume) and 4 for data zone 2 (start of volume till start of mft * zone). */ zone_start = start_lcn; if (zone_start < 0) { if (zone == DATA_ZONE) zone_start = vol->data1_zone_pos; else zone_start = vol->mft_zone_pos; if (!zone_start) { /* * Zone starts at beginning of volume which means a * single pass is sufficient. */ pass = 2; } } else if (zone == DATA_ZONE && zone_start >= vol->mft_zone_start && zone_start < vol->mft_zone_end) { zone_start = vol->mft_zone_end; /* * Starting at beginning of data1_zone which means a single * pass in this zone is sufficient. */ pass = 2; } else if (zone == MFT_ZONE && (zone_start < vol->mft_zone_start || zone_start >= vol->mft_zone_end)) { zone_start = vol->mft_lcn; if (!vol->mft_zone_end) zone_start = 0; /* * Starting at beginning of volume which means a single pass * is sufficient. */ pass = 2; } if (zone == MFT_ZONE) { zone_end = vol->mft_zone_end; search_zone = 1; } else /* if (zone == DATA_ZONE) */ { /* Skip searching the mft zone. */ done_zones |= 1; if (zone_start >= vol->mft_zone_end) { zone_end = vol->nr_clusters; search_zone = 2; } else { zone_end = vol->mft_zone_start; search_zone = 4; } } /* * bmp_pos is the current bit position inside the bitmap. We use * bmp_initial_pos to determine whether or not to do a zone switch. */ bmp_pos = bmp_initial_pos = zone_start; /* Loop until all clusters are allocated, i.e. clusters == 0. 
*/ clusters = count; rlpos = rlsize = 0; mapping = lcnbmp_vi->i_mapping; i_size = i_size_read(lcnbmp_vi); while (1) { ntfs_debug("Start of outer while loop: done_zones 0x%x, " "search_zone %i, pass %i, zone_start 0x%llx, " "zone_end 0x%llx, bmp_initial_pos 0x%llx, " "bmp_pos 0x%llx, rlpos %i, rlsize %i.", done_zones, search_zone, pass, (unsigned long long)zone_start, (unsigned long long)zone_end, (unsigned long long)bmp_initial_pos, (unsigned long long)bmp_pos, rlpos, rlsize); /* Loop until we run out of free clusters. */ last_read_pos = bmp_pos >> 3; ntfs_debug("last_read_pos 0x%llx.", (unsigned long long)last_read_pos); if (last_read_pos > i_size) { ntfs_debug("End of attribute reached. " "Skipping to zone_pass_done."); goto zone_pass_done; } if (likely(page)) { if (need_writeback) { ntfs_debug("Marking page dirty."); flush_dcache_page(page); set_page_dirty(page); need_writeback = 0; } ntfs_unmap_page(page); } page = ntfs_map_page(mapping, last_read_pos >> PAGE_CACHE_SHIFT); if (IS_ERR(page)) { err = PTR_ERR(page); ntfs_error(vol->sb, "Failed to map page."); goto out; } buf_size = last_read_pos & ~PAGE_CACHE_MASK; buf = page_address(page) + buf_size; buf_size = PAGE_CACHE_SIZE - buf_size; if (unlikely(last_read_pos + buf_size > i_size)) buf_size = i_size - last_read_pos; buf_size <<= 3; lcn = bmp_pos & 7; bmp_pos &= ~(LCN)7; ntfs_debug("Before inner while loop: buf_size %i, lcn 0x%llx, " "bmp_pos 0x%llx, need_writeback %i.", buf_size, (unsigned long long)lcn, (unsigned long long)bmp_pos, need_writeback); while (lcn < buf_size && lcn + bmp_pos < zone_end) { byte = buf + (lcn >> 3); ntfs_debug("In inner while loop: buf_size %i, " "lcn 0x%llx, bmp_pos 0x%llx, " "need_writeback %i, byte ofs 0x%x, " "*byte 0x%x.", buf_size, (unsigned long long)lcn, (unsigned long long)bmp_pos, need_writeback, (unsigned int)(lcn >> 3), (unsigned int)*byte); /* Skip full bytes. 
*/ if (*byte == 0xff) { lcn = (lcn + 8) & ~(LCN)7; ntfs_debug("Continuing while loop 1."); continue; } bit = 1 << (lcn & 7); ntfs_debug("bit 0x%x.", bit); /* If the bit is already set, go onto the next one. */ if (*byte & bit) { lcn++; ntfs_debug("Continuing while loop 2."); continue; } /* * Allocate more memory if needed, including space for * the terminator element. * ntfs_malloc_nofs() operates on whole pages only. */ if ((rlpos + 2) * sizeof(*rl) > rlsize) { runlist_element *rl2; ntfs_debug("Reallocating memory."); if (!rl) ntfs_debug("First free bit is at LCN " "0x%llx.", (unsigned long long) (lcn + bmp_pos)); rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE); if (unlikely(!rl2)) { err = -ENOMEM; ntfs_error(vol->sb, "Failed to " "allocate memory."); goto out; } memcpy(rl2, rl, rlsize); ntfs_free(rl); rl = rl2; rlsize += PAGE_SIZE; ntfs_debug("Reallocated memory, rlsize 0x%x.", rlsize); } /* Allocate the bitmap bit. */ *byte |= bit; /* We need to write this bitmap page to disk. */ need_writeback = 1; ntfs_debug("*byte 0x%x, need_writeback is set.", (unsigned int)*byte); /* * Coalesce with previous run if adjacent LCNs. * Otherwise, append a new run. 
*/ ntfs_debug("Adding run (lcn 0x%llx, len 0x%llx), " "prev_lcn 0x%llx, lcn 0x%llx, " "bmp_pos 0x%llx, prev_run_len 0x%llx, " "rlpos %i.", (unsigned long long)(lcn + bmp_pos), 1ULL, (unsigned long long)prev_lcn, (unsigned long long)lcn, (unsigned long long)bmp_pos, (unsigned long long)prev_run_len, rlpos); if (prev_lcn == lcn + bmp_pos - prev_run_len && rlpos) { ntfs_debug("Coalescing to run (lcn 0x%llx, " "len 0x%llx).", (unsigned long long) rl[rlpos - 1].lcn, (unsigned long long) rl[rlpos - 1].length); rl[rlpos - 1].length = ++prev_run_len; ntfs_debug("Run now (lcn 0x%llx, len 0x%llx), " "prev_run_len 0x%llx.", (unsigned long long) rl[rlpos - 1].lcn, (unsigned long long) rl[rlpos - 1].length, (unsigned long long) prev_run_len); } else { if (likely(rlpos)) { ntfs_debug("Adding new run, (previous " "run lcn 0x%llx, " "len 0x%llx).", (unsigned long long) rl[rlpos - 1].lcn, (unsigned long long) rl[rlpos - 1].length); rl[rlpos].vcn = rl[rlpos - 1].vcn + prev_run_len; } else { ntfs_debug("Adding new run, is first " "run."); rl[rlpos].vcn = start_vcn; } rl[rlpos].lcn = prev_lcn = lcn + bmp_pos; rl[rlpos].length = prev_run_len = 1; rlpos++; } /* Done? */ if (!--clusters) { LCN tc; /* * Update the current zone position. Positions * of already scanned zones have been updated * during the respective zone switches. */ tc = lcn + bmp_pos + 1; ntfs_debug("Done. 
Updating current zone " "position, tc 0x%llx, " "search_zone %i.", (unsigned long long)tc, search_zone); switch (search_zone) { case 1: ntfs_debug("Before checks, " "vol->mft_zone_pos " "0x%llx.", (unsigned long long) vol->mft_zone_pos); if (tc >= vol->mft_zone_end) { vol->mft_zone_pos = vol->mft_lcn; if (!vol->mft_zone_end) vol->mft_zone_pos = 0; } else if ((bmp_initial_pos >= vol->mft_zone_pos || tc > vol->mft_zone_pos) && tc >= vol->mft_lcn) vol->mft_zone_pos = tc; ntfs_debug("After checks, " "vol->mft_zone_pos " "0x%llx.", (unsigned long long) vol->mft_zone_pos); break; case 2: ntfs_debug("Before checks, " "vol->data1_zone_pos " "0x%llx.", (unsigned long long) vol->data1_zone_pos); if (tc >= vol->nr_clusters) vol->data1_zone_pos = vol->mft_zone_end; else if ((bmp_initial_pos >= vol->data1_zone_pos || tc > vol->data1_zone_pos) && tc >= vol->mft_zone_end) vol->data1_zone_pos = tc; ntfs_debug("After checks, " "vol->data1_zone_pos " "0x%llx.", (unsigned long long) vol->data1_zone_pos); break; case 4: ntfs_debug("Before checks, " "vol->data2_zone_pos " "0x%llx.", (unsigned long long) vol->data2_zone_pos); if (tc >= vol->mft_zone_start) vol->data2_zone_pos = 0; else if (bmp_initial_pos >= vol->data2_zone_pos || tc > vol->data2_zone_pos) vol->data2_zone_pos = tc; ntfs_debug("After checks, " "vol->data2_zone_pos " "0x%llx.", (unsigned long long) vol->data2_zone_pos); break; default: BUG(); } ntfs_debug("Finished. Going to out."); goto out; } lcn++; } bmp_pos += buf_size; ntfs_debug("After inner while loop: buf_size 0x%x, lcn " "0x%llx, bmp_pos 0x%llx, need_writeback %i.", buf_size, (unsigned long long)lcn, (unsigned long long)bmp_pos, need_writeback); if (bmp_pos < zone_end) { ntfs_debug("Continuing outer while loop, " "bmp_pos 0x%llx, zone_end 0x%llx.", (unsigned long long)bmp_pos, (unsigned long long)zone_end); continue; } zone_pass_done: /* Finished with the current zone pass. 
*/ ntfs_debug("At zone_pass_done, pass %i.", pass); if (pass == 1) { /* * Now do pass 2, scanning the first part of the zone * we omitted in pass 1. */ pass = 2; zone_end = zone_start; switch (search_zone) { case 1: /* mft_zone */ zone_start = vol->mft_zone_start; break; case 2: /* data1_zone */ zone_start = vol->mft_zone_end; break; case 4: /* data2_zone */ zone_start = 0; break; default: BUG(); } /* Sanity check. */ if (zone_end < zone_start) zone_end = zone_start; bmp_pos = zone_start; ntfs_debug("Continuing outer while loop, pass 2, " "zone_start 0x%llx, zone_end 0x%llx, " "bmp_pos 0x%llx.", (unsigned long long)zone_start, (unsigned long long)zone_end, (unsigned long long)bmp_pos); continue; } /* pass == 2 */ done_zones_check: ntfs_debug("At done_zones_check, search_zone %i, done_zones " "before 0x%x, done_zones after 0x%x.", search_zone, done_zones, done_zones | search_zone); done_zones |= search_zone; if (done_zones < 7) { ntfs_debug("Switching zone."); /* Now switch to the next zone we haven't done yet. */ pass = 1; switch (search_zone) { case 1: ntfs_debug("Switching from mft zone to data1 " "zone."); /* Update mft zone position. */ if (rlpos) { LCN tc; ntfs_debug("Before checks, " "vol->mft_zone_pos " "0x%llx.", (unsigned long long) vol->mft_zone_pos); tc = rl[rlpos - 1].lcn + rl[rlpos - 1].length; if (tc >= vol->mft_zone_end) { vol->mft_zone_pos = vol->mft_lcn; if (!vol->mft_zone_end) vol->mft_zone_pos = 0; } else if ((bmp_initial_pos >= vol->mft_zone_pos || tc > vol->mft_zone_pos) && tc >= vol->mft_lcn) vol->mft_zone_pos = tc; ntfs_debug("After checks, " "vol->mft_zone_pos " "0x%llx.", (unsigned long long) vol->mft_zone_pos); } /* Switch from mft zone to data1 zone. 
*/ switch_to_data1_zone: search_zone = 2; zone_start = bmp_initial_pos = vol->data1_zone_pos; zone_end = vol->nr_clusters; if (zone_start == vol->mft_zone_end) pass = 2; if (zone_start >= zone_end) { vol->data1_zone_pos = zone_start = vol->mft_zone_end; pass = 2; } break; case 2: ntfs_debug("Switching from data1 zone to " "data2 zone."); /* Update data1 zone position. */ if (rlpos) { LCN tc; ntfs_debug("Before checks, " "vol->data1_zone_pos " "0x%llx.", (unsigned long long) vol->data1_zone_pos); tc = rl[rlpos - 1].lcn + rl[rlpos - 1].length; if (tc >= vol->nr_clusters) vol->data1_zone_pos = vol->mft_zone_end; else if ((bmp_initial_pos >= vol->data1_zone_pos || tc > vol->data1_zone_pos) && tc >= vol->mft_zone_end) vol->data1_zone_pos = tc; ntfs_debug("After checks, " "vol->data1_zone_pos " "0x%llx.", (unsigned long long) vol->data1_zone_pos); } /* Switch from data1 zone to data2 zone. */ search_zone = 4; zone_start = bmp_initial_pos = vol->data2_zone_pos; zone_end = vol->mft_zone_start; if (!zone_start) pass = 2; if (zone_start >= zone_end) { vol->data2_zone_pos = zone_start = bmp_initial_pos = 0; pass = 2; } break; case 4: ntfs_debug("Switching from data2 zone to " "data1 zone."); /* Update data2 zone position. */ if (rlpos) { LCN tc; ntfs_debug("Before checks, " "vol->data2_zone_pos " "0x%llx.", (unsigned long long) vol->data2_zone_pos); tc = rl[rlpos - 1].lcn + rl[rlpos - 1].length; if (tc >= vol->mft_zone_start) vol->data2_zone_pos = 0; else if (bmp_initial_pos >= vol->data2_zone_pos || tc > vol->data2_zone_pos) vol->data2_zone_pos = tc; ntfs_debug("After checks, " "vol->data2_zone_pos " "0x%llx.", (unsigned long long) vol->data2_zone_pos); } /* Switch from data2 zone to data1 zone. 
*/ goto switch_to_data1_zone; default: BUG(); } ntfs_debug("After zone switch, search_zone %i, " "pass %i, bmp_initial_pos 0x%llx, " "zone_start 0x%llx, zone_end 0x%llx.", search_zone, pass, (unsigned long long)bmp_initial_pos, (unsigned long long)zone_start, (unsigned long long)zone_end); bmp_pos = zone_start; if (zone_start == zone_end) { ntfs_debug("Empty zone, going to " "done_zones_check."); /* Empty zone. Don't bother searching it. */ goto done_zones_check; } ntfs_debug("Continuing outer while loop."); continue; } /* done_zones == 7 */ ntfs_debug("All zones are finished."); /* * All zones are finished! If DATA_ZONE, shrink mft zone. If * MFT_ZONE, we have really run out of space. */ mft_zone_size = vol->mft_zone_end - vol->mft_zone_start; ntfs_debug("vol->mft_zone_start 0x%llx, vol->mft_zone_end " "0x%llx, mft_zone_size 0x%llx.", (unsigned long long)vol->mft_zone_start, (unsigned long long)vol->mft_zone_end, (unsigned long long)mft_zone_size); if (zone == MFT_ZONE || mft_zone_size <= 0) { ntfs_debug("No free clusters left, going to out."); /* Really no more space left on device. */ err = -ENOSPC; goto out; } /* zone == DATA_ZONE && mft_zone_size > 0 */ ntfs_debug("Shrinking mft zone."); zone_end = vol->mft_zone_end; mft_zone_size >>= 1; if (mft_zone_size > 0) vol->mft_zone_end = vol->mft_zone_start + mft_zone_size; else /* mft zone and data2 zone no longer exist. 
*/ vol->data2_zone_pos = vol->mft_zone_start = vol->mft_zone_end = 0; if (vol->mft_zone_pos >= vol->mft_zone_end) { vol->mft_zone_pos = vol->mft_lcn; if (!vol->mft_zone_end) vol->mft_zone_pos = 0; } bmp_pos = zone_start = bmp_initial_pos = vol->data1_zone_pos = vol->mft_zone_end; search_zone = 2; pass = 2; done_zones &= ~2; ntfs_debug("After shrinking mft zone, mft_zone_size 0x%llx, " "vol->mft_zone_start 0x%llx, " "vol->mft_zone_end 0x%llx, " "vol->mft_zone_pos 0x%llx, search_zone 2, " "pass 2, dones_zones 0x%x, zone_start 0x%llx, " "zone_end 0x%llx, vol->data1_zone_pos 0x%llx, " "continuing outer while loop.", (unsigned long long)mft_zone_size, (unsigned long long)vol->mft_zone_start, (unsigned long long)vol->mft_zone_end, (unsigned long long)vol->mft_zone_pos, done_zones, (unsigned long long)zone_start, (unsigned long long)zone_end, (unsigned long long)vol->data1_zone_pos); } ntfs_debug("After outer while loop."); out: ntfs_debug("At out."); /* Add runlist terminator element. */ if (likely(rl)) { rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length; rl[rlpos].lcn = is_extension ? LCN_ENOENT : LCN_RL_NOT_MAPPED; rl[rlpos].length = 0; } if (likely(page && !IS_ERR(page))) { if (need_writeback) { ntfs_debug("Marking page dirty."); flush_dcache_page(page); set_page_dirty(page); need_writeback = 0; } ntfs_unmap_page(page); } if (likely(!err)) { up_write(&vol->lcnbmp_lock); ntfs_debug("Done."); return rl; } ntfs_error(vol->sb, "Failed to allocate clusters, aborting " "(error %i).", err); if (rl) { int err2; if (err == -ENOSPC) ntfs_debug("Not enough space to complete allocation, " "err -ENOSPC, first free lcn 0x%llx, " "could allocate up to 0x%llx " "clusters.", (unsigned long long)rl[0].lcn, (unsigned long long)(count - clusters)); /* Deallocate all allocated clusters. */ ntfs_debug("Attempting rollback..."); err2 = ntfs_cluster_free_from_rl_nolock(vol, rl); if (err2) { ntfs_error(vol->sb, "Failed to rollback (error %i). " "Leaving inconsistent metadata! 
" "Unmount and run chkdsk.", err2); NVolSetErrors(vol); } /* Free the runlist. */ ntfs_free(rl); } else if (err == -ENOSPC) ntfs_debug("No space left at all, err = -ENOSPC, first free " "lcn = 0x%llx.", (long long)vol->data1_zone_pos); up_write(&vol->lcnbmp_lock); return ERR_PTR(err); } /** * __ntfs_cluster_free - free clusters on an ntfs volume * @ni: ntfs inode whose runlist describes the clusters to free * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters * @count: number of clusters to free or -1 for all clusters * @ctx: active attribute search context if present or NULL if not * @is_rollback: true if this is a rollback operation * * Free @count clusters starting at the cluster @start_vcn in the runlist * described by the vfs inode @ni. * * If @count is -1, all clusters from @start_vcn to the end of the runlist are * deallocated. Thus, to completely free all clusters in a runlist, use * @start_vcn = 0 and @count = -1. * * If @ctx is specified, it is an active search context of @ni and its base mft * record. This is needed when __ntfs_cluster_free() encounters unmapped * runlist fragments and allows their mapping. If you do not have the mft * record mapped, you can specify @ctx as NULL and __ntfs_cluster_free() will * perform the necessary mapping and unmapping. * * Note, __ntfs_cluster_free() saves the state of @ctx on entry and restores it * before returning. Thus, @ctx will be left pointing to the same attribute on * return as on entry. However, the actual pointers in @ctx may point to * different memory locations on return, so you must remember to reset any * cached pointers from the @ctx, i.e. after the call to __ntfs_cluster_free(), * you will probably want to do: * m = ctx->mrec; * a = ctx->attr; * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that * you cache ctx->mrec in a variable @m of type MFT_RECORD *. * * @is_rollback should always be 'false', it is for internal use to rollback * errors. 
You probably want to use ntfs_cluster_free() instead. * * Note, __ntfs_cluster_free() does not modify the runlist, so you have to * remove from the runlist or mark sparse the freed runs later. * * Return the number of deallocated clusters (not counting sparse ones) on * success and -errno on error. * * WARNING: If @ctx is supplied, regardless of whether success or failure is * returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx * is no longer valid, i.e. you need to either call * ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it. * In that case PTR_ERR(@ctx->mrec) will give you the error code for * why the mapping of the old inode failed. * * Locking: - The runlist described by @ni must be locked for writing on entry * and is locked on return. Note the runlist may be modified when * needed runlist fragments need to be mapped. * - The volume lcn bitmap must be unlocked on entry and is unlocked * on return. * - This function takes the volume lcn bitmap lock for writing and * modifies the bitmap contents. * - If @ctx is NULL, the base mft record of @ni must not be mapped on * entry and it will be left unmapped on return. * - If @ctx is not NULL, the base mft record must be mapped on entry * and it will be left mapped on return. */ s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count, ntfs_attr_search_ctx *ctx, const bool is_rollback) { s64 delta, to_free, total_freed, real_freed; ntfs_volume *vol; struct inode *lcnbmp_vi; runlist_element *rl; int err; BUG_ON(!ni); ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count " "0x%llx.%s", ni->mft_no, (unsigned long long)start_vcn, (unsigned long long)count, is_rollback ? " (rollback)" : ""); vol = ni->vol; lcnbmp_vi = vol->lcnbmp_ino; BUG_ON(!lcnbmp_vi); BUG_ON(start_vcn < 0); BUG_ON(count < -1); /* * Lock the lcn bitmap for writing but only if not rolling back. 
We * must hold the lock all the way including through rollback otherwise * rollback is not possible because once we have cleared a bit and * dropped the lock, anyone could have set the bit again, thus * allocating the cluster for another use. */ if (likely(!is_rollback)) down_write(&vol->lcnbmp_lock); total_freed = real_freed = 0; rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, ctx); if (IS_ERR(rl)) { if (!is_rollback) ntfs_error(vol->sb, "Failed to find first runlist " "element (error %li), aborting.", PTR_ERR(rl)); err = PTR_ERR(rl); goto err_out; } if (unlikely(rl->lcn < LCN_HOLE)) { if (!is_rollback) ntfs_error(vol->sb, "First runlist element has " "invalid lcn, aborting."); err = -EIO; goto err_out; } /* Find the starting cluster inside the run that needs freeing. */ delta = start_vcn - rl->vcn; /* The number of clusters in this run that need freeing. */ to_free = rl->length - delta; if (count >= 0 && to_free > count) to_free = count; if (likely(rl->lcn >= 0)) { /* Do the actual freeing of the clusters in this run. */ err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn + delta, to_free, likely(!is_rollback) ? 0 : 1); if (unlikely(err)) { if (!is_rollback) ntfs_error(vol->sb, "Failed to clear first run " "(error %i), aborting.", err); goto err_out; } /* We have freed @to_free real clusters. */ real_freed = to_free; }; /* Go to the next run and adjust the number of clusters left to free. */ ++rl; if (count >= 0) count -= to_free; /* Keep track of the total "freed" clusters, including sparse ones. */ total_freed = to_free; /* * Loop over the remaining runs, using @count as a capping value, and * free them. */ for (; rl->length && count != 0; ++rl) { if (unlikely(rl->lcn < LCN_HOLE)) { VCN vcn; /* Attempt to map runlist. 
*/ vcn = rl->vcn; rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx); if (IS_ERR(rl)) { err = PTR_ERR(rl); if (!is_rollback) ntfs_error(vol->sb, "Failed to map " "runlist fragment or " "failed to find " "subsequent runlist " "element."); goto err_out; } if (unlikely(rl->lcn < LCN_HOLE)) { if (!is_rollback) ntfs_error(vol->sb, "Runlist element " "has invalid lcn " "(0x%llx).", (unsigned long long) rl->lcn); err = -EIO; goto err_out; } } /* The number of clusters in this run that need freeing. */ to_free = rl->length; if (count >= 0 && to_free > count) to_free = count; if (likely(rl->lcn >= 0)) { /* Do the actual freeing of the clusters in the run. */ err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn, to_free, likely(!is_rollback) ? 0 : 1); if (unlikely(err)) { if (!is_rollback) ntfs_error(vol->sb, "Failed to clear " "subsequent run."); goto err_out; } /* We have freed @to_free real clusters. */ real_freed += to_free; } /* Adjust the number of clusters left to free. */ if (count >= 0) count -= to_free; /* Update the total done clusters. */ total_freed += to_free; } if (likely(!is_rollback)) up_write(&vol->lcnbmp_lock); BUG_ON(count > 0); /* We are done. Return the number of actually freed clusters. */ ntfs_debug("Done."); return real_freed; err_out: if (is_rollback) return err; /* If no real clusters were freed, no need to rollback. */ if (!real_freed) { up_write(&vol->lcnbmp_lock); return err; } /* * Attempt to rollback and if that succeeds just return the error code. * If rollback fails, set the volume errors flag, emit an error * message, and return the error code. */ delta = __ntfs_cluster_free(ni, start_vcn, total_freed, ctx, true); if (delta < 0) { ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving " "inconsistent metadata! Unmount and run " "chkdsk.", (int)delta); NVolSetErrors(vol); } up_write(&vol->lcnbmp_lock); ntfs_error(vol->sb, "Aborting (error %i).", err); return err; } #endif /* NTFS_RW */
gpl-2.0
nadavitay/linux-3.14.1
net/sched/sch_api.c
59
45419
/* * net/sched/sch_api.c Packet scheduler API. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Fixes: * * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired. * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/hrtimer.h> #include <linux/lockdep.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/netlink.h> #include <net/pkt_sched.h> static int qdisc_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, struct Qdisc *old, struct Qdisc *new); static int tclass_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct Qdisc *q, unsigned long cl, int event); /* Short review. ------------- This file consists of two interrelated parts: 1. queueing disciplines manager frontend. 2. traffic classes manager frontend. Generally, queueing discipline ("qdisc") is a black box, which is able to enqueue packets and to dequeue them (when device is ready to send something) in order and at times determined by algorithm hidden in it. qdisc's are divided to two categories: - "queues", which have no internal structure visible from outside. - "schedulers", which split all the packets to "traffic classes", using "packet classifiers" (look at cls_api.c) In turn, classes may have child qdiscs (as rule, queues) attached to them etc. etc. etc. 
The goal of the routines in this file is to translate information supplied by user in the form of handles to more intelligible for kernel form, to make some sanity checks and part of work, which is common to all qdiscs and to provide rtnetlink notifications. All real intelligent work is done inside qdisc modules. Every discipline has two major routines: enqueue and dequeue. ---dequeue dequeue usually returns a skb to send. It is allowed to return NULL, but it does not mean that queue is empty, it just means that discipline does not want to send anything this time. Queue is really empty if q->q.qlen == 0. For complicated disciplines with multiple queues q->q is not real packet queue, but however q->q.qlen must be valid. ---enqueue enqueue returns 0, if packet was enqueued successfully. If packet (this one or another one) was dropped, it returns not zero error code. NET_XMIT_DROP - this packet dropped Expected action: do not backoff, but wait until queue will clear. NET_XMIT_CN - probably this packet enqueued, but another one dropped. Expected action: backoff or ignore NET_XMIT_POLICED - dropped by police. Expected action: backoff or error to real-time apps. Auxiliary routines: ---peek like dequeue but without removing a packet from the queue ---reset returns qdisc to initial state: purge all buffers, clear all timers, counters (except for statistics) etc. ---init initializes newly created qdisc. ---destroy destroys resources allocated by init and during lifetime of qdisc. ---change changes qdisc parameters. */ /* Protects list of registered TC modules. It is pure SMP lock. */ static DEFINE_RWLOCK(qdisc_mod_lock); /************************************************ * Queueing disciplines manipulation. * ************************************************/ /* The list of all installed queueing disciplines. 
*/

/* Head of the singly-linked list of all registered qdisc ops,
 * protected by qdisc_mod_lock. */
static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

/*
 * register_qdisc - add a Qdisc_ops to the global list.
 * Returns -EEXIST if ops with the same id are already registered and
 * -EINVAL if mandatory callbacks are missing or inconsistent.  Absent
 * enqueue/dequeue/peek hooks are filled in with the noop equivalents.
 */
int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		/* peek can be derived only if dequeue is also absent. */
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		/* Classful qdiscs must supply the full class interface. */
		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		/* A classifier chain implies bind/unbind support. */
		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);

/*
 * unregister_qdisc - remove a Qdisc_ops from the global list.
 * Returns -ENOENT if the ops were never registered.
 */
int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	/* Read lock is enough: we only copy the current default's id. */
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

/*
 * Look up ops by id and take a module reference on success.
 * Caller must hold qdisc_mod_lock; returns NULL if not found or if
 * the owning module is going away.
 */
static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);
		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

/* We know handle. Find qdisc among all qdisc's attached to device
 * (root qdisc, all its children, children of children etc.)
 */
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	/* The builtin root (noop) never matches by handle. */
	if (!(root->flags & TCQ_F_BUILTIN) && root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

/*
 * Link a newly created qdisc into its root's list so qdisc_lookup()
 * can find it.  Root and ingress qdiscs are not listed.
 */
void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		struct Qdisc *root = qdisc_dev(q)->qdisc;

		WARN_ON_ONCE(root == &noop_qdisc);
		list_add_tail(&q->list, &root->list);
	}
}
EXPORT_SYMBOL(qdisc_list_add);

/* Counterpart of qdisc_list_add(). */
void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_del(&q->list);
}
EXPORT_SYMBOL(qdisc_list_del);

/*
 * Find a qdisc on @dev by handle, searching the egress hierarchy first
 * and then the ingress queue, if present.
 */
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

/*
 * Resolve @classid inside classful qdisc @p to its leaf qdisc.
 * Returns NULL if @p is classless or the class does not exist.
 * The class reference taken by get() is dropped before returning.
 */
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */
static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				/* Pin the module before dropping the lock. */
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* The linklayer setting were not transferred from iproute2, in older
 * versions, and the rate tables lookup systems have been dropped in
 * the kernel. To keep backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by detecting if the rate
 * table were modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value.  The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find low and high table entries for
 * mapping this cell.  If these entries contain the same value, when
 * the rate tables have been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48 bytes cell/entry,
 * and then roundup to the next cell, calc the table entry one below,
 * and compare.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}

/* Global list of shared, refcounted rate tables.
 * NOTE(review): no explicit lock is taken here; presumably RTNL
 * serialises all accesses — confirm against the callers. */
static struct qdisc_rate_table *qdisc_rtab_list;

/*
 * Get a (possibly shared) rate table matching @r and the 1024-byte
 * table in @tab.  An existing identical table just gains a refcount;
 * otherwise a new one is allocated and prepended to the list.
 * Returns NULL on bad input or allocation failure.
 */
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		/* Old iproute2 did not fill in linklayer; detect it. */
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

/* Drop one reference on @tab; unlink and free it on the last put. */
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

/* Shared size tables, protected by qdisc_stab_lock. */
static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

/*
 * Parse a TCA_STAB attribute and return a refcounted size table.
 * Identical existing tables are shared; otherwise a new table is
 * allocated.  Returns ERR_PTR on parse/validation/allocation failure.
 */
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	/* Declared size must match the actual DATA payload. */
	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}

/* RCU callback freeing a size table after the grace period. */
static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

/* Drop a reference on @tab; the last put frees it via RCU. */
void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);

/* Dump @stab's sizespec into a nested TCA_STAB attribute of @skb. */
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

/*
 * Compute the size-table-adjusted packet length for @skb and store it
 * in the skb cb.  Out-of-range slots extrapolate from the last entry;
 * the result is clamped to at least 1.
 */
void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);

/* One-shot warning that a supposedly work-conserving qdisc throttled. */
void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

/* hrtimer callback: unthrottle the qdisc and reschedule its root. */
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	qdisc_unthrottled(wd->qdisc);
	__netif_schedule(qdisc_root(wd->qdisc));

	return HRTIMER_NORESTART;
}

/* Initialize a watchdog bound to @qdisc; arm with *_schedule_ns(). */
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

/*
 * Arm the watchdog to fire at absolute time @expires (ns) and mark the
 * qdisc throttled.  A deactivated qdisc is never armed.
 */
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	qdisc_throttled(wd->qdisc);

	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

/* Cancel a pending watchdog and clear the throttled state. */
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);

/*
 * Allocate @n hlist heads, from the slab for small tables and from
 * whole pages otherwise; pair with qdisc_class_hash_free().
 */
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

/* Free a table from qdisc_class_hash_alloc(); @n picks the allocator. */
static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

/*
 * Double the class hash of @sch when its load factor exceeds 0.75 and
 * rehash all classes.  The swap happens under sch_tree_lock so readers
 * never see a half-built table; allocation failure leaves the old
 * table in place.
 */
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow); int qdisc_class_hash_init(struct Qdisc_class_hash *clhash) { unsigned int size = 4; clhash->hash = qdisc_class_hash_alloc(size); if (clhash->hash == NULL) return -ENOMEM; clhash->hashsize = size; clhash->hashmask = size - 1; clhash->hashelems = 0; return 0; } EXPORT_SYMBOL(qdisc_class_hash_init); void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash) { qdisc_class_hash_free(clhash->hash, clhash->hashsize); } EXPORT_SYMBOL(qdisc_class_hash_destroy); void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash, struct Qdisc_class_common *cl) { unsigned int h; INIT_HLIST_NODE(&cl->hnode); h = qdisc_class_hash(cl->classid, clhash->hashmask); hlist_add_head(&cl->hnode, &clhash->hash[h]); clhash->hashelems++; } EXPORT_SYMBOL(qdisc_class_hash_insert); void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash, struct Qdisc_class_common *cl) { hlist_del(&cl->hnode); clhash->hashelems--; } EXPORT_SYMBOL(qdisc_class_hash_remove); /* Allocate an unique handle from space managed by kernel * Possible range is [8000-FFFF]:0000 (0x8000 values) */ static u32 qdisc_alloc_handle(struct net_device *dev) { int i = 0x8000; static u32 autohandle = TC_H_MAKE(0x80000000U, 0); do { autohandle += TC_H_MAKE(0x10000U, 0); if (autohandle == TC_H_MAKE(TC_H_ROOT, 0)) autohandle = TC_H_MAKE(0x80000000U, 0); if (!qdisc_lookup(dev, autohandle)) return autohandle; cond_resched(); } while (--i > 0); return 0; } void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) { const struct Qdisc_class_ops *cops; unsigned long cl; u32 parentid; int drops; if (n == 0) return; drops = max_t(int, n, 0); while ((parentid = sch->parent)) { if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) return; sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); if (sch == NULL) { WARN_ON(parentid != TC_H_ROOT); return; } cops = sch->ops->cl_ops; if (cops->qlen_notify) { cl = cops->get(sch, parentid); cops->qlen_notify(sch, cl); cops->put(sch, cl); } sch->q.qlen -= n; 
sch->qstats.drops += drops; } } EXPORT_SYMBOL(qdisc_tree_decrease_qlen); static void notify_and_destroy(struct net *net, struct sk_buff *skb, struct nlmsghdr *n, u32 clid, struct Qdisc *old, struct Qdisc *new) { if (new || old) qdisc_notify(net, skb, n, clid, old, new); if (old) qdisc_destroy(old); } /* Graft qdisc "new" to class "classid" of qdisc "parent" or * to device "dev". * * When appropriate send a netlink notification using 'skb' * and "n". * * On success, destroy old qdisc. */ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, struct sk_buff *skb, struct nlmsghdr *n, u32 classid, struct Qdisc *new, struct Qdisc *old) { struct Qdisc *q = old; struct net *net = dev_net(dev); int err = 0; if (parent == NULL) { unsigned int i, num_q, ingress; ingress = 0; num_q = dev->num_tx_queues; if ((q && q->flags & TCQ_F_INGRESS) || (new && new->flags & TCQ_F_INGRESS)) { num_q = 1; ingress = 1; if (!dev_ingress_queue(dev)) return -ENOENT; } if (dev->flags & IFF_UP) dev_deactivate(dev); if (new && new->ops->attach) { new->ops->attach(new); num_q = 0; } for (i = 0; i < num_q; i++) { struct netdev_queue *dev_queue = dev_ingress_queue(dev); if (!ingress) dev_queue = netdev_get_tx_queue(dev, i); old = dev_graft_qdisc(dev_queue, new); if (new && i > 0) atomic_inc(&new->refcnt); if (!ingress) qdisc_destroy(old); } if (!ingress) { notify_and_destroy(net, skb, n, classid, dev->qdisc, new); if (new && !new->ops->attach) atomic_inc(&new->refcnt); dev->qdisc = new ? 
: &noop_qdisc; } else { notify_and_destroy(net, skb, n, classid, old, new); } if (dev->flags & IFF_UP) dev_activate(dev); } else { const struct Qdisc_class_ops *cops = parent->ops->cl_ops; err = -EOPNOTSUPP; if (cops && cops->graft) { unsigned long cl = cops->get(parent, classid); if (cl) { err = cops->graft(parent, cl, new, &old); cops->put(parent, cl); } else err = -ENOENT; } if (!err) notify_and_destroy(net, skb, n, classid, old, new); } return err; } /* lockdep annotation is needed for ingress; egress gets it only for name */ static struct lock_class_key qdisc_tx_lock; static struct lock_class_key qdisc_rx_lock; /* Allocate and initialize new qdisc. Parameters are passed via opt. */ static struct Qdisc * qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, struct Qdisc *p, u32 parent, u32 handle, struct nlattr **tca, int *errp) { int err; struct nlattr *kind = tca[TCA_KIND]; struct Qdisc *sch; struct Qdisc_ops *ops; struct qdisc_size_table *stab; ops = qdisc_lookup_ops(kind); #ifdef CONFIG_MODULES if (ops == NULL && kind != NULL) { char name[IFNAMSIZ]; if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { /* We dropped the RTNL semaphore in order to * perform the module load. So, even if we * succeeded in loading the module we have to * tell the caller to replay the request. We * indicate this using -EAGAIN. * We replay the request because the device may * go away in the mean time. */ rtnl_unlock(); request_module("sch_%s", name); rtnl_lock(); ops = qdisc_lookup_ops(kind); if (ops != NULL) { /* We will try again qdisc_lookup_ops, * so don't keep a reference. 
*/ module_put(ops->owner); err = -EAGAIN; goto err_out; } } } #endif err = -ENOENT; if (ops == NULL) goto err_out; sch = qdisc_alloc(dev_queue, ops); if (IS_ERR(sch)) { err = PTR_ERR(sch); goto err_out2; } sch->parent = parent; if (handle == TC_H_INGRESS) { sch->flags |= TCQ_F_INGRESS; handle = TC_H_MAKE(TC_H_INGRESS, 0); lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock); } else { if (handle == 0) { handle = qdisc_alloc_handle(dev); err = -ENOMEM; if (handle == 0) goto err_out3; } lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); if (!netif_is_multiqueue(dev)) sch->flags |= TCQ_F_ONETXQUEUE; } sch->handle = handle; if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { if (tca[TCA_STAB]) { stab = qdisc_get_stab(tca[TCA_STAB]); if (IS_ERR(stab)) { err = PTR_ERR(stab); goto err_out4; } rcu_assign_pointer(sch->stab, stab); } if (tca[TCA_RATE]) { spinlock_t *root_lock; err = -EOPNOTSUPP; if (sch->flags & TCQ_F_MQROOT) goto err_out4; if ((sch->parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS) && (!p || !(p->flags & TCQ_F_MQROOT))) root_lock = qdisc_root_sleeping_lock(sch); else root_lock = qdisc_lock(sch); err = gen_new_estimator(&sch->bstats, &sch->rate_est, root_lock, tca[TCA_RATE]); if (err) goto err_out4; } qdisc_list_add(sch); return sch; } err_out3: dev_put(dev); kfree((char *) sch - sch->padded); err_out2: module_put(ops->owner); err_out: *errp = err; return NULL; err_out4: /* * Any broken qdiscs that would require a ops->reset() here? * The qdisc was never in action so it shouldn't be necessary. 
*/ qdisc_put_stab(rtnl_dereference(sch->stab)); if (ops->destroy) ops->destroy(sch); goto err_out3; } static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) { struct qdisc_size_table *ostab, *stab = NULL; int err = 0; if (tca[TCA_OPTIONS]) { if (sch->ops->change == NULL) return -EINVAL; err = sch->ops->change(sch, tca[TCA_OPTIONS]); if (err) return err; } if (tca[TCA_STAB]) { stab = qdisc_get_stab(tca[TCA_STAB]); if (IS_ERR(stab)) return PTR_ERR(stab); } ostab = rtnl_dereference(sch->stab); rcu_assign_pointer(sch->stab, stab); qdisc_put_stab(ostab); if (tca[TCA_RATE]) { /* NB: ignores errors from replace_estimator because change can't be undone. */ if (sch->flags & TCQ_F_MQROOT) goto out; gen_replace_estimator(&sch->bstats, &sch->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); } out: return 0; } struct check_loop_arg { struct qdisc_walker w; struct Qdisc *p; int depth; }; static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w); static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth) { struct check_loop_arg arg; if (q->ops->cl_ops == NULL) return 0; arg.w.stop = arg.w.skip = arg.w.count = 0; arg.w.fn = check_loop_fn; arg.depth = depth; arg.p = p; q->ops->cl_ops->walk(q, &arg.w); return arg.w.stop ? -ELOOP : 0; } static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w) { struct Qdisc *leaf; const struct Qdisc_class_ops *cops = q->ops->cl_ops; struct check_loop_arg *arg = (struct check_loop_arg *)w; leaf = cops->leaf(q, cl); if (leaf) { if (leaf == arg->p || arg->depth > 7) return -ELOOP; return check_loop(leaf, arg->p, arg->depth + 1); } return 0; } /* * Delete/get qdisc. 
*/ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n) { struct net *net = sock_net(skb->sk); struct tcmsg *tcm = nlmsg_data(n); struct nlattr *tca[TCA_MAX + 1]; struct net_device *dev; u32 clid; struct Qdisc *q = NULL; struct Qdisc *p = NULL; int err; if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN)) return -EPERM; err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); if (err < 0) return err; dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return -ENODEV; clid = tcm->tcm_parent; if (clid) { if (clid != TC_H_ROOT) { if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { p = qdisc_lookup(dev, TC_H_MAJ(clid)); if (!p) return -ENOENT; q = qdisc_leaf(p, clid); } else if (dev_ingress_queue(dev)) { q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { q = dev->qdisc; } if (!q) return -ENOENT; if (tcm->tcm_handle && q->handle != tcm->tcm_handle) return -EINVAL; } else { q = qdisc_lookup(dev, tcm->tcm_handle); if (!q) return -ENOENT; } if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL; if (n->nlmsg_type == RTM_DELQDISC) { if (!clid) return -EINVAL; if (q->handle == 0) return -ENOENT; err = qdisc_graft(dev, p, skb, n, clid, NULL, q); if (err != 0) return err; } else { qdisc_notify(net, skb, n, clid, NULL, q); } return 0; } /* * Create/change qdisc. */ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n) { struct net *net = sock_net(skb->sk); struct tcmsg *tcm; struct nlattr *tca[TCA_MAX + 1]; struct net_device *dev; u32 clid; struct Qdisc *q, *p; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; replay: /* Reinit, just in case something touches this. 
*/ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); if (err < 0) return err; tcm = nlmsg_data(n); clid = tcm->tcm_parent; q = p = NULL; dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return -ENODEV; if (clid) { if (clid != TC_H_ROOT) { if (clid != TC_H_INGRESS) { p = qdisc_lookup(dev, TC_H_MAJ(clid)); if (!p) return -ENOENT; q = qdisc_leaf(p, clid); } else if (dev_ingress_queue_create(dev)) { q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { q = dev->qdisc; } /* It may be default qdisc, ignore it */ if (q && q->handle == 0) q = NULL; if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { if (tcm->tcm_handle) { if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) return -EEXIST; if (TC_H_MIN(tcm->tcm_handle)) return -EINVAL; q = qdisc_lookup(dev, tcm->tcm_handle); if (!q) goto create_n_graft; if (n->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL; if (q == p || (p && check_loop(q, p, 0))) return -ELOOP; atomic_inc(&q->refcnt); goto graft; } else { if (!q) goto create_n_graft; /* This magic test requires explanation. * * We know, that some child q is already * attached to this parent and have choice: * either to change it or to create/graft new one. * * 1. We are allowed to create/graft only * if CREATE and REPLACE flags are set. * * 2. If EXCL is set, requestor wanted to say, * that qdisc tcm_handle is not expected * to exist, so that we choose create/graft too. * * 3. The last case is when no flags are set. * Alas, it is sort of hole in API, we * cannot decide what to do unambiguously. * For now we select create/graft, if * user gave KIND, which does not match existing. 
*/ if ((n->nlmsg_flags & NLM_F_CREATE) && (n->nlmsg_flags & NLM_F_REPLACE) && ((n->nlmsg_flags & NLM_F_EXCL) || (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)))) goto create_n_graft; } } } else { if (!tcm->tcm_handle) return -EINVAL; q = qdisc_lookup(dev, tcm->tcm_handle); } /* Change qdisc parameters */ if (q == NULL) return -ENOENT; if (n->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL; err = qdisc_change(q, tca); if (err == 0) qdisc_notify(net, skb, n, clid, NULL, q); return err; create_n_graft: if (!(n->nlmsg_flags & NLM_F_CREATE)) return -ENOENT; if (clid == TC_H_INGRESS) { if (dev_ingress_queue(dev)) q = qdisc_create(dev, dev_ingress_queue(dev), p, tcm->tcm_parent, tcm->tcm_parent, tca, &err); else err = -ENOENT; } else { struct netdev_queue *dev_queue; if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue) dev_queue = p->ops->cl_ops->select_queue(p, tcm); else if (p) dev_queue = p->dev_queue; else dev_queue = netdev_get_tx_queue(dev, 0); q = qdisc_create(dev, dev_queue, p, tcm->tcm_parent, tcm->tcm_handle, tca, &err); } if (q == NULL) { if (err == -EAGAIN) goto replay; return err; } graft: err = qdisc_graft(dev, p, skb, n, clid, q, NULL); if (err) { if (q) qdisc_destroy(q); return err; } return 0; } static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, u32 portid, u32 seq, u16 flags, int event) { struct tcmsg *tcm; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct gnet_dump d; struct qdisc_size_table *stab; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); if (!nlh) goto out_nlmsg_trim; tcm = nlmsg_data(nlh); tcm->tcm_family = AF_UNSPEC; tcm->tcm__pad1 = 0; tcm->tcm__pad2 = 0; tcm->tcm_ifindex = qdisc_dev(q)->ifindex; tcm->tcm_parent = clid; tcm->tcm_handle = q->handle; tcm->tcm_info = atomic_read(&q->refcnt); if (nla_put_string(skb, TCA_KIND, q->ops->id)) goto nla_put_failure; if (q->ops->dump && q->ops->dump(q, skb) < 0) 
goto nla_put_failure; q->qstats.qlen = q->q.qlen; stab = rtnl_dereference(q->stab); if (stab && qdisc_dump_stab(skb, stab) < 0) goto nla_put_failure; if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, qdisc_root_sleeping_lock(q), &d) < 0) goto nla_put_failure; if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) goto nla_put_failure; if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 || gnet_stats_copy_queue(&d, &q->qstats) < 0) goto nla_put_failure; if (gnet_stats_finish_copy(&d) < 0) goto nla_put_failure; nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; out_nlmsg_trim: nla_put_failure: nlmsg_trim(skb, b); return -1; } static bool tc_qdisc_dump_ignore(struct Qdisc *q) { return (q->flags & TCQ_F_BUILTIN) ? true : false; } static int qdisc_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, struct Qdisc *old, struct Qdisc *new) { struct sk_buff *skb; u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (old && !tc_qdisc_dump_ignore(old)) { if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) goto err_out; } if (new && !tc_qdisc_dump_ignore(new)) { if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq, old ? 
NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) goto err_out; } if (skb->len) return rtnetlink_send(skb, net, portid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); err_out: kfree_skb(skb); return -EINVAL; } static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, struct netlink_callback *cb, int *q_idx_p, int s_q_idx) { int ret = 0, q_idx = *q_idx_p; struct Qdisc *q; if (!root) return 0; q = root; if (q_idx < s_q_idx) { q_idx++; } else { if (!tc_qdisc_dump_ignore(q) && tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) goto done; q_idx++; } list_for_each_entry(q, &root->list, list) { if (q_idx < s_q_idx) { q_idx++; continue; } if (!tc_qdisc_dump_ignore(q) && tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) goto done; q_idx++; } out: *q_idx_p = q_idx; return ret; done: ret = -1; goto out; } static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int idx, q_idx; int s_idx, s_q_idx; struct net_device *dev; s_idx = cb->args[0]; s_q_idx = q_idx = cb->args[1]; rcu_read_lock(); idx = 0; for_each_netdev_rcu(net, dev) { struct netdev_queue *dev_queue; if (idx < s_idx) goto cont; if (idx > s_idx) s_q_idx = 0; q_idx = 0; if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0) goto done; dev_queue = dev_ingress_queue(dev); if (dev_queue && tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0) goto done; cont: idx++; } done: rcu_read_unlock(); cb->args[0] = idx; cb->args[1] = q_idx; return skb->len; } /************************************************ * Traffic classes manipulation. 
* ************************************************/ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n) { struct net *net = sock_net(skb->sk); struct tcmsg *tcm = nlmsg_data(n); struct nlattr *tca[TCA_MAX + 1]; struct net_device *dev; struct Qdisc *q = NULL; const struct Qdisc_class_ops *cops; unsigned long cl = 0; unsigned long new_cl; u32 portid; u32 clid; u32 qid; int err; if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN)) return -EPERM; err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); if (err < 0) return err; dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return -ENODEV; /* parent == TC_H_UNSPEC - unspecified parent. parent == TC_H_ROOT - class is root, which has no parent. parent == X:0 - parent is root class. parent == X:Y - parent is a node in hierarchy. parent == 0:Y - parent is X:Y, where X:0 is qdisc. handle == 0:0 - generate handle from kernel pool. handle == 0:Y - class is X:Y, where X:0 is qdisc. handle == X:Y - clear. handle == X:0 - root class. */ /* Step 1. Determine qdisc handle X:0 */ portid = tcm->tcm_parent; clid = tcm->tcm_handle; qid = TC_H_MAJ(clid); if (portid != TC_H_ROOT) { u32 qid1 = TC_H_MAJ(portid); if (qid && qid1) { /* If both majors are known, they must be identical. */ if (qid != qid1) return -EINVAL; } else if (qid1) { qid = qid1; } else if (qid == 0) qid = dev->qdisc->handle; /* Now qid is genuine qdisc handle consistent * both with parent and child. * * TC_H_MAJ(portid) still may be unspecified, complete it now. */ if (portid) portid = TC_H_MAKE(qid, portid); } else { if (qid == 0) qid = dev->qdisc->handle; } /* OK. 
Locate qdisc */ q = qdisc_lookup(dev, qid); if (!q) return -ENOENT; /* An check that it supports classes */ cops = q->ops->cl_ops; if (cops == NULL) return -EINVAL; /* Now try to get class */ if (clid == 0) { if (portid == TC_H_ROOT) clid = qid; } else clid = TC_H_MAKE(qid, clid); if (clid) cl = cops->get(q, clid); if (cl == 0) { err = -ENOENT; if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE)) goto out; } else { switch (n->nlmsg_type) { case RTM_NEWTCLASS: err = -EEXIST; if (n->nlmsg_flags & NLM_F_EXCL) goto out; break; case RTM_DELTCLASS: err = -EOPNOTSUPP; if (cops->delete) err = cops->delete(q, cl); if (err == 0) tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS); goto out; case RTM_GETTCLASS: err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS); goto out; default: err = -EINVAL; goto out; } } new_cl = cl; err = -EOPNOTSUPP; if (cops->change) err = cops->change(q, clid, portid, tca, &new_cl); if (err == 0) tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS); out: if (cl) cops->put(q, cl); return err; } static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, unsigned long cl, u32 portid, u32 seq, u16 flags, int event) { struct tcmsg *tcm; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct gnet_dump d; const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); if (!nlh) goto out_nlmsg_trim; tcm = nlmsg_data(nlh); tcm->tcm_family = AF_UNSPEC; tcm->tcm__pad1 = 0; tcm->tcm__pad2 = 0; tcm->tcm_ifindex = qdisc_dev(q)->ifindex; tcm->tcm_parent = q->handle; tcm->tcm_handle = q->handle; tcm->tcm_info = 0; if (nla_put_string(skb, TCA_KIND, q->ops->id)) goto nla_put_failure; if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) goto nla_put_failure; if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, qdisc_root_sleeping_lock(q), &d) < 0) goto nla_put_failure; if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) goto nla_put_failure; if 
(gnet_stats_finish_copy(&d) < 0) goto nla_put_failure; nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; out_nlmsg_trim: nla_put_failure: nlmsg_trim(skb, b); return -1; } static int tclass_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct Qdisc *q, unsigned long cl, int event) { struct sk_buff *skb; u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) { kfree_skb(skb); return -EINVAL; } return rtnetlink_send(skb, net, portid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); } struct qdisc_dump_args { struct qdisc_walker w; struct sk_buff *skb; struct netlink_callback *cb; }; static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg) { struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg; return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid, a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS); } static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb, struct tcmsg *tcm, struct netlink_callback *cb, int *t_p, int s_t) { struct qdisc_dump_args arg; if (tc_qdisc_dump_ignore(q) || *t_p < s_t || !q->ops->cl_ops || (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle)) { (*t_p)++; return 0; } if (*t_p > s_t) memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); arg.w.fn = qdisc_class_dump; arg.skb = skb; arg.cb = cb; arg.w.stop = 0; arg.w.skip = cb->args[1]; arg.w.count = 0; q->ops->cl_ops->walk(q, &arg.w); cb->args[1] = arg.w.count; if (arg.w.stop) return -1; (*t_p)++; return 0; } static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, struct tcmsg *tcm, struct netlink_callback *cb, int *t_p, int s_t) { struct Qdisc *q; if (!root) return 0; if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0) return -1; list_for_each_entry(q, &root->list, list) { if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) return -1; 
} return 0; } static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) { struct tcmsg *tcm = nlmsg_data(cb->nlh); struct net *net = sock_net(skb->sk); struct netdev_queue *dev_queue; struct net_device *dev; int t, s_t; if (nlmsg_len(cb->nlh) < sizeof(*tcm)) return 0; dev = dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return 0; s_t = cb->args[0]; t = 0; if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0) goto done; dev_queue = dev_ingress_queue(dev); if (dev_queue && tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0) goto done; done: cb->args[0] = t; dev_put(dev); return skb->len; } /* Main classifier routine: scans classifier chain attached * to this qdisc, (optionally) tests for protocol and asks * specific classifiers. */ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { __be16 protocol = skb->protocol; int err; for (; tp; tp = tp->next) { if (tp->protocol != protocol && tp->protocol != htons(ETH_P_ALL)) continue; err = tp->classify(skb, tp, res); if (err >= 0) { #ifdef CONFIG_NET_CLS_ACT if (err != TC_ACT_RECLASSIFY && skb->tc_verd) skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0); #endif return err; } } return -1; } EXPORT_SYMBOL(tc_classify_compat); int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { int err = 0; #ifdef CONFIG_NET_CLS_ACT const struct tcf_proto *otp = tp; reclassify: #endif err = tc_classify_compat(skb, tp, res); #ifdef CONFIG_NET_CLS_ACT if (err == TC_ACT_RECLASSIFY) { u32 verd = G_TC_VERD(skb->tc_verd); tp = otp; if (verd++ >= MAX_REC_LOOP) { net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n", tp->q->ops->id, tp->prio & 0xffff, ntohs(tp->protocol)); return TC_ACT_SHOT; } skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); goto reclassify; } #endif return err; } EXPORT_SYMBOL(tc_classify); void tcf_destroy(struct tcf_proto *tp) { tp->ops->destroy(tp); 
module_put(tp->ops->owner); kfree(tp); } void tcf_destroy_chain(struct tcf_proto **fl) { struct tcf_proto *tp; while ((tp = *fl) != NULL) { *fl = tp->next; tcf_destroy(tp); } } EXPORT_SYMBOL(tcf_destroy_chain); #ifdef CONFIG_PROC_FS static int psched_show(struct seq_file *seq, void *v) { struct timespec ts; hrtimer_get_res(CLOCK_MONOTONIC, &ts); seq_printf(seq, "%08x %08x %08x %08x\n", (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1), 1000000, (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts))); return 0; } static int psched_open(struct inode *inode, struct file *file) { return single_open(file, psched_show, NULL); } static const struct file_operations psched_fops = { .owner = THIS_MODULE, .open = psched_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __net_init psched_net_init(struct net *net) { struct proc_dir_entry *e; e = proc_create("psched", 0, net->proc_net, &psched_fops); if (e == NULL) return -ENOMEM; return 0; } static void __net_exit psched_net_exit(struct net *net) { remove_proc_entry("psched", net->proc_net); } #else static int __net_init psched_net_init(struct net *net) { return 0; } static void __net_exit psched_net_exit(struct net *net) { } #endif static struct pernet_operations psched_net_ops = { .init = psched_net_init, .exit = psched_net_exit, }; static int __init pktsched_init(void) { int err; err = register_pernet_subsys(&psched_net_ops); if (err) { pr_err("pktsched_init: " "cannot initialize per netns operations\n"); return err; } register_qdisc(&pfifo_fast_ops); register_qdisc(&pfifo_qdisc_ops); register_qdisc(&bfifo_qdisc_ops); register_qdisc(&pfifo_head_drop_qdisc_ops); register_qdisc(&mq_qdisc_ops); rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL); rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL); rtnl_register(PF_UNSPEC, 
RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL); return 0; } subsys_initcall(pktsched_init);
gpl-2.0
nmathewson/linux-2.6
drivers/block/virtio_blk.c
315
13539
//#define DEBUG #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/virtio.h> #include <linux/virtio_blk.h> #include <linux/scatterlist.h> #define PART_BITS 4 static int major, index; struct virtio_blk { spinlock_t lock; struct virtio_device *vdev; struct virtqueue *vq; /* The disk structure for the kernel. */ struct gendisk *disk; /* Request tracking. */ struct list_head reqs; mempool_t *pool; /* What host tells us, plus 2 for header & tailer. */ unsigned int sg_elems; /* Scatterlist: can be too big for stack. */ struct scatterlist sg[/*sg_elems*/]; }; struct virtblk_req { struct list_head list; struct request *req; struct virtio_blk_outhdr out_hdr; struct virtio_scsi_inhdr in_hdr; u8 status; }; static void blk_done(struct virtqueue *vq) { struct virtio_blk *vblk = vq->vdev->priv; struct virtblk_req *vbr; unsigned int len; unsigned long flags; spin_lock_irqsave(&vblk->lock, flags); while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { int error; switch (vbr->status) { case VIRTIO_BLK_S_OK: error = 0; break; case VIRTIO_BLK_S_UNSUPP: error = -ENOTTY; break; default: error = -EIO; break; } switch (vbr->req->cmd_type) { case REQ_TYPE_BLOCK_PC: vbr->req->resid_len = vbr->in_hdr.residual; vbr->req->sense_len = vbr->in_hdr.sense_len; vbr->req->errors = vbr->in_hdr.errors; break; case REQ_TYPE_SPECIAL: vbr->req->errors = (error != 0); break; default: break; } __blk_end_request_all(vbr->req, error); list_del(&vbr->list); mempool_free(vbr, vblk->pool); } /* In case queue is stopped waiting for more buffers. */ blk_start_queue(vblk->disk->queue); spin_unlock_irqrestore(&vblk->lock, flags); } static bool do_req(struct request_queue *q, struct virtio_blk *vblk, struct request *req) { unsigned long num, out = 0, in = 0; struct virtblk_req *vbr; vbr = mempool_alloc(vblk->pool, GFP_ATOMIC); if (!vbr) /* When another request finishes we'll try again. 
*/ return false; vbr->req = req; if (req->cmd_flags & REQ_FLUSH) { vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); } else { switch (req->cmd_type) { case REQ_TYPE_FS: vbr->out_hdr.type = 0; vbr->out_hdr.sector = blk_rq_pos(vbr->req); vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; case REQ_TYPE_BLOCK_PC: vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; case REQ_TYPE_SPECIAL: vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; default: /* We don't put anything else in the queue. */ BUG(); } } sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); /* * If this is a packet command we need a couple of additional headers. * Behind the normal outhdr we put a segment with the scsi command * block, and before the normal inhdr we put the sense data and the * inhdr with additional status information before the normal inhdr. 
*/ if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len); num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) { sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, sizeof(vbr->in_hdr)); } sg_set_buf(&vblk->sg[num + out + in++], &vbr->status, sizeof(vbr->status)); if (num) { if (rq_data_dir(vbr->req) == WRITE) { vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; out += num; } else { vbr->out_hdr.type |= VIRTIO_BLK_T_IN; in += num; } } if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { mempool_free(vbr, vblk->pool); return false; } list_add_tail(&vbr->list, &vblk->reqs); return true; } static void do_virtblk_request(struct request_queue *q) { struct virtio_blk *vblk = q->queuedata; struct request *req; unsigned int issued = 0; while ((req = blk_peek_request(q)) != NULL) { BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); /* If this request fails, stop queue and wait for something to finish to restart it. 
*/ if (!do_req(q, vblk, req)) { blk_stop_queue(q); break; } blk_start_request(req); issued++; } if (issued) virtqueue_kick(vblk->vq); } /* return id (s/n) string for *disk to *id_str */ static int virtblk_get_id(struct gendisk *disk, char *id_str) { struct virtio_blk *vblk = disk->private_data; struct request *req; struct bio *bio; int err; bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); if (IS_ERR(bio)) return PTR_ERR(bio); req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); if (IS_ERR(req)) { bio_put(bio); return PTR_ERR(req); } req->cmd_type = REQ_TYPE_SPECIAL; err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); blk_put_request(req); return err; } static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long data) { struct gendisk *disk = bdev->bd_disk; struct virtio_blk *vblk = disk->private_data; /* * Only allow the generic SCSI ioctls if the host can support it. */ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) return -ENOTTY; return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, (void __user *)data); } /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { struct virtio_blk *vblk = bd->bd_disk->private_data; struct virtio_blk_geometry vgeo; int err; /* see if the host passed in geometry config */ err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY, offsetof(struct virtio_blk_config, geometry), &vgeo); if (!err) { geo->heads = vgeo.heads; geo->sectors = vgeo.sectors; geo->cylinders = vgeo.cylinders; } else { /* some standard values, similar to sd */ geo->heads = 1 << 6; geo->sectors = 1 << 5; geo->cylinders = get_capacity(bd->bd_disk) >> 11; } return 0; } static const struct block_device_operations virtblk_fops = { .ioctl = virtblk_ioctl, .owner = THIS_MODULE, .getgeo = virtblk_getgeo, }; static int index_to_minor(int index) { return index << PART_BITS; } 
static ssize_t virtblk_serial_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); int err; /* sysfs gives us a PAGE_SIZE buffer */ BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES); buf[VIRTIO_BLK_ID_BYTES] = '\0'; err = virtblk_get_id(disk, buf); if (!err) return strlen(buf); if (err == -EIO) /* Unsupported? Make it empty. */ return 0; return err; } DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); static int __devinit virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; struct request_queue *q; int err; u64 cap; u32 v, blk_size, sg_elems, opt_io_size; u16 min_io_size; u8 physical_block_exp, alignment_offset; if (index_to_minor(index) >= 1 << MINORBITS) return -ENOSPC; /* We need to know how many segments before we allocate. */ err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, offsetof(struct virtio_blk_config, seg_max), &sg_elems); /* We need at least one SG element, whatever they say. */ if (err || !sg_elems) sg_elems = 1; /* We need an extra sg elements at head and tail. */ sg_elems += 2; vdev->priv = vblk = kmalloc(sizeof(*vblk) + sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL); if (!vblk) { err = -ENOMEM; goto out; } INIT_LIST_HEAD(&vblk->reqs); spin_lock_init(&vblk->lock); vblk->vdev = vdev; vblk->sg_elems = sg_elems; sg_init_table(vblk->sg, vblk->sg_elems); /* We expect one virtqueue, for output. */ vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests"); if (IS_ERR(vblk->vq)) { err = PTR_ERR(vblk->vq); goto out_free_vblk; } vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req)); if (!vblk->pool) { err = -ENOMEM; goto out_free_vq; } /* FIXME: How many partitions? How long is a piece of string? 
*/ vblk->disk = alloc_disk(1 << PART_BITS); if (!vblk->disk) { err = -ENOMEM; goto out_mempool; } q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); if (!q) { err = -ENOMEM; goto out_put_disk; } q->queuedata = vblk; if (index < 26) { sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); } else if (index < (26 + 1) * 26) { sprintf(vblk->disk->disk_name, "vd%c%c", 'a' + index / 26 - 1, 'a' + index % 26); } else { const unsigned int m1 = (index / 26 - 1) / 26 - 1; const unsigned int m2 = (index / 26 - 1) % 26; const unsigned int m3 = index % 26; sprintf(vblk->disk->disk_name, "vd%c%c%c", 'a' + m1, 'a' + m2, 'a' + m3); } vblk->disk->major = major; vblk->disk->first_minor = index_to_minor(index); vblk->disk->private_data = vblk; vblk->disk->fops = &virtblk_fops; vblk->disk->driverfs_dev = &vdev->dev; index++; /* configure queue flush support */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) blk_queue_flush(q, REQ_FLUSH); /* If disk is read-only in the host, the guest should obey */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) set_disk_ro(vblk->disk, 1); /* Host must always specify the capacity. */ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), &cap, sizeof(cap)); /* If capacity is too big, truncate with warning. */ if ((sector_t)cap != cap) { dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", (unsigned long long)cap); cap = (sector_t)-1; } set_capacity(vblk->disk, cap); /* We can handle whatever the host told us to handle. */ blk_queue_max_segments(q, vblk->sg_elems-2); /* No need to bounce any requests */ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); /* No real sector limit. */ blk_queue_max_hw_sectors(q, -1U); /* Host can optionally specify maximum segment size and number of * segments. 
*/ err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, offsetof(struct virtio_blk_config, size_max), &v); if (!err) blk_queue_max_segment_size(q, v); else blk_queue_max_segment_size(q, -1U); /* Host can optionally specify the block size of the device */ err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, offsetof(struct virtio_blk_config, blk_size), &blk_size); if (!err) blk_queue_logical_block_size(q, blk_size); else blk_size = queue_logical_block_size(q); /* Use topology information if available */ err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, physical_block_exp), &physical_block_exp); if (!err && physical_block_exp) blk_queue_physical_block_size(q, blk_size * (1 << physical_block_exp)); err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, alignment_offset), &alignment_offset); if (!err && alignment_offset) blk_queue_alignment_offset(q, blk_size * alignment_offset); err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, min_io_size), &min_io_size); if (!err && min_io_size) blk_queue_io_min(q, blk_size * min_io_size); err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, opt_io_size), &opt_io_size); if (!err && opt_io_size) blk_queue_io_opt(q, blk_size * opt_io_size); add_disk(vblk->disk); err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); if (err) goto out_del_disk; return 0; out_del_disk: del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); out_put_disk: put_disk(vblk->disk); out_mempool: mempool_destroy(vblk->pool); out_free_vq: vdev->config->del_vqs(vdev); out_free_vblk: kfree(vblk); out: return err; } static void __devexit virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; /* Nothing should be pending. */ BUG_ON(!list_empty(&vblk->reqs)); /* Stop all the virtqueues. 
*/ vdev->config->reset(vdev); del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); put_disk(vblk->disk); mempool_destroy(vblk->pool); vdev->config->del_vqs(vdev); kfree(vblk); } static const struct virtio_device_id id_table[] = { { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY }; /* * virtio_blk causes spurious section mismatch warning by * simultaneously referring to a __devinit and a __devexit function. * Use __refdata to avoid this warning. */ static struct virtio_driver __refdata virtio_blk = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtblk_probe, .remove = __devexit_p(virtblk_remove), }; static int __init init(void) { major = register_blkdev(0, "virtblk"); if (major < 0) return major; return register_virtio_driver(&virtio_blk); } static void __exit fini(void) { unregister_blkdev(major, "virtblk"); unregister_virtio_driver(&virtio_blk); } module_init(init); module_exit(fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio block driver"); MODULE_LICENSE("GPL");
gpl-2.0
TeamFreedom/FreedomKernel
drivers/ide/ide-cs.c
315
12331
/*====================================================================== A driver for PCMCIA IDE/ATA disk cards The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/ide.h> #include <linux/major.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/system.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #define DRV_NAME "ide-cs" /*====================================================================*/ /* Module parameters */ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver"); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/ typedef struct ide_info_t { struct pcmcia_device *p_dev; struct ide_host *host; int ndev; } ide_info_t; static void ide_release(struct pcmcia_device *); static int ide_config(struct pcmcia_device *); static void ide_detach(struct pcmcia_device *p_dev); static int ide_probe(struct pcmcia_device *link) { ide_info_t *info; dev_dbg(&link->dev, "ide_attach()\n"); /* Create new ide device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->p_dev = link; link->priv = info; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO | CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; return ide_config(link); } /* ide_attach */ static void ide_detach(struct pcmcia_device *link) { ide_info_t *info = link->priv; dev_dbg(&link->dev, "ide_detach(0x%p)\n", link); ide_release(link); kfree(info); } /* ide_detach */ static const struct ide_port_ops idecs_port_ops = { .quirkproc = ide_undecoded_slave, }; static const struct ide_port_info idecs_port_info = { .port_ops = &idecs_port_ops, .host_flags = IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, .chipset = ide_pci, }; static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, 
unsigned long irq, struct pcmcia_device *handle) { struct ide_host *host; ide_hwif_t *hwif; int i, rc; struct ide_hw hw, *hws[] = { &hw }; if (!request_region(io, 8, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", DRV_NAME, io, io + 7); return NULL; } if (!request_region(ctl, 1, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n", DRV_NAME, ctl); release_region(io, 8); return NULL; } memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw, io, ctl); hw.irq = irq; hw.dev = &handle->dev; rc = ide_host_add(&idecs_port_info, hws, 1, &host); if (rc) goto out_release; hwif = host->ports[0]; if (hwif->present) return host; /* retry registration in case device is still spinning up */ for (i = 0; i < 10; i++) { msleep(100); ide_port_scan(hwif); if (hwif->present) return host; } return host; out_release: release_region(ctl, 1); release_region(io, 8); return NULL; } static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data) { int *is_kme = priv_data; if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; } pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; if (pdev->resource[1]->end) { pdev->resource[0]->end = 8; pdev->resource[1]->end = (*is_kme) ? 
2 : 1; } else { if (pdev->resource[0]->end < 16) return -ENODEV; } return pcmcia_request_io(pdev); } static int ide_config(struct pcmcia_device *link) { ide_info_t *info = link->priv; int ret = 0, is_kme = 0; unsigned long io_base, ctl_base; struct ide_host *host; dev_dbg(&link->dev, "ide_config(0x%p)\n", link); is_kme = ((link->manf_id == MANFID_KME) && ((link->card_id == PRODID_KME_KXLC005_A) || (link->card_id == PRODID_KME_KXLC005_B))); if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme)) { link->config_flags &= ~CONF_AUTO_CHECK_VCC; if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme)) goto failed; /* No suitable config found */ } io_base = link->resource[0]->start; if (link->resource[1]->end) ctl_base = link->resource[1]->start; else ctl_base = link->resource[0]->start + 0x0e; if (!link->irq) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; /* disable drive interrupts during IDE probe */ outb(0x02, ctl_base); /* special setup for KXLC005 card */ if (is_kme) outb(0x81, ctl_base+1); host = idecs_register(io_base, ctl_base, link->irq, link); if (host == NULL && resource_size(link->resource[0]) == 0x20) { outb(0x02, ctl_base + 0x10); host = idecs_register(io_base + 0x10, ctl_base + 0x10, link->irq, link); } if (host == NULL) goto failed; info->ndev = 1; info->host = host; dev_info(&link->dev, "ide-cs: hd%c: Vpp = %d.%d\n", 'a' + host->ports[0]->index * 2, link->vpp / 10, link->vpp % 10); return 0; failed: ide_release(link); return -ENODEV; } /* ide_config */ static void ide_release(struct pcmcia_device *link) { ide_info_t *info = link->priv; struct ide_host *host = info->host; dev_dbg(&link->dev, "ide_release(0x%p)\n", link); if (info->ndev) { ide_hwif_t *hwif = host->ports[0]; unsigned long data_addr, ctl_addr; data_addr = hwif->io_ports.data_addr; ctl_addr = hwif->io_ports.ctl_addr; ide_host_remove(host); info->ndev = 0; release_region(ctl_addr, 1); release_region(data_addr, 8); } pcmcia_disable_device(link); } /* 
ide_release */ static struct pcmcia_device_id ide_ids[] = { PCMCIA_DEVICE_FUNC_ID(4), PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */ PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */ PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */ PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904), PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */ PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */ PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */ PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */ PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0), PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74), PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf), PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591), PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), 
PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee), PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c), PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1), PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883), PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d), PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6), PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003), PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 
0x709b1bf1, 0x7558f133), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, ide_ids); static struct pcmcia_driver ide_cs_driver = { .owner = THIS_MODULE, .name = "ide-cs", .probe = ide_probe, .remove = ide_detach, .id_table = ide_ids, }; static int __init init_ide_cs(void) { return pcmcia_register_driver(&ide_cs_driver); } static void __exit exit_ide_cs(void) { pcmcia_unregister_driver(&ide_cs_driver); } late_initcall(init_ide_cs); module_exit(exit_ide_cs);
gpl-2.0
dewadg/mako-kernel
lib/genalloc.c
571
12046
/* * Basic general purpose allocator for managing special purpose * memory, for example, memory that is not managed by the regular * kmalloc/kfree interface. Uses for this includes on-device special * memory, uncached memory etc. * * It is safe to use the allocator in NMI handlers and other special * unblockable contexts that could otherwise deadlock on locks. This * is implemented by using atomic operations and retries on any * conflicts. The disadvantage is that there may be livelocks in * extreme cases. For better scalability, one allocator can be used * for each CPU. * * The lockless operation only works if there is enough memory * available. If new memory is added to the pool a lock has to be * still taken. So any user relying on locklessness has to ensure * that sufficient memory is preallocated. * * The basic atomic operation of this allocator is cmpxchg on long. * On architectures that don't have NMI-safe cmpxchg implementation, * the allocator can NOT be used in NMI handler. So code uses the * allocator in NMI handler should depend on * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. * * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. 
*/ #include <linux/slab.h> #include <linux/export.h> #include <linux/bitmap.h> #include <linux/rculist.h> #include <linux/interrupt.h> #include <linux/genalloc.h> #include <linux/vmalloc.h> static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set) { unsigned long val, nval; nval = *addr; do { val = nval; if (val & mask_to_set) return -EBUSY; cpu_relax(); } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val); return 0; } static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear) { unsigned long val, nval; nval = *addr; do { val = nval; if ((val & mask_to_clear) != mask_to_clear) return -EBUSY; cpu_relax(); } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val); return 0; } /* * bitmap_set_ll - set the specified number of bits at the specified position * @map: pointer to a bitmap * @start: a bit position in @map * @nr: number of bits to set * * Set @nr bits start from @start in @map lock-lessly. Several users * can set/clear the same bitmap simultaneously without lock. If two * users set the same bit, one user will return remain bits, otherwise * return 0. */ static int bitmap_set_ll(unsigned long *map, int start, int nr) { unsigned long *p = map + BIT_WORD(start); const int size = start + nr; int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); while (nr - bits_to_set >= 0) { if (set_bits_ll(p, mask_to_set)) return nr; nr -= bits_to_set; bits_to_set = BITS_PER_LONG; mask_to_set = ~0UL; p++; } if (nr) { mask_to_set &= BITMAP_LAST_WORD_MASK(size); if (set_bits_ll(p, mask_to_set)) return nr; } return 0; } /* * bitmap_clear_ll - clear the specified number of bits at the specified position * @map: pointer to a bitmap * @start: a bit position in @map * @nr: number of bits to set * * Clear @nr bits start from @start in @map lock-lessly. Several users * can set/clear the same bitmap simultaneously without lock. 
If two * users clear the same bit, one user will return remain bits, * otherwise return 0. */ static int bitmap_clear_ll(unsigned long *map, int start, int nr) { unsigned long *p = map + BIT_WORD(start); const int size = start + nr; int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); while (nr - bits_to_clear >= 0) { if (clear_bits_ll(p, mask_to_clear)) return nr; nr -= bits_to_clear; bits_to_clear = BITS_PER_LONG; mask_to_clear = ~0UL; p++; } if (nr) { mask_to_clear &= BITMAP_LAST_WORD_MASK(size); if (clear_bits_ll(p, mask_to_clear)) return nr; } return 0; } /** * gen_pool_create - create a new special memory pool * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents * @nid: node id of the node the pool structure should be allocated on, or -1 * * Create a new special memory pool that can be used to manage special purpose * memory not managed by the regular kmalloc/kfree interface. */ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) { struct gen_pool *pool; pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); if (pool != NULL) { spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->chunks); pool->min_alloc_order = min_alloc_order; } return pool; } EXPORT_SYMBOL(gen_pool_create); /** * gen_pool_add_virt - add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to * @virt: virtual starting address of memory chunk to add to pool * @phys: physical starting address of memory chunk to add to pool * @size: size in bytes of the memory chunk to add to pool * @nid: node id of the node the chunk structure and bitmap should be * allocated on, or -1 * * Add a new chunk of special memory to the specified pool. * * Returns 0 on success or a -ve errno on failure. 
*/ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, size_t size, int nid) { struct gen_pool_chunk *chunk; int nbits = size >> pool->min_alloc_order; int nbytes = sizeof(struct gen_pool_chunk) + (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE; if (nbytes <= PAGE_SIZE) chunk = kmalloc_node(nbytes, __GFP_ZERO, nid); else chunk = vmalloc(nbytes); if (unlikely(chunk == NULL)) return -ENOMEM; if (nbytes > PAGE_SIZE) memset(chunk, 0, nbytes); chunk->phys_addr = phys; chunk->start_addr = virt; chunk->end_addr = virt + size; atomic_set(&chunk->avail, size); spin_lock(&pool->lock); list_add_rcu(&chunk->next_chunk, &pool->chunks); spin_unlock(&pool->lock); return 0; } EXPORT_SYMBOL(gen_pool_add_virt); /** * gen_pool_virt_to_phys - return the physical address of memory * @pool: pool to allocate from * @addr: starting address of memory * * Returns the physical address on success, or -1 on error. */ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr) { struct gen_pool_chunk *chunk; phys_addr_t paddr = -1; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { if (addr >= chunk->start_addr && addr < chunk->end_addr) { paddr = chunk->phys_addr + (addr - chunk->start_addr); break; } } rcu_read_unlock(); return paddr; } EXPORT_SYMBOL(gen_pool_virt_to_phys); /** * gen_pool_destroy - destroy a special memory pool * @pool: pool to destroy * * Destroy the specified special memory pool. Verifies that there are no * outstanding allocations. 
*/ void gen_pool_destroy(struct gen_pool *pool) { struct list_head *_chunk, *_next_chunk; struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; int bit, end_bit; list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { int nbytes; chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); list_del(&chunk->next_chunk); end_bit = (chunk->end_addr - chunk->start_addr) >> order; nbytes = sizeof(struct gen_pool_chunk) + (end_bit + BITS_PER_BYTE - 1) / BITS_PER_BYTE; bit = find_next_bit(chunk->bits, end_bit, 0); BUG_ON(bit < end_bit); if (nbytes <= PAGE_SIZE) kfree(chunk); else vfree(chunk); } kfree(pool); return; } EXPORT_SYMBOL(gen_pool_destroy); /** * gen_pool_alloc_aligned - allocate special memory from the pool * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @alignment_order: Order the allocated space should be * aligned to (eg. 20 means allocated space * must be aligned to 1MiB). * * Allocate the requested number of bytes from the specified pool. * Uses a first-fit algorithm. Can not be used in NMI handler on * architectures without NMI-safe cmpxchg implementation. 
*/ unsigned long gen_pool_alloc_aligned(struct gen_pool *pool, size_t size, unsigned alignment_order) { struct gen_pool_chunk *chunk; unsigned long addr = 0, align_mask = 0; int order = pool->min_alloc_order; int nbits, start_bit = 0, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); #endif if (size == 0) return 0; if (alignment_order > order) align_mask = (1 << (alignment_order - order)) - 1; nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { unsigned long chunk_size; if (size > atomic_read(&chunk->avail)) continue; chunk_size = (chunk->end_addr - chunk->start_addr) >> order; retry: start_bit = bitmap_find_next_zero_area_off(chunk->bits, chunk_size, 0, nbits, align_mask, chunk->start_addr >> order); if (start_bit >= chunk_size) continue; remain = bitmap_set_ll(chunk->bits, start_bit, nbits); if (remain) { remain = bitmap_clear_ll(chunk->bits, start_bit, nbits - remain); BUG_ON(remain); goto retry; } addr = chunk->start_addr + ((unsigned long)start_bit << order); size = nbits << pool->min_alloc_order; atomic_sub(size, &chunk->avail); break; } rcu_read_unlock(); return addr; } EXPORT_SYMBOL(gen_pool_alloc_aligned); /** * gen_pool_free - free allocated special memory back to the pool * @pool: pool to free to * @addr: starting address of memory to free back to pool * @size: size in bytes of memory to free * * Free previously allocated special memory back to the specified * pool. Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. 
*/ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) { struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; int start_bit, nbits, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); #endif nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { if (addr >= chunk->start_addr && addr < chunk->end_addr) { BUG_ON(addr + size > chunk->end_addr); start_bit = (addr - chunk->start_addr) >> order; remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); BUG_ON(remain); size = nbits << order; atomic_add(size, &chunk->avail); rcu_read_unlock(); return; } } rcu_read_unlock(); BUG(); } EXPORT_SYMBOL(gen_pool_free); /** * gen_pool_for_each_chunk - call func for every chunk of generic memory pool * @pool: the generic memory pool * @func: func to call * @data: additional data used by @func * * Call @func for every chunk of generic memory pool. The @func is * called with rcu_read_lock held. */ void gen_pool_for_each_chunk(struct gen_pool *pool, void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data), void *data) { struct gen_pool_chunk *chunk; rcu_read_lock(); list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) func(pool, chunk, data); rcu_read_unlock(); } EXPORT_SYMBOL(gen_pool_for_each_chunk); /** * gen_pool_avail - get available free space of the pool * @pool: pool to get available free space * * Return available free space of the specified pool. */ size_t gen_pool_avail(struct gen_pool *pool) { struct gen_pool_chunk *chunk; size_t avail = 0; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) avail += atomic_read(&chunk->avail); rcu_read_unlock(); return avail; } EXPORT_SYMBOL_GPL(gen_pool_avail); /** * gen_pool_size - get size in bytes of memory managed by the pool * @pool: pool to get size * * Return size in bytes of memory managed by the pool. 
*/ size_t gen_pool_size(struct gen_pool *pool) { struct gen_pool_chunk *chunk; size_t size = 0; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) size += chunk->end_addr - chunk->start_addr; rcu_read_unlock(); return size; } EXPORT_SYMBOL_GPL(gen_pool_size);
gpl-2.0
drod2169/Linux-3.11.x
arch/arm/mach-orion5x/terastation_pro2-setup.c
2107
9496
/* * Buffalo Terastation Pro II/Live Board Setup * * Maintainer: Sylver Bruneau <sylver.bruneau@googlemail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/i2c.h> #include <linux/serial_reg.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * Terastation Pro 2/Live Info ****************************************************************************/ /* * Terastation Pro 2 hardware : * - Marvell 88F5281-D0 * - Marvell 88SX6042 SATA controller (PCI) * - Marvell 88E1118 Gigabit Ethernet PHY * - 256KB NOR flash * - 128MB of DDR RAM * - PCIe port (not equipped) */ /* * 256K NOR flash Device bus boot chip select */ #define TSP2_NOR_BOOT_BASE 0xf4000000 #define TSP2_NOR_BOOT_SIZE SZ_256K /***************************************************************************** * 256KB NOR Flash on BOOT Device ****************************************************************************/ static struct physmap_flash_data tsp2_nor_flash_data = { .width = 1, }; static struct resource tsp2_nor_flash_resource = { .flags = IORESOURCE_MEM, .start = TSP2_NOR_BOOT_BASE, .end = TSP2_NOR_BOOT_BASE + TSP2_NOR_BOOT_SIZE - 1, }; static struct platform_device tsp2_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &tsp2_nor_flash_data, }, .num_resources = 1, .resource = &tsp2_nor_flash_resource, }; 
/***************************************************************************** * PCI ****************************************************************************/ #define TSP2_PCI_SLOT0_OFFS 7 #define TSP2_PCI_SLOT0_IRQ_PIN 11 void __init tsp2_pci_preinit(void) { int pin; /* * Configure PCI GPIO IRQ pins */ pin = TSP2_PCI_SLOT0_IRQ_PIN; if (gpio_request(pin, "PCI Int1") == 0) { if (gpio_direction_input(pin) == 0) { irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); } else { printk(KERN_ERR "tsp2_pci_preinit failed " "to set_irq_type pin %d\n", pin); gpio_free(pin); } } else { printk(KERN_ERR "tsp2_pci_preinit failed to " "gpio_request %d\n", pin); } } static int __init tsp2_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; /* * Check for devices with hard-wired IRQs. */ irq = orion5x_pci_map_irq(dev, slot, pin); if (irq != -1) return irq; /* * PCI IRQs are connected via GPIOs. */ if (slot == TSP2_PCI_SLOT0_OFFS) return gpio_to_irq(TSP2_PCI_SLOT0_IRQ_PIN); return -1; } static struct hw_pci tsp2_pci __initdata = { .nr_controllers = 2, .preinit = tsp2_pci_preinit, .setup = orion5x_pci_sys_setup, .scan = orion5x_pci_sys_scan_bus, .map_irq = tsp2_pci_map_irq, }; static int __init tsp2_pci_init(void) { if (machine_is_terastation_pro2()) pci_common_init(&tsp2_pci); return 0; } subsys_initcall(tsp2_pci_init); /***************************************************************************** * Ethernet ****************************************************************************/ static struct mv643xx_eth_platform_data tsp2_eth_data = { .phy_addr = 0, }; /***************************************************************************** * RTC 5C372a on I2C bus ****************************************************************************/ #define TSP2_RTC_GPIO 9 static struct i2c_board_info __initdata tsp2_i2c_rtc = { I2C_BOARD_INFO("rs5c372a", 0x32), }; /***************************************************************************** * Terastation Pro II specific 
power off method via UART1-attached * microcontroller ****************************************************************************/ #define UART1_REG(x) (UART1_VIRT_BASE + ((UART_##x) << 2)) static int tsp2_miconread(unsigned char *buf, int count) { int i; int timeout; for (i = 0; i < count; i++) { timeout = 10; while (!(readl(UART1_REG(LSR)) & UART_LSR_DR)) { if (--timeout == 0) break; udelay(1000); } if (timeout == 0) break; buf[i] = readl(UART1_REG(RX)); } /* return read bytes */ return i; } static int tsp2_miconwrite(const unsigned char *buf, int count) { int i = 0; while (count--) { while (!(readl(UART1_REG(LSR)) & UART_LSR_THRE)) barrier(); writel(buf[i++], UART1_REG(TX)); } return 0; } static int tsp2_miconsend(const unsigned char *data, int count) { int i; unsigned char checksum = 0; unsigned char recv_buf[40]; unsigned char send_buf[40]; unsigned char correct_ack[3]; int retry = 2; /* Generate checksum */ for (i = 0; i < count; i++) checksum -= data[i]; do { /* Send data */ tsp2_miconwrite(data, count); /* send checksum */ tsp2_miconwrite(&checksum, 1); if (tsp2_miconread(recv_buf, sizeof(recv_buf)) <= 3) { printk(KERN_ERR ">%s: receive failed.\n", __func__); /* send preamble to clear the receive buffer */ memset(&send_buf, 0xff, sizeof(send_buf)); tsp2_miconwrite(send_buf, sizeof(send_buf)); /* make dummy reads */ mdelay(100); tsp2_miconread(recv_buf, sizeof(recv_buf)); } else { /* Generate expected ack */ correct_ack[0] = 0x01; correct_ack[1] = data[1]; correct_ack[2] = 0x00; /* checksum Check */ if ((recv_buf[0] + recv_buf[1] + recv_buf[2] + recv_buf[3]) & 0xFF) { printk(KERN_ERR ">%s: Checksum Error : " "Received data[%02x, %02x, %02x, %02x]" "\n", __func__, recv_buf[0], recv_buf[1], recv_buf[2], recv_buf[3]); } else { /* Check Received Data */ if (correct_ack[0] == recv_buf[0] && correct_ack[1] == recv_buf[1] && correct_ack[2] == recv_buf[2]) { /* Interval for next command */ mdelay(10); /* Receive ACK */ return 0; } } /* Received NAK or illegal Data 
*/ printk(KERN_ERR ">%s: Error : NAK or Illegal Data " "Received\n", __func__); } } while (retry--); /* Interval for next command */ mdelay(10); return -1; } static void tsp2_power_off(void) { const unsigned char watchdogkill[] = {0x01, 0x35, 0x00}; const unsigned char shutdownwait[] = {0x00, 0x0c}; const unsigned char poweroff[] = {0x00, 0x06}; /* 38400 baud divisor */ const unsigned divisor = ((orion5x_tclk + (8 * 38400)) / (16 * 38400)); pr_info("%s: triggering power-off...\n", __func__); /* hijack uart1 and reset into sane state (38400,8n1,even parity) */ writel(0x83, UART1_REG(LCR)); writel(divisor & 0xff, UART1_REG(DLL)); writel((divisor >> 8) & 0xff, UART1_REG(DLM)); writel(0x1b, UART1_REG(LCR)); writel(0x00, UART1_REG(IER)); writel(0x07, UART1_REG(FCR)); writel(0x00, UART1_REG(MCR)); /* Send the commands to shutdown the Terastation Pro II */ tsp2_miconsend(watchdogkill, sizeof(watchdogkill)) ; tsp2_miconsend(shutdownwait, sizeof(shutdownwait)) ; tsp2_miconsend(poweroff, sizeof(poweroff)); } /***************************************************************************** * General Setup ****************************************************************************/ static unsigned int tsp2_mpp_modes[] __initdata = { MPP0_PCIE_RST_OUTn, MPP1_UNUSED, MPP2_UNUSED, MPP3_UNUSED, MPP4_NAND, /* BOOT NAND Flash REn */ MPP5_NAND, /* BOOT NAND Flash WEn */ MPP6_NAND, /* BOOT NAND Flash HREn[0] */ MPP7_NAND, /* BOOT NAND Flash WEn[0] */ MPP8_GPIO, /* MICON int */ MPP9_GPIO, /* RTC int */ MPP10_UNUSED, MPP11_GPIO, /* PCI Int A */ MPP12_UNUSED, MPP13_GPIO, /* UPS on UART0 enable */ MPP14_GPIO, /* UPS low battery detection */ MPP15_UNUSED, MPP16_UART, /* UART1 RXD */ MPP17_UART, /* UART1 TXD */ MPP18_UART, /* UART1 CTSn */ MPP19_UART, /* UART1 RTSn */ 0, }; static void __init tsp2_init(void) { /* * Setup basic Orion functions. Need to be called early. */ orion5x_init(); orion5x_mpp_conf(tsp2_mpp_modes); /* * Configure peripherals. 
*/ mvebu_mbus_add_window("devbus-boot", TSP2_NOR_BOOT_BASE, TSP2_NOR_BOOT_SIZE); platform_device_register(&tsp2_nor_flash); orion5x_ehci0_init(); orion5x_eth_init(&tsp2_eth_data); orion5x_i2c_init(); orion5x_uart0_init(); orion5x_uart1_init(); /* Get RTC IRQ and register the chip */ if (gpio_request(TSP2_RTC_GPIO, "rtc") == 0) { if (gpio_direction_input(TSP2_RTC_GPIO) == 0) tsp2_i2c_rtc.irq = gpio_to_irq(TSP2_RTC_GPIO); else gpio_free(TSP2_RTC_GPIO); } if (tsp2_i2c_rtc.irq == 0) pr_warning("tsp2_init: failed to get RTC IRQ\n"); i2c_register_board_info(0, &tsp2_i2c_rtc, 1); /* register Terastation Pro II specific power-off method */ pm_power_off = tsp2_power_off; } MACHINE_START(TERASTATION_PRO2, "Buffalo Terastation Pro II/Live") /* Maintainer: Sylver Bruneau <sylver.bruneau@googlemail.com> */ .atag_offset = 0x100, .init_machine = tsp2_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .init_time = orion5x_timer_init, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END
gpl-2.0
bigbiff/android_kernel_samsung_zeroflte
drivers/tty/serial/nwpserial.c
2107
11634
/* * Serial Port driver for a NWP uart device * * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/init.h> #include <linux/export.h> #include <linux/console.h> #include <linux/serial.h> #include <linux/serial_reg.h> #include <linux/serial_core.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/irqreturn.h> #include <linux/mutex.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include <linux/nwpserial.h> #include <asm/prom.h> #include <asm/dcr.h> #define NWPSERIAL_NR 2 #define NWPSERIAL_STATUS_RXVALID 0x1 #define NWPSERIAL_STATUS_TXFULL 0x2 struct nwpserial_port { struct uart_port port; dcr_host_t dcr_host; unsigned int ier; unsigned int mcr; }; static DEFINE_MUTEX(nwpserial_mutex); static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR]; static void wait_for_bits(struct nwpserial_port *up, int bits) { unsigned int status, tmout = 10000; /* Wait up to 10ms for the character(s) to be sent. 
*/ do { status = dcr_read(up->dcr_host, UART_LSR); if (--tmout == 0) break; udelay(1); } while ((status & bits) != bits); } #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE static void nwpserial_console_putchar(struct uart_port *port, int c) { struct nwpserial_port *up; up = container_of(port, struct nwpserial_port, port); /* check if tx buffer is full */ wait_for_bits(up, UART_LSR_THRE); dcr_write(up->dcr_host, UART_TX, c); up->port.icount.tx++; } static void nwpserial_console_write(struct console *co, const char *s, unsigned int count) { struct nwpserial_port *up = &nwpserial_ports[co->index]; unsigned long flags; int locked = 1; if (oops_in_progress) locked = spin_trylock_irqsave(&up->port.lock, flags); else spin_lock_irqsave(&up->port.lock, flags); /* save and disable interrupt */ up->ier = dcr_read(up->dcr_host, UART_IER); dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI); uart_console_write(&up->port, s, count, nwpserial_console_putchar); /* wait for transmitter to become empty */ while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0) cpu_relax(); /* restore interrupt state */ dcr_write(up->dcr_host, UART_IER, up->ier); if (locked) spin_unlock_irqrestore(&up->port.lock, flags); } static struct uart_driver nwpserial_reg; static struct console nwpserial_console = { .name = "ttySQ", .write = nwpserial_console_write, .device = uart_console_device, .flags = CON_PRINTBUFFER, .index = -1, .data = &nwpserial_reg, }; #define NWPSERIAL_CONSOLE (&nwpserial_console) #else #define NWPSERIAL_CONSOLE NULL #endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */ /**************************************************************************/ static int nwpserial_request_port(struct uart_port *port) { return 0; } static void nwpserial_release_port(struct uart_port *port) { /* N/A */ } static void nwpserial_config_port(struct uart_port *port, int flags) { port->type = PORT_NWPSERIAL; } static irqreturn_t nwpserial_interrupt(int irq, void *dev_id) { struct 
nwpserial_port *up = dev_id; struct tty_port *port = &up->port.state->port; irqreturn_t ret; unsigned int iir; unsigned char ch; spin_lock(&up->port.lock); /* check if the uart was the interrupt source. */ iir = dcr_read(up->dcr_host, UART_IIR); if (!iir) { ret = IRQ_NONE; goto out; } do { up->port.icount.rx++; ch = dcr_read(up->dcr_host, UART_RX); if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID) tty_insert_flip_char(port, ch, TTY_NORMAL); } while (dcr_read(up->dcr_host, UART_LSR) & UART_LSR_DR); tty_flip_buffer_push(port); ret = IRQ_HANDLED; /* clear interrupt */ dcr_write(up->dcr_host, UART_IIR, 1); out: spin_unlock(&up->port.lock); return ret; } static int nwpserial_startup(struct uart_port *port) { struct nwpserial_port *up; int err; up = container_of(port, struct nwpserial_port, port); /* disable flow control by default */ up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE; dcr_write(up->dcr_host, UART_MCR, up->mcr); /* register interrupt handler */ err = request_irq(up->port.irq, nwpserial_interrupt, IRQF_SHARED, "nwpserial", up); if (err) return err; /* enable interrupts */ up->ier = UART_IER_RDI; dcr_write(up->dcr_host, UART_IER, up->ier); /* enable receiving */ up->port.ignore_status_mask &= ~NWPSERIAL_STATUS_RXVALID; return 0; } static void nwpserial_shutdown(struct uart_port *port) { struct nwpserial_port *up; up = container_of(port, struct nwpserial_port, port); /* disable receiving */ up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; /* disable interrupts from this port */ up->ier = 0; dcr_write(up->dcr_host, UART_IER, up->ier); /* free irq */ free_irq(up->port.irq, up); } static int nwpserial_verify_port(struct uart_port *port, struct serial_struct *ser) { return -EINVAL; } static const char *nwpserial_type(struct uart_port *port) { return port->type == PORT_NWPSERIAL ? 
"nwpserial" : NULL; } static void nwpserial_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct nwpserial_port *up; up = container_of(port, struct nwpserial_port, port); up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID | NWPSERIAL_STATUS_TXFULL; up->port.ignore_status_mask = 0; /* ignore all characters if CREAD is not set */ if ((termios->c_cflag & CREAD) == 0) up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; /* Copy back the old hardware settings */ if (old) tty_termios_copy_hw(termios, old); } static void nwpserial_break_ctl(struct uart_port *port, int ctl) { /* N/A */ } static void nwpserial_enable_ms(struct uart_port *port) { /* N/A */ } static void nwpserial_stop_rx(struct uart_port *port) { struct nwpserial_port *up; up = container_of(port, struct nwpserial_port, port); /* don't forward any more data (like !CREAD) */ up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID; } static void nwpserial_putchar(struct nwpserial_port *up, unsigned char c) { /* check if tx buffer is full */ wait_for_bits(up, UART_LSR_THRE); dcr_write(up->dcr_host, UART_TX, c); up->port.icount.tx++; } static void nwpserial_start_tx(struct uart_port *port) { struct nwpserial_port *up; struct circ_buf *xmit; up = container_of(port, struct nwpserial_port, port); xmit = &up->port.state->xmit; if (port->x_char) { nwpserial_putchar(up, up->port.x_char); port->x_char = 0; } while (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port))) { nwpserial_putchar(up, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1); } } static unsigned int nwpserial_get_mctrl(struct uart_port *port) { return 0; } static void nwpserial_set_mctrl(struct uart_port *port, unsigned int mctrl) { /* N/A */ } static void nwpserial_stop_tx(struct uart_port *port) { /* N/A */ } static unsigned int nwpserial_tx_empty(struct uart_port *port) { struct nwpserial_port *up; unsigned long flags; int ret; up = container_of(port, struct nwpserial_port, port); 
spin_lock_irqsave(&up->port.lock, flags); ret = dcr_read(up->dcr_host, UART_LSR); spin_unlock_irqrestore(&up->port.lock, flags); return ret & UART_LSR_TEMT ? TIOCSER_TEMT : 0; } static struct uart_ops nwpserial_pops = { .tx_empty = nwpserial_tx_empty, .set_mctrl = nwpserial_set_mctrl, .get_mctrl = nwpserial_get_mctrl, .stop_tx = nwpserial_stop_tx, .start_tx = nwpserial_start_tx, .stop_rx = nwpserial_stop_rx, .enable_ms = nwpserial_enable_ms, .break_ctl = nwpserial_break_ctl, .startup = nwpserial_startup, .shutdown = nwpserial_shutdown, .set_termios = nwpserial_set_termios, .type = nwpserial_type, .release_port = nwpserial_release_port, .request_port = nwpserial_request_port, .config_port = nwpserial_config_port, .verify_port = nwpserial_verify_port, }; static struct uart_driver nwpserial_reg = { .owner = THIS_MODULE, .driver_name = "nwpserial", .dev_name = "ttySQ", .major = TTY_MAJOR, .minor = 68, .nr = NWPSERIAL_NR, .cons = NWPSERIAL_CONSOLE, }; int nwpserial_register_port(struct uart_port *port) { struct nwpserial_port *up = NULL; int ret = -1; int i; static int first = 1; int dcr_len; int dcr_base; struct device_node *dn; mutex_lock(&nwpserial_mutex); dn = port->dev->of_node; if (dn == NULL) goto out; /* get dcr base. 
*/ dcr_base = dcr_resource_start(dn, 0); /* find matching entry */ for (i = 0; i < NWPSERIAL_NR; i++) if (nwpserial_ports[i].port.iobase == dcr_base) { up = &nwpserial_ports[i]; break; } /* we didn't find a mtching entry, search for a free port */ if (up == NULL) for (i = 0; i < NWPSERIAL_NR; i++) if (nwpserial_ports[i].port.type == PORT_UNKNOWN && nwpserial_ports[i].port.iobase == 0) { up = &nwpserial_ports[i]; break; } if (up == NULL) { ret = -EBUSY; goto out; } if (first) uart_register_driver(&nwpserial_reg); first = 0; up->port.membase = port->membase; up->port.irq = port->irq; up->port.uartclk = port->uartclk; up->port.fifosize = port->fifosize; up->port.regshift = port->regshift; up->port.iotype = port->iotype; up->port.flags = port->flags; up->port.mapbase = port->mapbase; up->port.private_data = port->private_data; if (port->dev) up->port.dev = port->dev; if (up->port.iobase != dcr_base) { up->port.ops = &nwpserial_pops; up->port.fifosize = 16; spin_lock_init(&up->port.lock); up->port.iobase = dcr_base; dcr_len = dcr_resource_len(dn, 0); up->dcr_host = dcr_map(dn, dcr_base, dcr_len); if (!DCR_MAP_OK(up->dcr_host)) { printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL"); goto out; } } ret = uart_add_one_port(&nwpserial_reg, &up->port); if (ret == 0) ret = up->port.line; out: mutex_unlock(&nwpserial_mutex); return ret; } EXPORT_SYMBOL(nwpserial_register_port); void nwpserial_unregister_port(int line) { struct nwpserial_port *up = &nwpserial_ports[line]; mutex_lock(&nwpserial_mutex); uart_remove_one_port(&nwpserial_reg, &up->port); up->port.type = PORT_UNKNOWN; mutex_unlock(&nwpserial_mutex); } EXPORT_SYMBOL(nwpserial_unregister_port); #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE static int __init nwpserial_console_init(void) { struct nwpserial_port *up = NULL; struct device_node *dn; const char *name; int dcr_base; int dcr_len; int i; /* search for a free port */ for (i = 0; i < NWPSERIAL_NR; i++) if (nwpserial_ports[i].port.type == PORT_UNKNOWN) { 
up = &nwpserial_ports[i]; break; } if (up == NULL) return -1; name = of_get_property(of_chosen, "linux,stdout-path", NULL); if (name == NULL) return -1; dn = of_find_node_by_path(name); if (!dn) return -1; spin_lock_init(&up->port.lock); up->port.ops = &nwpserial_pops; up->port.type = PORT_NWPSERIAL; up->port.fifosize = 16; dcr_base = dcr_resource_start(dn, 0); dcr_len = dcr_resource_len(dn, 0); up->port.iobase = dcr_base; up->dcr_host = dcr_map(dn, dcr_base, dcr_len); if (!DCR_MAP_OK(up->dcr_host)) { printk("Cannot map DCR resources for SERIAL"); return -1; } register_console(&nwpserial_console); return 0; } console_initcall(nwpserial_console_init); #endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
gpl-2.0
CyanogenMod/android_kernel_htc_msm8974
drivers/base/regmap/regcache-lzo.c
4411
9264
/* * Register cache access API - LZO caching support * * Copyright 2011 Wolfson Microelectronics plc * * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/slab.h> #include <linux/device.h> #include <linux/lzo.h> #include "internal.h" static int regcache_lzo_exit(struct regmap *map); struct regcache_lzo_ctx { void *wmem; void *dst; const void *src; size_t src_len; size_t dst_len; size_t decompressed_size; unsigned long *sync_bmp; int sync_bmp_nbits; }; #define LZO_BLOCK_NUM 8 static int regcache_lzo_block_count(struct regmap *map) { return LZO_BLOCK_NUM; } static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx) { lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); if (!lzo_ctx->wmem) return -ENOMEM; return 0; } static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx) { size_t compress_size; int ret; ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len, lzo_ctx->dst, &compress_size, lzo_ctx->wmem); if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len) return -EINVAL; lzo_ctx->dst_len = compress_size; return 0; } static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx) { size_t dst_len; int ret; dst_len = lzo_ctx->dst_len; ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len, lzo_ctx->dst, &dst_len); if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len) return -EINVAL; return 0; } static int regcache_lzo_compress_cache_block(struct regmap *map, struct regcache_lzo_ctx *lzo_ctx) { int ret; lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE); lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); if (!lzo_ctx->dst) { lzo_ctx->dst_len = 0; return -ENOMEM; } ret = regcache_lzo_compress(lzo_ctx); if (ret < 0) return ret; return 0; } static int regcache_lzo_decompress_cache_block(struct regmap *map, struct 
regcache_lzo_ctx *lzo_ctx) { int ret; lzo_ctx->dst_len = lzo_ctx->decompressed_size; lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); if (!lzo_ctx->dst) { lzo_ctx->dst_len = 0; return -ENOMEM; } ret = regcache_lzo_decompress(lzo_ctx); if (ret < 0) return ret; return 0; } static inline int regcache_lzo_get_blkindex(struct regmap *map, unsigned int reg) { return (reg * map->cache_word_size) / DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count(map)); } static inline int regcache_lzo_get_blkpos(struct regmap *map, unsigned int reg) { return reg % (DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count(map)) / map->cache_word_size); } static inline int regcache_lzo_get_blksize(struct regmap *map) { return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count(map)); } static int regcache_lzo_init(struct regmap *map) { struct regcache_lzo_ctx **lzo_blocks; size_t bmp_size; int ret, i, blksize, blkcount; const char *p, *end; unsigned long *sync_bmp; ret = 0; blkcount = regcache_lzo_block_count(map); map->cache = kzalloc(blkcount * sizeof *lzo_blocks, GFP_KERNEL); if (!map->cache) return -ENOMEM; lzo_blocks = map->cache; /* * allocate a bitmap to be used when syncing the cache with * the hardware. Each time a register is modified, the corresponding * bit is set in the bitmap, so we know that we have to sync * that register. 
*/ bmp_size = map->num_reg_defaults_raw; sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long), GFP_KERNEL); if (!sync_bmp) { ret = -ENOMEM; goto err; } bitmap_zero(sync_bmp, bmp_size); /* allocate the lzo blocks and initialize them */ for (i = 0; i < blkcount; i++) { lzo_blocks[i] = kzalloc(sizeof **lzo_blocks, GFP_KERNEL); if (!lzo_blocks[i]) { kfree(sync_bmp); ret = -ENOMEM; goto err; } lzo_blocks[i]->sync_bmp = sync_bmp; lzo_blocks[i]->sync_bmp_nbits = bmp_size; /* alloc the working space for the compressed block */ ret = regcache_lzo_prepare(lzo_blocks[i]); if (ret < 0) goto err; } blksize = regcache_lzo_get_blksize(map); p = map->reg_defaults_raw; end = map->reg_defaults_raw + map->cache_size_raw; /* compress the register map and fill the lzo blocks */ for (i = 0; i < blkcount; i++, p += blksize) { lzo_blocks[i]->src = p; if (p + blksize > end) lzo_blocks[i]->src_len = end - p; else lzo_blocks[i]->src_len = blksize; ret = regcache_lzo_compress_cache_block(map, lzo_blocks[i]); if (ret < 0) goto err; lzo_blocks[i]->decompressed_size = lzo_blocks[i]->src_len; } return 0; err: regcache_lzo_exit(map); return ret; } static int regcache_lzo_exit(struct regmap *map) { struct regcache_lzo_ctx **lzo_blocks; int i, blkcount; lzo_blocks = map->cache; if (!lzo_blocks) return 0; blkcount = regcache_lzo_block_count(map); /* * the pointer to the bitmap used for syncing the cache * is shared amongst all lzo_blocks. Ensure it is freed * only once. 
*/ if (lzo_blocks[0]) kfree(lzo_blocks[0]->sync_bmp); for (i = 0; i < blkcount; i++) { if (lzo_blocks[i]) { kfree(lzo_blocks[i]->wmem); kfree(lzo_blocks[i]->dst); } /* each lzo_block is a pointer returned by kmalloc or NULL */ kfree(lzo_blocks[i]); } kfree(lzo_blocks); map->cache = NULL; return 0; } static int regcache_lzo_read(struct regmap *map, unsigned int reg, unsigned int *value) { struct regcache_lzo_ctx *lzo_block, **lzo_blocks; int ret, blkindex, blkpos; size_t blksize, tmp_dst_len; void *tmp_dst; /* index of the compressed lzo block */ blkindex = regcache_lzo_get_blkindex(map, reg); /* register index within the decompressed block */ blkpos = regcache_lzo_get_blkpos(map, reg); /* size of the compressed block */ blksize = regcache_lzo_get_blksize(map); lzo_blocks = map->cache; lzo_block = lzo_blocks[blkindex]; /* save the pointer and length of the compressed block */ tmp_dst = lzo_block->dst; tmp_dst_len = lzo_block->dst_len; /* prepare the source to be the compressed block */ lzo_block->src = lzo_block->dst; lzo_block->src_len = lzo_block->dst_len; /* decompress the block */ ret = regcache_lzo_decompress_cache_block(map, lzo_block); if (ret >= 0) /* fetch the value from the cache */ *value = regcache_get_val(lzo_block->dst, blkpos, map->cache_word_size); kfree(lzo_block->dst); /* restore the pointer and length of the compressed block */ lzo_block->dst = tmp_dst; lzo_block->dst_len = tmp_dst_len; return ret; } static int regcache_lzo_write(struct regmap *map, unsigned int reg, unsigned int value) { struct regcache_lzo_ctx *lzo_block, **lzo_blocks; int ret, blkindex, blkpos; size_t blksize, tmp_dst_len; void *tmp_dst; /* index of the compressed lzo block */ blkindex = regcache_lzo_get_blkindex(map, reg); /* register index within the decompressed block */ blkpos = regcache_lzo_get_blkpos(map, reg); /* size of the compressed block */ blksize = regcache_lzo_get_blksize(map); lzo_blocks = map->cache; lzo_block = lzo_blocks[blkindex]; /* save the pointer and 
length of the compressed block */ tmp_dst = lzo_block->dst; tmp_dst_len = lzo_block->dst_len; /* prepare the source to be the compressed block */ lzo_block->src = lzo_block->dst; lzo_block->src_len = lzo_block->dst_len; /* decompress the block */ ret = regcache_lzo_decompress_cache_block(map, lzo_block); if (ret < 0) { kfree(lzo_block->dst); goto out; } /* write the new value to the cache */ if (regcache_set_val(lzo_block->dst, blkpos, value, map->cache_word_size)) { kfree(lzo_block->dst); goto out; } /* prepare the source to be the decompressed block */ lzo_block->src = lzo_block->dst; lzo_block->src_len = lzo_block->dst_len; /* compress the block */ ret = regcache_lzo_compress_cache_block(map, lzo_block); if (ret < 0) { kfree(lzo_block->dst); kfree(lzo_block->src); goto out; } /* set the bit so we know we have to sync this register */ set_bit(reg, lzo_block->sync_bmp); kfree(tmp_dst); kfree(lzo_block->src); return 0; out: lzo_block->dst = tmp_dst; lzo_block->dst_len = tmp_dst_len; return ret; } static int regcache_lzo_sync(struct regmap *map, unsigned int min, unsigned int max) { struct regcache_lzo_ctx **lzo_blocks; unsigned int val; int i; int ret; lzo_blocks = map->cache; i = min; for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { if (i > max) continue; ret = regcache_read(map, i, &val); if (ret) return ret; /* Is this the hardware default? If so skip. */ ret = regcache_lookup_reg(map, i); if (ret > 0 && val == map->reg_defaults[ret].def) continue; map->cache_bypass = 1; ret = _regmap_write(map, i, val); map->cache_bypass = 0; if (ret) return ret; dev_dbg(map->dev, "Synced register %#x, value %#x\n", i, val); } return 0; } struct regcache_ops regcache_lzo_ops = { .type = REGCACHE_COMPRESSED, .name = "lzo", .init = regcache_lzo_init, .exit = regcache_lzo_exit, .read = regcache_lzo_read, .write = regcache_lzo_write, .sync = regcache_lzo_sync };
gpl-2.0
iamroot12CD/linux
drivers/media/usb/gspca/etoms.c
4411
22572
/* * Etoms Et61x151 GPL Linux driver by Michel Xhaard (09/09/2004) * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "etoms" #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("Etoms USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ unsigned char autogain; char sensor; #define SENSOR_PAS106 0 #define SENSOR_TAS5130CXX 1 signed char ag_cnt; #define AG_CNT_START 13 }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, /* {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, */ }; static const struct v4l2_pix_format sif_mode[] = { {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; #define ETOMS_ALT_SIZE_1000 12 #define ET_GPIO_DIR_CTRL 0x04 /* Control IO bit[0..5] (0 in 1 out) */ #define ET_GPIO_OUT 0x05 /* Only IO data */ #define ET_GPIO_IN 0x06 /* Read Only IO data */ #define ET_RESET_ALL 0x03 #define ET_ClCK 0x01 #define ET_CTRL 0x02 /* enable i2c OutClck Powerdown mode */ #define ET_COMP 0x12 /* Compression register */ #define ET_MAXQt 0x13 #define ET_MINQt 0x14 #define ET_COMP_VAL0 0x02 #define ET_COMP_VAL1 0x03 #define ET_REG1d 0x1d #define ET_REG1e 0x1e #define ET_REG1f 0x1f #define ET_REG20 0x20 #define ET_REG21 0x21 #define ET_REG22 0x22 #define ET_REG23 0x23 #define ET_REG24 0x24 #define ET_REG25 0x25 /* base registers for luma calculation */ #define ET_LUMA_CENTER 0x39 #define ET_G_RED 0x4d #define ET_G_GREEN1 0x4e #define ET_G_BLUE 0x4f #define ET_G_GREEN2 0x50 #define ET_G_GR_H 0x51 #define ET_G_GB_H 0x52 #define ET_O_RED 0x34 #define ET_O_GREEN1 0x35 #define ET_O_BLUE 0x36 #define ET_O_GREEN2 0x37 #define ET_SYNCHRO 0x68 #define ET_STARTX 0x69 #define ET_STARTY 0x6a #define ET_WIDTH_LOW 0x6b #define ET_HEIGTH_LOW 0x6c #define ET_W_H_HEIGTH 0x6d #define ET_REG6e 0x6e /* OBW */ #define ET_REG6f 0x6f /* 
OBW */ #define ET_REG70 0x70 /* OBW_AWB */ #define ET_REG71 0x71 /* OBW_AWB */ #define ET_REG72 0x72 /* OBW_AWB */ #define ET_REG73 0x73 /* Clkdelay ns */ #define ET_REG74 0x74 /* test pattern */ #define ET_REG75 0x75 /* test pattern */ #define ET_I2C_CLK 0x8c #define ET_PXL_CLK 0x60 #define ET_I2C_BASE 0x89 #define ET_I2C_COUNT 0x8a #define ET_I2C_PREFETCH 0x8b #define ET_I2C_REG 0x88 #define ET_I2C_DATA7 0x87 #define ET_I2C_DATA6 0x86 #define ET_I2C_DATA5 0x85 #define ET_I2C_DATA4 0x84 #define ET_I2C_DATA3 0x83 #define ET_I2C_DATA2 0x82 #define ET_I2C_DATA1 0x81 #define ET_I2C_DATA0 0x80 #define PAS106_REG2 0x02 /* pxlClk = systemClk/(reg2) */ #define PAS106_REG3 0x03 /* line/frame H [11..4] */ #define PAS106_REG4 0x04 /* line/frame L [3..0] */ #define PAS106_REG5 0x05 /* exposure time line offset(default 5) */ #define PAS106_REG6 0x06 /* exposure time pixel offset(default 6) */ #define PAS106_REG7 0x07 /* signbit Dac (default 0) */ #define PAS106_REG9 0x09 #define PAS106_REG0e 0x0e /* global gain [4..0](default 0x0e) */ #define PAS106_REG13 0x13 /* end i2c write */ static const __u8 GainRGBG[] = { 0x80, 0x80, 0x80, 0x80, 0x00, 0x00 }; static const __u8 I2c2[] = { 0x08, 0x08, 0x08, 0x08, 0x0d }; static const __u8 I2c3[] = { 0x12, 0x05 }; static const __u8 I2c4[] = { 0x41, 0x08 }; /* read 'len' bytes to gspca_dev->usb_buf */ static void reg_r(struct gspca_dev *gspca_dev, __u16 index, __u16 len) { struct usb_device *dev = gspca_dev->dev; if (len > USB_BUF_SZ) { PERR("reg_r: buffer overflow\n"); return; } usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, index, gspca_dev->usb_buf, len, 500); PDEBUG(D_USBI, "reg read [%02x] -> %02x ..", index, gspca_dev->usb_buf[0]); } static void reg_w_val(struct gspca_dev *gspca_dev, __u16 index, __u8 val) { struct usb_device *dev = gspca_dev->dev; gspca_dev->usb_buf[0] = val; usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 
0, index, gspca_dev->usb_buf, 1, 500); } static void reg_w(struct gspca_dev *gspca_dev, __u16 index, const __u8 *buffer, __u16 len) { struct usb_device *dev = gspca_dev->dev; if (len > USB_BUF_SZ) { pr_err("reg_w: buffer overflow\n"); return; } PDEBUG(D_USBO, "reg write [%02x] = %02x..", index, *buffer); memcpy(gspca_dev->usb_buf, buffer, len); usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, index, gspca_dev->usb_buf, len, 500); } static int i2c_w(struct gspca_dev *gspca_dev, __u8 reg, const __u8 *buffer, int len, __u8 mode) { /* buffer should be [D0..D7] */ __u8 ptchcount; /* set the base address */ reg_w_val(gspca_dev, ET_I2C_BASE, 0x40); /* sensor base for the pas106 */ /* set count and prefetch */ ptchcount = ((len & 0x07) << 4) | (mode & 0x03); reg_w_val(gspca_dev, ET_I2C_COUNT, ptchcount); /* set the register base */ reg_w_val(gspca_dev, ET_I2C_REG, reg); while (--len >= 0) reg_w_val(gspca_dev, ET_I2C_DATA0 + len, buffer[len]); return 0; } static int i2c_r(struct gspca_dev *gspca_dev, __u8 reg) { /* set the base address */ reg_w_val(gspca_dev, ET_I2C_BASE, 0x40); /* sensor base for the pas106 */ /* set count and prefetch (cnd: 4 bits - mode: 4 bits) */ reg_w_val(gspca_dev, ET_I2C_COUNT, 0x11); reg_w_val(gspca_dev, ET_I2C_REG, reg); /* set the register base */ reg_w_val(gspca_dev, ET_I2C_PREFETCH, 0x02); /* prefetch */ reg_w_val(gspca_dev, ET_I2C_PREFETCH, 0x00); reg_r(gspca_dev, ET_I2C_DATA0, 1); /* read one byte */ return 0; } static int Et_WaitStatus(struct gspca_dev *gspca_dev) { int retry = 10; while (retry--) { reg_r(gspca_dev, ET_ClCK, 1); if (gspca_dev->usb_buf[0] != 0) return 1; } return 0; } static int et_video(struct gspca_dev *gspca_dev, int on) { int ret; reg_w_val(gspca_dev, ET_GPIO_OUT, on ? 
0x10 /* startvideo - set Bit5 */ : 0); /* stopvideo */ ret = Et_WaitStatus(gspca_dev); if (ret != 0) PERR("timeout video on/off"); return ret; } static void Et_init2(struct gspca_dev *gspca_dev) { __u8 value; static const __u8 FormLine[] = { 0x84, 0x03, 0x14, 0xf4, 0x01, 0x05 }; PDEBUG(D_STREAM, "Open Init2 ET"); reg_w_val(gspca_dev, ET_GPIO_DIR_CTRL, 0x2f); reg_w_val(gspca_dev, ET_GPIO_OUT, 0x10); reg_r(gspca_dev, ET_GPIO_IN, 1); reg_w_val(gspca_dev, ET_ClCK, 0x14); /* 0x14 // 0x16 enabled pattern */ reg_w_val(gspca_dev, ET_CTRL, 0x1b); /* compression et subsampling */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) value = ET_COMP_VAL1; /* 320 */ else value = ET_COMP_VAL0; /* 640 */ reg_w_val(gspca_dev, ET_COMP, value); reg_w_val(gspca_dev, ET_MAXQt, 0x1f); reg_w_val(gspca_dev, ET_MINQt, 0x04); /* undocumented registers */ reg_w_val(gspca_dev, ET_REG1d, 0xff); reg_w_val(gspca_dev, ET_REG1e, 0xff); reg_w_val(gspca_dev, ET_REG1f, 0xff); reg_w_val(gspca_dev, ET_REG20, 0x35); reg_w_val(gspca_dev, ET_REG21, 0x01); reg_w_val(gspca_dev, ET_REG22, 0x00); reg_w_val(gspca_dev, ET_REG23, 0xff); reg_w_val(gspca_dev, ET_REG24, 0xff); reg_w_val(gspca_dev, ET_REG25, 0x0f); /* colors setting */ reg_w_val(gspca_dev, 0x30, 0x11); /* 0x30 */ reg_w_val(gspca_dev, 0x31, 0x40); reg_w_val(gspca_dev, 0x32, 0x00); reg_w_val(gspca_dev, ET_O_RED, 0x00); /* 0x34 */ reg_w_val(gspca_dev, ET_O_GREEN1, 0x00); reg_w_val(gspca_dev, ET_O_BLUE, 0x00); reg_w_val(gspca_dev, ET_O_GREEN2, 0x00); /*************/ reg_w_val(gspca_dev, ET_G_RED, 0x80); /* 0x4d */ reg_w_val(gspca_dev, ET_G_GREEN1, 0x80); reg_w_val(gspca_dev, ET_G_BLUE, 0x80); reg_w_val(gspca_dev, ET_G_GREEN2, 0x80); reg_w_val(gspca_dev, ET_G_GR_H, 0x00); reg_w_val(gspca_dev, ET_G_GB_H, 0x00); /* 0x52 */ /* Window control registers */ reg_w_val(gspca_dev, 0x61, 0x80); /* use cmc_out */ reg_w_val(gspca_dev, 0x62, 0x02); reg_w_val(gspca_dev, 0x63, 0x03); reg_w_val(gspca_dev, 0x64, 0x14); reg_w_val(gspca_dev, 0x65, 0x0e); 
reg_w_val(gspca_dev, 0x66, 0x02); reg_w_val(gspca_dev, 0x67, 0x02); /**************************************/ reg_w_val(gspca_dev, ET_SYNCHRO, 0x8f); /* 0x68 */ reg_w_val(gspca_dev, ET_STARTX, 0x69); /* 0x6a //0x69 */ reg_w_val(gspca_dev, ET_STARTY, 0x0d); /* 0x0d //0x0c */ reg_w_val(gspca_dev, ET_WIDTH_LOW, 0x80); reg_w_val(gspca_dev, ET_HEIGTH_LOW, 0xe0); reg_w_val(gspca_dev, ET_W_H_HEIGTH, 0x60); /* 6d */ reg_w_val(gspca_dev, ET_REG6e, 0x86); reg_w_val(gspca_dev, ET_REG6f, 0x01); reg_w_val(gspca_dev, ET_REG70, 0x26); reg_w_val(gspca_dev, ET_REG71, 0x7a); reg_w_val(gspca_dev, ET_REG72, 0x01); /* Clock Pattern registers ***************** */ reg_w_val(gspca_dev, ET_REG73, 0x00); reg_w_val(gspca_dev, ET_REG74, 0x18); /* 0x28 */ reg_w_val(gspca_dev, ET_REG75, 0x0f); /* 0x01 */ /**********************************************/ reg_w_val(gspca_dev, 0x8a, 0x20); reg_w_val(gspca_dev, 0x8d, 0x0f); reg_w_val(gspca_dev, 0x8e, 0x08); /**************************************/ reg_w_val(gspca_dev, 0x03, 0x08); reg_w_val(gspca_dev, ET_PXL_CLK, 0x03); reg_w_val(gspca_dev, 0x81, 0xff); reg_w_val(gspca_dev, 0x80, 0x00); reg_w_val(gspca_dev, 0x81, 0xff); reg_w_val(gspca_dev, 0x80, 0x20); reg_w_val(gspca_dev, 0x03, 0x01); reg_w_val(gspca_dev, 0x03, 0x00); reg_w_val(gspca_dev, 0x03, 0x08); /********************************************/ /* reg_r(gspca_dev, ET_I2C_BASE, 1); always 0x40 as the pas106 ??? 
*/ /* set the sensor */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) value = 0x04; /* 320 */ else /* 640 */ value = 0x1e; /* 0x17 * setting PixelClock * 0x03 mean 24/(3+1) = 6 Mhz * 0x05 -> 24/(5+1) = 4 Mhz * 0x0b -> 24/(11+1) = 2 Mhz * 0x17 -> 24/(23+1) = 1 Mhz */ reg_w_val(gspca_dev, ET_PXL_CLK, value); /* now set by fifo the FormatLine setting */ reg_w(gspca_dev, 0x62, FormLine, 6); /* set exposure times [ 0..0x78] 0->longvalue 0x78->shortvalue */ reg_w_val(gspca_dev, 0x81, 0x47); /* 0x47; */ reg_w_val(gspca_dev, 0x80, 0x40); /* 0x40; */ /* Pedro change */ /* Brightness change Brith+ decrease value */ /* Brigth- increase value */ /* original value = 0x70; */ reg_w_val(gspca_dev, 0x81, 0x30); /* 0x20; - set brightness */ reg_w_val(gspca_dev, 0x80, 0x20); /* 0x20; */ } static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { int i; for (i = 0; i < 4; i++) reg_w_val(gspca_dev, ET_O_RED + i, val); } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { __u8 RGBG[] = { 0x80, 0x80, 0x80, 0x80, 0x00, 0x00 }; memset(RGBG, val, sizeof(RGBG) - 2); reg_w(gspca_dev, ET_G_RED, RGBG, 6); } static void setcolors(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; __u8 I2cc[] = { 0x05, 0x02, 0x02, 0x05, 0x0d }; __u8 i2cflags = 0x01; /* __u8 green = 0; */ I2cc[3] = val; /* red */ I2cc[0] = 15 - val; /* blue */ /* green = 15 - ((((7*I2cc[0]) >> 2 ) + I2cc[3]) >> 1); */ /* I2cc[1] = I2cc[2] = green; */ if (sd->sensor == SENSOR_PAS106) { i2c_w(gspca_dev, PAS106_REG13, &i2cflags, 1, 3); i2c_w(gspca_dev, PAS106_REG9, I2cc, sizeof I2cc, 1); } /* PDEBUG(D_CONF , "Etoms red %d blue %d green %d", I2cc[3], I2cc[0], green); */ } static s32 getcolors(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) { /* i2c_r(gspca_dev, PAS106_REG9); * blue */ i2c_r(gspca_dev, PAS106_REG9 + 3); /* red */ return gspca_dev->usb_buf[0] & 0x0f; } return 0; } static void setautogain(struct 
gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->autogain) sd->ag_cnt = AG_CNT_START; else sd->ag_cnt = -1; } static void Et_init1(struct gspca_dev *gspca_dev) { __u8 value; /* __u8 I2c0 [] = {0x0a, 0x12, 0x05, 0x22, 0xac, 0x00, 0x01, 0x00}; */ __u8 I2c0[] = { 0x0a, 0x12, 0x05, 0x6d, 0xcd, 0x00, 0x01, 0x00 }; /* try 1/120 0x6d 0xcd 0x40 */ /* __u8 I2c0 [] = {0x0a, 0x12, 0x05, 0xfe, 0xfe, 0xc0, 0x01, 0x00}; * 1/60000 hmm ?? */ PDEBUG(D_STREAM, "Open Init1 ET"); reg_w_val(gspca_dev, ET_GPIO_DIR_CTRL, 7); reg_r(gspca_dev, ET_GPIO_IN, 1); reg_w_val(gspca_dev, ET_RESET_ALL, 1); reg_w_val(gspca_dev, ET_RESET_ALL, 0); reg_w_val(gspca_dev, ET_ClCK, 0x10); reg_w_val(gspca_dev, ET_CTRL, 0x19); /* compression et subsampling */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) value = ET_COMP_VAL1; else value = ET_COMP_VAL0; PDEBUG(D_STREAM, "Open mode %d Compression %d", gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv, value); reg_w_val(gspca_dev, ET_COMP, value); reg_w_val(gspca_dev, ET_MAXQt, 0x1d); reg_w_val(gspca_dev, ET_MINQt, 0x02); /* undocumented registers */ reg_w_val(gspca_dev, ET_REG1d, 0xff); reg_w_val(gspca_dev, ET_REG1e, 0xff); reg_w_val(gspca_dev, ET_REG1f, 0xff); reg_w_val(gspca_dev, ET_REG20, 0x35); reg_w_val(gspca_dev, ET_REG21, 0x01); reg_w_val(gspca_dev, ET_REG22, 0x00); reg_w_val(gspca_dev, ET_REG23, 0xf7); reg_w_val(gspca_dev, ET_REG24, 0xff); reg_w_val(gspca_dev, ET_REG25, 0x07); /* colors setting */ reg_w_val(gspca_dev, ET_G_RED, 0x80); reg_w_val(gspca_dev, ET_G_GREEN1, 0x80); reg_w_val(gspca_dev, ET_G_BLUE, 0x80); reg_w_val(gspca_dev, ET_G_GREEN2, 0x80); reg_w_val(gspca_dev, ET_G_GR_H, 0x00); reg_w_val(gspca_dev, ET_G_GB_H, 0x00); /* Window control registers */ reg_w_val(gspca_dev, ET_SYNCHRO, 0xf0); reg_w_val(gspca_dev, ET_STARTX, 0x56); /* 0x56 */ reg_w_val(gspca_dev, ET_STARTY, 0x05); /* 0x04 */ reg_w_val(gspca_dev, ET_WIDTH_LOW, 0x60); reg_w_val(gspca_dev, ET_HEIGTH_LOW, 0x20); reg_w_val(gspca_dev, 
ET_W_H_HEIGTH, 0x50); reg_w_val(gspca_dev, ET_REG6e, 0x86); reg_w_val(gspca_dev, ET_REG6f, 0x01); reg_w_val(gspca_dev, ET_REG70, 0x86); reg_w_val(gspca_dev, ET_REG71, 0x14); reg_w_val(gspca_dev, ET_REG72, 0x00); /* Clock Pattern registers */ reg_w_val(gspca_dev, ET_REG73, 0x00); reg_w_val(gspca_dev, ET_REG74, 0x00); reg_w_val(gspca_dev, ET_REG75, 0x0a); reg_w_val(gspca_dev, ET_I2C_CLK, 0x04); reg_w_val(gspca_dev, ET_PXL_CLK, 0x01); /* set the sensor */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { I2c0[0] = 0x06; i2c_w(gspca_dev, PAS106_REG2, I2c0, sizeof I2c0, 1); i2c_w(gspca_dev, PAS106_REG9, I2c2, sizeof I2c2, 1); value = 0x06; i2c_w(gspca_dev, PAS106_REG2, &value, 1, 1); i2c_w(gspca_dev, PAS106_REG3, I2c3, sizeof I2c3, 1); /* value = 0x1f; */ value = 0x04; i2c_w(gspca_dev, PAS106_REG0e, &value, 1, 1); } else { I2c0[0] = 0x0a; i2c_w(gspca_dev, PAS106_REG2, I2c0, sizeof I2c0, 1); i2c_w(gspca_dev, PAS106_REG9, I2c2, sizeof I2c2, 1); value = 0x0a; i2c_w(gspca_dev, PAS106_REG2, &value, 1, 1); i2c_w(gspca_dev, PAS106_REG3, I2c3, sizeof I2c3, 1); value = 0x04; /* value = 0x10; */ i2c_w(gspca_dev, PAS106_REG0e, &value, 1, 1); /* bit 2 enable bit 1:2 select 0 1 2 3 value = 0x07; * curve 0 * i2c_w(gspca_dev, PAS106_REG0f, &value, 1, 1); */ } /* value = 0x01; */ /* value = 0x22; */ /* i2c_w(gspca_dev, PAS106_REG5, &value, 1, 1); */ /* magnetude and sign bit for DAC */ i2c_w(gspca_dev, PAS106_REG7, I2c4, sizeof I2c4, 1); /* now set by fifo the whole colors setting */ reg_w(gspca_dev, ET_G_RED, GainRGBG, 6); setcolors(gspca_dev, getcolors(gspca_dev)); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; sd->sensor = id->driver_info; if (sd->sensor == SENSOR_PAS106) { cam->cam_mode = sif_mode; cam->nmodes = ARRAY_SIZE(sif_mode); } else { cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); } 
sd->ag_cnt = -1; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) Et_init1(gspca_dev); else Et_init2(gspca_dev); reg_w_val(gspca_dev, ET_RESET_ALL, 0x08); et_video(gspca_dev, 0); /* video off */ return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) Et_init1(gspca_dev); else Et_init2(gspca_dev); setautogain(gspca_dev); reg_w_val(gspca_dev, ET_RESET_ALL, 0x08); et_video(gspca_dev, 1); /* video on */ return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { et_video(gspca_dev, 0); /* video off */ } static __u8 Et_getgainG(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) { i2c_r(gspca_dev, PAS106_REG0e); PDEBUG(D_CONF, "Etoms gain G %d", gspca_dev->usb_buf[0]); return gspca_dev->usb_buf[0]; } return 0x1f; } static void Et_setgainG(struct gspca_dev *gspca_dev, __u8 gain) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) { __u8 i2cflags = 0x01; i2c_w(gspca_dev, PAS106_REG13, &i2cflags, 1, 3); i2c_w(gspca_dev, PAS106_REG0e, &gain, 1, 1); } } #define BLIMIT(bright) \ (u8)((bright > 0x1f) ? 0x1f : ((bright < 4) ? 3 : bright)) #define LIMIT(color) \ (u8)((color > 0xff) ? 0xff : ((color < 0) ? 
0 : color)) static void do_autogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; __u8 luma; __u8 luma_mean = 128; __u8 luma_delta = 20; __u8 spring = 4; int Gbright; __u8 r, g, b; if (sd->ag_cnt < 0) return; if (--sd->ag_cnt >= 0) return; sd->ag_cnt = AG_CNT_START; Gbright = Et_getgainG(gspca_dev); reg_r(gspca_dev, ET_LUMA_CENTER, 4); g = (gspca_dev->usb_buf[0] + gspca_dev->usb_buf[3]) >> 1; r = gspca_dev->usb_buf[1]; b = gspca_dev->usb_buf[2]; r = ((r << 8) - (r << 4) - (r << 3)) >> 10; b = ((b << 7) >> 10); g = ((g << 9) + (g << 7) + (g << 5)) >> 10; luma = LIMIT(r + g + b); PDEBUG(D_FRAM, "Etoms luma G %d", luma); if (luma < luma_mean - luma_delta || luma > luma_mean + luma_delta) { Gbright += (luma_mean - luma) >> spring; Gbright = BLIMIT(Gbright); PDEBUG(D_FRAM, "Etoms Gbright %d", Gbright); Et_setgainG(gspca_dev, (__u8) Gbright); } } #undef BLIMIT #undef LIMIT static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { int seqframe; seqframe = data[0] & 0x3f; len = (int) (((data[0] & 0xc0) << 2) | data[1]); if (seqframe == 0x3f) { PDEBUG(D_FRAM, "header packet found datalength %d !!", len); PDEBUG(D_FRAM, "G %d R %d G %d B %d", data[2], data[3], data[4], data[5]); data += 30; /* don't change datalength as the chips provided it */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); return; } if (len) { data += 8; gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } else { /* Drop Packet */ gspca_dev->last_packet_type = DISCARD_PACKET; } } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; 
case V4L2_CID_SATURATION: setcolors(gspca_dev, ctrl->val); break; case V4L2_CID_AUTOGAIN: sd->autogain = ctrl->val; setautogain(gspca_dev); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 1, 127, 1, 63); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 127); if (sd->sensor == SENSOR_PAS106) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 15, 1, 7); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
jgcaap/NewKernel
arch/arm/mach-mxs/devices/platform-mxsfb.c
5435
1277
/* * Copyright (C) 2011 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <linux/dma-mapping.h> #include <asm/sizes.h> #include <mach/mx23.h> #include <mach/mx28.h> #include <mach/devices-common.h> #include <mach/mxsfb.h> #ifdef CONFIG_SOC_IMX23 struct platform_device *__init mx23_add_mxsfb( const struct mxsfb_platform_data *pdata) { struct resource res[] = { { .start = MX23_LCDIF_BASE_ADDR, .end = MX23_LCDIF_BASE_ADDR + SZ_8K - 1, .flags = IORESOURCE_MEM, }, }; return mxs_add_platform_device_dmamask("imx23-fb", -1, res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32)); } #endif /* ifdef CONFIG_SOC_IMX23 */ #ifdef CONFIG_SOC_IMX28 struct platform_device *__init mx28_add_mxsfb( const struct mxsfb_platform_data *pdata) { struct resource res[] = { { .start = MX28_LCDIF_BASE_ADDR, .end = MX28_LCDIF_BASE_ADDR + SZ_8K - 1, .flags = IORESOURCE_MEM, }, }; return mxs_add_platform_device_dmamask("imx28-fb", -1, res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32)); } #endif /* ifdef CONFIG_SOC_IMX28 */
gpl-2.0
nmenon/ti-linux-kernel-nm
arch/powerpc/platforms/83xx/mpc832x_rdb.c
8763
5852
/* * arch/powerpc/platforms/83xx/mpc832x_rdb.c * * Copyright (C) Freescale Semiconductor, Inc. 2007. All rights reserved. * * Description: * MPC832x RDB board specific routines. * This file is based on mpc832x_mds.c and mpc8313_rdb.c * Author: Michael Barkowski <michael.barkowski@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> #include <linux/of_platform.h> #include <linux/fsl_devices.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <asm/qe.h> #include <asm/qe_ic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) 
#endif #ifdef CONFIG_QUICC_ENGINE static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *dev, bool on)) { struct device_node *np; unsigned int i = 0; for_each_compatible_node(np, type, compatible) { int ret; unsigned int j; const void *prop; struct resource res[2]; struct platform_device *pdev; struct fsl_spi_platform_data pdata = { .cs_control = cs_control, }; memset(res, 0, sizeof(res)); pdata.sysclk = sysclk; prop = of_get_property(np, "reg", NULL); if (!prop) goto err; pdata.bus_num = *(u32 *)prop; prop = of_get_property(np, "cell-index", NULL); if (prop) i = *(u32 *)prop; prop = of_get_property(np, "mode", NULL); if (prop && !strcmp(prop, "cpu-qe")) pdata.flags = SPI_QE_CPU_MODE; for (j = 0; j < num_board_infos; j++) { if (board_infos[j].bus_num == pdata.bus_num) pdata.max_chipselect++; } if (!pdata.max_chipselect) continue; ret = of_address_to_resource(np, 0, &res[0]); if (ret) goto err; ret = of_irq_to_resource(np, 0, &res[1]); if (ret == NO_IRQ) goto err; pdev = platform_device_alloc("mpc83xx_spi", i); if (!pdev) goto err; ret = platform_device_add_data(pdev, &pdata, sizeof(pdata)); if (ret) goto unreg; ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) goto unreg; ret = platform_device_add(pdev); if (ret) goto unreg; goto next; unreg: platform_device_del(pdev); err: pr_err("%s: registration failed\n", np->full_name); next: i++; } return i; } static int __init fsl_spi_init(struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *spi, bool on)) { u32 sysclk = -1; int ret; /* SPI controller is either clocked from QE or SoC clock */ sysclk = get_brgfreq(); if (sysclk == -1) { sysclk = fsl_get_sys_freq(); if (sysclk == -1) return -ENODEV; } ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos, num_board_infos, cs_control); if (!ret) of_fsl_spi_probe("spi", 
"fsl_spi", sysclk, board_infos, num_board_infos, cs_control); return spi_register_board_info(board_infos, num_board_infos); } static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on) { pr_debug("%s %d %d\n", __func__, spi->chip_select, on); par_io_data_set(3, 13, on); } static struct mmc_spi_platform_data mpc832x_mmc_pdata = { .ocr_mask = MMC_VDD_33_34, }; static struct spi_board_info mpc832x_spi_boardinfo = { .bus_num = 0x4c0, .chip_select = 0, .max_speed_hz = 50000000, .modalias = "mmc_spi", .platform_data = &mpc832x_mmc_pdata, }; static int __init mpc832x_spi_init(void) { par_io_config_pin(3, 0, 3, 0, 1, 0); /* SPI1 MOSI, I/O */ par_io_config_pin(3, 1, 3, 0, 1, 0); /* SPI1 MISO, I/O */ par_io_config_pin(3, 2, 3, 0, 1, 0); /* SPI1 CLK, I/O */ par_io_config_pin(3, 3, 2, 0, 1, 0); /* SPI1 SEL, I */ par_io_config_pin(3, 13, 1, 0, 0, 0); /* !SD_CS, O */ par_io_config_pin(3, 14, 2, 0, 0, 0); /* SD_INSERT, I */ par_io_config_pin(3, 15, 2, 0, 0, 0); /* SD_PROTECT,I */ /* * Don't bother with legacy stuff when device tree contains * mmc-spi-slot node. 
*/ if (of_find_compatible_node(NULL, NULL, "mmc-spi-slot")) return 0; return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control); } machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); #endif /* CONFIG_QUICC_ENGINE */ /* ************************************************************************ * * Setup the architecture * */ static void __init mpc832x_rdb_setup_arch(void) { #if defined(CONFIG_QUICC_ENGINE) struct device_node *np; #endif if (ppc_md.progress) ppc_md.progress("mpc832x_rdb_setup_arch()", 0); mpc83xx_setup_pci(); #ifdef CONFIG_QUICC_ENGINE qe_reset(); if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { par_io_init(np); of_node_put(np); for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) par_io_of_config(np); } #endif /* CONFIG_QUICC_ENGINE */ } machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices); /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc832x_rdb_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "MPC832xRDB"); } define_machine(mpc832x_rdb) { .name = "MPC832x RDB", .probe = mpc832x_rdb_probe, .setup_arch = mpc832x_rdb_setup_arch, .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
lyapota/enru-3.1.10-g7f360be
fs/fuse/inode.c
60
29281
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. */ #include "fuse_i.h" #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/parser.h> #include <linux/statfs.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/exportfs.h> MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Filesystem in Userspace"); MODULE_LICENSE("GPL"); static struct kmem_cache *fuse_inode_cachep; struct list_head fuse_conn_list; DEFINE_MUTEX(fuse_mutex); static int set_global_limit(const char *val, struct kernel_param *kp); unsigned max_user_bgreq; module_param_call(max_user_bgreq, set_global_limit, param_get_uint, &max_user_bgreq, 0644); __MODULE_PARM_TYPE(max_user_bgreq, "uint"); MODULE_PARM_DESC(max_user_bgreq, "Global limit for the maximum number of backgrounded requests an " "unprivileged user can set"); unsigned max_user_congthresh; module_param_call(max_user_congthresh, set_global_limit, param_get_uint, &max_user_congthresh, 0644); __MODULE_PARM_TYPE(max_user_congthresh, "uint"); MODULE_PARM_DESC(max_user_congthresh, "Global limit for the maximum congestion threshold an " "unprivileged user can set"); #define FUSE_SUPER_MAGIC 0x65735546 #define FUSE_DEFAULT_BLKSIZE 512 /** Maximum number of outstanding background requests */ #define FUSE_DEFAULT_MAX_BACKGROUND 12 /** Congestion starts at 75% of maximum */ #define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4) struct fuse_mount_data { int fd; unsigned rootmode; unsigned user_id; unsigned group_id; unsigned fd_present:1; unsigned rootmode_present:1; unsigned user_id_present:1; unsigned group_id_present:1; unsigned flags; unsigned max_read; unsigned blksize; unsigned allow_utime; }; struct fuse_forget_link 
*fuse_alloc_forget(void) { return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL); } static struct inode *fuse_alloc_inode(struct super_block *sb) { struct inode *inode; struct fuse_inode *fi; inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL); if (!inode) return NULL; fi = get_fuse_inode(inode); fi->i_time = 0; fi->nodeid = 0; fi->nlookup = 0; fi->attr_version = 0; fi->writectr = 0; INIT_LIST_HEAD(&fi->write_files); INIT_LIST_HEAD(&fi->queued_writes); INIT_LIST_HEAD(&fi->writepages); init_waitqueue_head(&fi->page_waitq); fi->forget = fuse_alloc_forget(); if (!fi->forget) { kmem_cache_free(fuse_inode_cachep, inode); return NULL; } return inode; } static void fuse_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(fuse_inode_cachep, inode); } static void fuse_destroy_inode(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); BUG_ON(!list_empty(&fi->write_files)); BUG_ON(!list_empty(&fi->queued_writes)); kfree(fi->forget); call_rcu(&inode->i_rcu, fuse_i_callback); } static void fuse_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); if (inode->i_sb->s_flags & MS_ACTIVE) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup); fi->forget = NULL; } } static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) { if (*flags & MS_MANDLOCK) return -EINVAL; return 0; } void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, u64 attr_valid) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); fi->attr_version = ++fc->attr_version; fi->i_time = attr_valid; inode->i_ino = attr->ino; inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); inode->i_nlink = attr->nlink; inode->i_uid = attr->uid; inode->i_gid = attr->gid; 
inode->i_blocks = attr->blocks; inode->i_atime.tv_sec = attr->atime; inode->i_atime.tv_nsec = attr->atimensec; inode->i_mtime.tv_sec = attr->mtime; inode->i_mtime.tv_nsec = attr->mtimensec; inode->i_ctime.tv_sec = attr->ctime; inode->i_ctime.tv_nsec = attr->ctimensec; if (attr->blksize != 0) inode->i_blkbits = ilog2(attr->blksize); else inode->i_blkbits = inode->i_sb->s_blocksize_bits; /* * Don't set the sticky bit in i_mode, unless we want the VFS * to check permissions. This prevents failures due to the * check in may_delete(). */ fi->orig_i_mode = inode->i_mode; if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) inode->i_mode &= ~S_ISVTX; } void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, u64 attr_valid, u64 attr_version) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); loff_t oldsize; spin_lock(&fc->lock); if (attr_version != 0 && fi->attr_version > attr_version) { spin_unlock(&fc->lock); return; } fuse_change_attributes_common(inode, attr, attr_valid); oldsize = inode->i_size; i_size_write(inode, attr->size); spin_unlock(&fc->lock); if (S_ISREG(inode->i_mode) && oldsize != attr->size) { truncate_pagecache(inode, oldsize, attr->size); invalidate_inode_pages2(inode->i_mapping); } } static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr) { inode->i_mode = attr->mode & S_IFMT; inode->i_size = attr->size; if (S_ISREG(inode->i_mode)) { fuse_init_common(inode); fuse_init_file_inode(inode); } else if (S_ISDIR(inode->i_mode)) fuse_init_dir(inode); else if (S_ISLNK(inode->i_mode)) fuse_init_symlink(inode); else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { fuse_init_common(inode); init_special_inode(inode, inode->i_mode, new_decode_dev(attr->rdev)); } else BUG(); } int fuse_inode_eq(struct inode *inode, void *_nodeidp) { u64 nodeid = *(u64 *) _nodeidp; if (get_node_id(inode) == nodeid) return 1; else return 0; } static int 
/*
 * NOTE(review): despite the file metadata (ath5k pcu.c), this region is a
 * whitespace-collapsed copy of the FUSE core (fs/fuse/inode.c, ~3.x kernel).
 * Original line breaks were lost in extraction; tokens below are unmodified.
 * This span contains: fuse_inode_set() (return type cut off before this
 * chunk), fuse_iget(), fuse_reverse_inval_inode(), fuse_umount_begin(),
 * fuse_send_destroy(), fuse_bdi_destroy(), fuse_conn_kill(),
 * fuse_put_super(), convert_fuse_statfs(), fuse_statfs(), the mount-option
 * token table, parse_fuse_opt(), fuse_show_options(), fuse_conn_init(),
 * fuse_conn_put() and fuse_conn_get().
 * TODO(review): restore one-statement-per-line formatting from upstream
 * before attempting to build this file.
 */
fuse_inode_set(struct inode *inode, void *_nodeidp) { u64 nodeid = *(u64 *) _nodeidp; get_fuse_inode(inode)->nodeid = nodeid; return 0; } struct inode *fuse_iget(struct super_block *sb, u64 nodeid, int generation, struct fuse_attr *attr, u64 attr_valid, u64 attr_version) { struct inode *inode; struct fuse_inode *fi; struct fuse_conn *fc = get_fuse_conn_super(sb); retry: inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid); if (!inode) return NULL; if ((inode->i_state & I_NEW)) { inode->i_flags |= S_NOATIME|S_NOCMTIME; inode->i_generation = generation; inode->i_data.backing_dev_info = &fc->bdi; fuse_init_inode(inode, attr); unlock_new_inode(inode); } else if ((inode->i_mode ^ attr->mode) & S_IFMT) { /* Inode has changed type, any I/O on the old should fail */ make_bad_inode(inode); iput(inode); goto retry; } fi = get_fuse_inode(inode); spin_lock(&fc->lock); fi->nlookup++; spin_unlock(&fc->lock); fuse_change_attributes(inode, attr, attr_valid, attr_version); return inode; } int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, loff_t offset, loff_t len) { struct inode *inode; pgoff_t pg_start; pgoff_t pg_end; inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid); if (!inode) return -ENOENT; fuse_invalidate_attr(inode); if (offset >= 0) { pg_start = offset >> PAGE_CACHE_SHIFT; if (len <= 0) pg_end = -1; else pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); } iput(inode); return 0; } static void fuse_umount_begin(struct super_block *sb) { fuse_abort_conn(get_fuse_conn_super(sb)); } static void fuse_send_destroy(struct fuse_conn *fc) { struct fuse_req *req = fc->destroy_req; if (req && fc->conn_init) { fc->destroy_req = NULL; req->in.h.opcode = FUSE_DESTROY; req->force = 1; fuse_request_send(fc, req); fuse_put_request(fc, req); } } static void fuse_bdi_destroy(struct fuse_conn *fc) { if (fc->bdi_initialized) bdi_destroy(&fc->bdi); } void fuse_conn_kill(struct fuse_conn *fc)
/* fuse_conn_kill() body: marks the connection dead under fc->lock, wakes
 * every waiter, and unlinks the connection from the global list and the
 * fuse control filesystem. */
{ spin_lock(&fc->lock); fc->connected = 0; fc->blocked = 0; spin_unlock(&fc->lock); /* Flush all readers on this fs */ kill_fasync(&fc->fasync, SIGIO, POLL_IN); wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); wake_up_all(&fc->reserved_req_waitq); mutex_lock(&fuse_mutex); list_del(&fc->entry); fuse_ctl_remove_conn(fc); mutex_unlock(&fuse_mutex); fuse_bdi_destroy(fc); } EXPORT_SYMBOL_GPL(fuse_conn_kill); static void fuse_put_super(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); fuse_send_destroy(fc); fuse_conn_kill(fc); fuse_conn_put(fc); } static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) { stbuf->f_type = FUSE_SUPER_MAGIC; stbuf->f_bsize = attr->bsize; stbuf->f_frsize = attr->frsize; stbuf->f_blocks = attr->blocks; stbuf->f_bfree = attr->bfree; stbuf->f_bavail = attr->bavail; stbuf->f_files = attr->files; stbuf->f_ffree = attr->ffree; stbuf->f_namelen = attr->namelen; /* fsid is left zero */ } static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct fuse_conn *fc = get_fuse_conn_super(sb); struct fuse_req *req; struct fuse_statfs_out outarg; int err; if (!fuse_allow_task(fc, current)) { buf->f_type = FUSE_SUPER_MAGIC; return 0; } req = fuse_get_req(fc); if (IS_ERR(req)) return PTR_ERR(req); memset(&outarg, 0, sizeof(outarg)); req->in.numargs = 0; req->in.h.opcode = FUSE_STATFS; req->in.h.nodeid = get_node_id(dentry->d_inode); req->out.numargs = 1; req->out.args[0].size = fc->minor < 4 ?
/* servers older than protocol minor 4 return a shorter statfs reply */
FUSE_COMPAT_STATFS_SIZE : sizeof(outarg); req->out.args[0].value = &outarg; fuse_request_send(fc, req); err = req->out.h.error; if (!err) convert_fuse_statfs(buf, &outarg.st); fuse_put_request(fc, req); return err; } enum { OPT_FD, OPT_ROOTMODE, OPT_USER_ID, OPT_GROUP_ID, OPT_DEFAULT_PERMISSIONS, OPT_ALLOW_OTHER, OPT_MAX_READ, OPT_BLKSIZE, OPT_ALLOW_UTIME, OPT_ERR }; static const match_table_t tokens = { {OPT_FD, "fd=%u"}, {OPT_ROOTMODE, "rootmode=%o"}, {OPT_USER_ID, "user_id=%u"}, {OPT_GROUP_ID, "group_id=%u"}, {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, {OPT_ALLOW_OTHER, "allow_other"}, {OPT_MAX_READ, "max_read=%u"}, {OPT_BLKSIZE, "blksize=%u"}, {OPT_ALLOW_UTIME, "allow_utime=%o"}, {OPT_ERR, NULL} }; static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) { char *p; memset(d, 0, sizeof(struct fuse_mount_data)); d->max_read = ~0; d->blksize = FUSE_DEFAULT_BLKSIZE; while ((p = strsep(&opt, ",")) != NULL) { int token; int value; substring_t args[MAX_OPT_ARGS]; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case OPT_FD: if (match_int(&args[0], &value)) return 0; d->fd = value; d->fd_present = 1; break; case OPT_ROOTMODE: if (match_octal(&args[0], &value)) return 0; if (!fuse_valid_type(value)) return 0; d->rootmode = value; d->rootmode_present = 1; break; case OPT_USER_ID: if (match_int(&args[0], &value)) return 0; d->user_id = value; d->user_id_present = 1; break; case OPT_GROUP_ID: if (match_int(&args[0], &value)) return 0; d->group_id = value; d->group_id_present = 1; break; case OPT_DEFAULT_PERMISSIONS: d->flags |= FUSE_DEFAULT_PERMISSIONS; break; case OPT_ALLOW_OTHER: d->flags |= FUSE_ALLOW_OTHER; break; case OPT_MAX_READ: if (match_int(&args[0], &value)) return 0; d->max_read = value; break; case OPT_BLKSIZE: if (!is_bdev || match_int(&args[0], &value)) return 0; d->blksize = value; break; case OPT_ALLOW_UTIME: if (match_octal(&args[0], &value)) return 0; d->allow_utime = value & (S_IWGRP | S_IWOTH); break;
/* any unrecognised option rejects the whole mount (parse returns 0) */
default: return 0; } } if (!d->fd_present || !d->rootmode_present || !d->user_id_present || !d->group_id_present) return 0; return 1; } static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt) { struct fuse_conn *fc = get_fuse_conn_super(mnt->mnt_sb); seq_printf(m, ",user_id=%u", fc->user_id); seq_printf(m, ",group_id=%u", fc->group_id); if (fc->flags & FUSE_DEFAULT_PERMISSIONS) seq_puts(m, ",default_permissions"); if (fc->flags & FUSE_ALLOW_OTHER) seq_puts(m, ",allow_other"); if (fc->max_read != ~0) seq_printf(m, ",max_read=%u", fc->max_read); if (mnt->mnt_sb->s_bdev && mnt->mnt_sb->s_blocksize != FUSE_DEFAULT_BLKSIZE) seq_printf(m, ",blksize=%lu", mnt->mnt_sb->s_blocksize); if (fc->allow_utime) seq_printf(m, ",allow_utime=%04o", fc->allow_utime); return 0; } void fuse_conn_init(struct fuse_conn *fc) { memset(fc, 0, sizeof(*fc)); spin_lock_init(&fc->lock); mutex_init(&fc->inst_mutex); init_rwsem(&fc->killsb); atomic_set(&fc->count, 1); init_waitqueue_head(&fc->waitq); init_waitqueue_head(&fc->blocked_waitq); init_waitqueue_head(&fc->reserved_req_waitq); INIT_LIST_HEAD(&fc->pending); INIT_LIST_HEAD(&fc->processing); INIT_LIST_HEAD(&fc->io); INIT_LIST_HEAD(&fc->interrupts); INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); fc->forget_list_tail = &fc->forget_list_head; atomic_set(&fc->num_waiting, 0); fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; fc->khctr = 0; fc->polled_files = RB_ROOT; fc->reqctr = 0; fc->blocked = 1; fc->attr_version = 1; get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); } EXPORT_SYMBOL_GPL(fuse_conn_init); void fuse_conn_put(struct fuse_conn *fc) { if (atomic_dec_and_test(&fc->count)) { if (fc->destroy_req) fuse_request_free(fc->destroy_req); mutex_destroy(&fc->inst_mutex); fc->release(fc); } } EXPORT_SYMBOL_GPL(fuse_conn_put); struct fuse_conn *fuse_conn_get(struct fuse_conn *fc) { atomic_inc(&fc->count); return fc; }
/*
 * NOTE(review): continuation of the collapsed fs/fuse/inode.c region (see
 * note above file metadata mismatch). This span contains the NFS export
 * support (fuse_get_root_inode(), fuse_get_dentry(), fuse_encode_fh(),
 * fuse_fh_to_dentry(), fuse_fh_to_parent(), fuse_get_parent(), the
 * export/super operations tables), the INIT handshake
 * (sanitize_global_limit(), set_global_limit(), process_init_limits(),
 * process_init_reply(), fuse_send_init()), fuse_bdi_init(),
 * fuse_fill_super(), the fuse/fuseblk filesystem type registration, and
 * the module init/exit plumbing. Tokens are unmodified; #ifdef/#define
 * directives folded onto shared lines need their own lines again before
 * this compiles — TODO restore formatting from upstream.
 */
EXPORT_SYMBOL_GPL(fuse_conn_get); static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode) { struct fuse_attr attr; memset(&attr, 0, sizeof(attr)); attr.mode = mode; attr.ino = FUSE_ROOT_ID; attr.nlink = 1; return fuse_iget(sb, 1, 0, &attr, 0, 0); } struct fuse_inode_handle { u64 nodeid; u32 generation; }; static struct dentry *fuse_get_dentry(struct super_block *sb, struct fuse_inode_handle *handle) { struct fuse_conn *fc = get_fuse_conn_super(sb); struct inode *inode; struct dentry *entry; int err = -ESTALE; if (handle->nodeid == 0) goto out_err; inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid); if (!inode) { struct fuse_entry_out outarg; struct qstr name; if (!fc->export_support) goto out_err; name.len = 1; name.name = "."; err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg, &inode); if (err && err != -ENOENT) goto out_err; if (err || !inode) { err = -ESTALE; goto out_err; } err = -EIO; if (get_node_id(inode) != handle->nodeid) goto out_iput; } err = -ESTALE; if (inode->i_generation != handle->generation) goto out_iput; entry = d_obtain_alias(inode); if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(entry); return entry; out_iput: iput(inode); out_err: return ERR_PTR(err); } static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, int connectable) { struct inode *inode = dentry->d_inode; bool encode_parent = connectable && !S_ISDIR(inode->i_mode); int len = encode_parent ?
/* file handle is 6 u32 words with parent info, 3 without */
6 : 3; u64 nodeid; u32 generation; if (*max_len < len) { *max_len = len; return 255; } nodeid = get_fuse_inode(inode)->nodeid; generation = inode->i_generation; fh[0] = (u32)(nodeid >> 32); fh[1] = (u32)(nodeid & 0xffffffff); fh[2] = generation; if (encode_parent) { struct inode *parent; spin_lock(&dentry->d_lock); parent = dentry->d_parent->d_inode; nodeid = get_fuse_inode(parent)->nodeid; generation = parent->i_generation; spin_unlock(&dentry->d_lock); fh[3] = (u32)(nodeid >> 32); fh[4] = (u32)(nodeid & 0xffffffff); fh[5] = generation; } *max_len = len; return encode_parent ? 0x82 : 0x81; } static struct dentry *fuse_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle handle; if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3) return NULL; handle.nodeid = (u64) fid->raw[0] << 32; handle.nodeid |= (u64) fid->raw[1]; handle.generation = fid->raw[2]; return fuse_get_dentry(sb, &handle); } static struct dentry *fuse_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle parent; if (fh_type != 0x82 || fh_len < 6) return NULL; parent.nodeid = (u64) fid->raw[3] << 32; parent.nodeid |= (u64) fid->raw[4]; parent.generation = fid->raw[5]; return fuse_get_dentry(sb, &parent); } static struct dentry *fuse_get_parent(struct dentry *child) { struct inode *child_inode = child->d_inode; struct fuse_conn *fc = get_fuse_conn(child_inode); struct inode *inode; struct dentry *parent; struct fuse_entry_out outarg; struct qstr name; int err; if (!fc->export_support) return ERR_PTR(-ESTALE); name.len = 2; name.name = ".."; err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode), &name, &outarg, &inode); if (err) { if (err == -ENOENT) return ERR_PTR(-ESTALE); return ERR_PTR(err); } parent = d_obtain_alias(inode); if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(parent); return parent; } static const struct export_operations
/* NFS export hooks wired to the handle encode/decode helpers above */
fuse_export_operations = { .fh_to_dentry = fuse_fh_to_dentry, .fh_to_parent = fuse_fh_to_parent, .encode_fh = fuse_encode_fh, .get_parent = fuse_get_parent, }; static const struct super_operations fuse_super_operations = { .alloc_inode = fuse_alloc_inode, .destroy_inode = fuse_destroy_inode, .evict_inode = fuse_evict_inode, .drop_inode = generic_delete_inode, .remount_fs = fuse_remount_fs, .put_super = fuse_put_super, .umount_begin = fuse_umount_begin, .statfs = fuse_statfs, .show_options = fuse_show_options, }; static void sanitize_global_limit(unsigned *limit) { if (*limit == 0) *limit = ((num_physpages << PAGE_SHIFT) >> 13) / sizeof(struct fuse_req); if (*limit >= 1 << 16) *limit = (1 << 16) - 1; } static int set_global_limit(const char *val, struct kernel_param *kp) { int rv; rv = param_set_uint(val, kp); if (rv) return rv; sanitize_global_limit((unsigned *)kp->arg); return 0; } static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg) { int cap_sys_admin = capable(CAP_SYS_ADMIN); if (arg->minor < 13) return; sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); if (arg->max_background) { fc->max_background = arg->max_background; if (!cap_sys_admin && fc->max_background > max_user_bgreq) fc->max_background = max_user_bgreq; } if (arg->congestion_threshold) { fc->congestion_threshold = arg->congestion_threshold; if (!cap_sys_admin && fc->congestion_threshold > max_user_congthresh) fc->congestion_threshold = max_user_congthresh; } } static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_init_out *arg = &req->misc.init_out; if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION) fc->conn_error = 1; else { unsigned long ra_pages; process_init_limits(fc, arg); if (arg->minor >= 6) { ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; if (arg->flags & FUSE_ASYNC_READ) fc->async_read = 1; if (!(arg->flags & FUSE_POSIX_LOCKS)) fc->no_lock = 1; if (arg->minor >= 17) { if
/* protocol 7.17+ advertises flock support with its own flag */
(!(arg->flags & FUSE_FLOCK_LOCKS)) fc->no_flock = 1; } else { if (!(arg->flags & FUSE_POSIX_LOCKS)) fc->no_flock = 1; } if (arg->flags & FUSE_ATOMIC_O_TRUNC) fc->atomic_o_trunc = 1; if (arg->minor >= 9) { /* LOOKUP has dependency on proto version */ if (arg->flags & FUSE_EXPORT_SUPPORT) fc->export_support = 1; } if (arg->flags & FUSE_BIG_WRITES) fc->big_writes = 1; if (arg->flags & FUSE_DONT_MASK) fc->dont_mask = 1; } else { ra_pages = fc->max_read / PAGE_CACHE_SIZE; fc->no_lock = 1; fc->no_flock = 1; } fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); fc->minor = arg->minor; fc->max_write = arg->minor < 5 ? 4096 : arg->max_write; fc->max_write = max_t(unsigned, 4096, fc->max_write); fc->conn_init = 1; } fc->blocked = 0; wake_up_all(&fc->blocked_waitq); } static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_init_in *arg = &req->misc.init_in; arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | FUSE_FLOCK_LOCKS; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); req->in.args[0].value = arg; req->out.numargs = 1; /* Variable length argument used for backward compatibility with interface version < 7.5.
Rest of init_out is zeroed by do_get_request(), so a short reply is not a problem */ req->out.argvar = 1; req->out.args[0].size = sizeof(struct fuse_init_out); req->out.args[0].value = &req->misc.init_out; req->end = process_init_reply; fuse_request_send_background(fc, req); } static void fuse_free_conn(struct fuse_conn *fc) { kfree(fc); } static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) { int err; fc->bdi.name = "fuse"; fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; /* fuse does it's own writeback accounting */ fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; err = bdi_init(&fc->bdi); if (err) return err; fc->bdi_initialized = 1; if (sb->s_bdev) { err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk", MAJOR(fc->dev), MINOR(fc->dev)); } else { err = bdi_register_dev(&fc->bdi, fc->dev); } if (err) return err; /* * For a single fuse filesystem use max 1% of dirty + * writeback threshold. * * This gives about 1M of write buffer for memory maps on a * machine with 1G and 10% dirty_ratio, which should be more * than enough.
* * Privileged users can raise it by writing to * * /sys/class/bdi/<bdi>/max_ratio */ bdi_set_max_ratio(&fc->bdi, 1); return 0; } static int fuse_fill_super(struct super_block *sb, void *data, int silent) { struct fuse_conn *fc; struct inode *root; struct fuse_mount_data d; struct file *file; struct dentry *root_dentry; struct fuse_req *init_req; int err; int is_bdev = sb->s_bdev != NULL; err = -EINVAL; if (sb->s_flags & MS_MANDLOCK) goto err; sb->s_flags &= ~MS_NOSEC; if (!parse_fuse_opt((char *) data, &d, is_bdev)) goto err; if (is_bdev) { #ifdef CONFIG_BLOCK err = -EINVAL; if (!sb_set_blocksize(sb, d.blksize)) goto err; #endif } else { sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; } sb->s_magic = FUSE_SUPER_MAGIC; sb->s_op = &fuse_super_operations; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_export_op = &fuse_export_operations; file = fget(d.fd); err = -EINVAL; if (!file) goto err; if (file->f_op != &fuse_dev_operations) goto err_fput; fc = kmalloc(sizeof(*fc), GFP_KERNEL); err = -ENOMEM; if (!fc) goto err_fput; fuse_conn_init(fc); fc->dev = sb->s_dev; fc->sb = sb; err = fuse_bdi_init(fc, sb); if (err) goto err_put_conn; sb->s_bdi = &fc->bdi; /* Handle umasking inside the fuse code */ if (sb->s_flags & MS_POSIXACL) fc->dont_mask = 1; sb->s_flags |= MS_POSIXACL; fc->release = fuse_free_conn; fc->flags = d.flags; fc->user_id = d.user_id; fc->group_id = d.group_id; fc->max_read = max_t(unsigned, 4096, d.max_read); fc->allow_utime = d.allow_utime; /* Used by get_root_inode() */ sb->s_fs_info = fc; err = -ENOMEM; root = fuse_get_root_inode(sb, d.rootmode); if (!root) goto err_put_conn; root_dentry = d_alloc_root(root); if (!root_dentry) { iput(root); goto err_put_conn; } /* only now - we want root dentry with NULL ->d_op */ sb->s_d_op = &fuse_dentry_operations; init_req = fuse_request_alloc(); if (!init_req) goto err_put_root; if (is_bdev) { fc->destroy_req = fuse_request_alloc(); if (!fc->destroy_req) goto err_free_init_req; }
/* publish the fully-built connection under fuse_mutex; after this point
 * the /dev/fuse file owns a reference and the mount is live */
mutex_lock(&fuse_mutex); err = -EINVAL; if (file->private_data) goto err_unlock; err = fuse_ctl_add_conn(fc); if (err) goto err_unlock; list_add_tail(&fc->entry, &fuse_conn_list); sb->s_root = root_dentry; fc->connected = 1; file->private_data = fuse_conn_get(fc); mutex_unlock(&fuse_mutex); /* * atomic_dec_and_test() in fput() provides the necessary * memory barrier for file->private_data to be visible on all * CPUs after this */ fput(file); fuse_send_init(fc, init_req); return 0; err_unlock: mutex_unlock(&fuse_mutex); err_free_init_req: fuse_request_free(init_req); err_put_root: dput(root_dentry); err_put_conn: fuse_bdi_destroy(fc); fuse_conn_put(fc); err_fput: fput(file); err: return err; } static struct dentry *fuse_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { return mount_nodev(fs_type, flags, raw_data, fuse_fill_super); } static void fuse_kill_sb_anon(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc) { down_write(&fc->killsb); fc->sb = NULL; up_write(&fc->killsb); } kill_anon_super(sb); } static struct file_system_type fuse_fs_type = { .owner = THIS_MODULE, .name = "fuse", .fs_flags = FS_HAS_SUBTYPE, .mount = fuse_mount, .kill_sb = fuse_kill_sb_anon, }; #ifdef CONFIG_BLOCK static struct dentry *fuse_mount_blk(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { return mount_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super); } static void fuse_kill_sb_blk(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc) { down_write(&fc->killsb); fc->sb = NULL; up_write(&fc->killsb); } kill_block_super(sb); } static struct file_system_type fuseblk_fs_type = { .owner = THIS_MODULE, .name = "fuseblk", .mount = fuse_mount_blk, .kill_sb = fuse_kill_sb_blk, .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE, }; static inline int register_fuseblk(void) { return register_filesystem(&fuseblk_fs_type); } static inline void
unregister_fuseblk(void) { unregister_filesystem(&fuseblk_fs_type); } #else static inline int register_fuseblk(void) { return 0; } static inline void unregister_fuseblk(void) { } #endif static void fuse_inode_init_once(void *foo) { struct inode *inode = foo; inode_init_once(inode); } static int __init fuse_fs_init(void) { int err; err = register_filesystem(&fuse_fs_type); if (err) goto out; err = register_fuseblk(); if (err) goto out_unreg; fuse_inode_cachep = kmem_cache_create("fuse_inode", sizeof(struct fuse_inode), 0, SLAB_HWCACHE_ALIGN, fuse_inode_init_once); err = -ENOMEM; if (!fuse_inode_cachep) goto out_unreg2; return 0; out_unreg2: unregister_fuseblk(); out_unreg: unregister_filesystem(&fuse_fs_type); out: return err; } static void fuse_fs_cleanup(void) { unregister_filesystem(&fuse_fs_type); unregister_fuseblk(); kmem_cache_destroy(fuse_inode_cachep); } static struct kobject *fuse_kobj; static struct kobject *connections_kobj; static int fuse_sysfs_init(void) { int err; fuse_kobj = kobject_create_and_add("fuse", fs_kobj); if (!fuse_kobj) { err = -ENOMEM; goto out_err; } connections_kobj = kobject_create_and_add("connections", fuse_kobj); if (!connections_kobj) { err = -ENOMEM; goto out_fuse_unregister; } return 0; out_fuse_unregister: kobject_put(fuse_kobj); out_err: return err; } static void fuse_sysfs_cleanup(void) { kobject_put(connections_kobj); kobject_put(fuse_kobj); } static int __init fuse_init(void) { int res; printk(KERN_INFO "fuse init (API version %i.%i)\n", FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION); INIT_LIST_HEAD(&fuse_conn_list); res = fuse_fs_init(); if (res) goto err; res = fuse_dev_init(); if (res) goto err_fs_cleanup; res = fuse_sysfs_init(); if (res) goto err_dev_cleanup; res = fuse_ctl_init(); if (res) goto err_sysfs_cleanup; sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); return 0; err_sysfs_cleanup: fuse_sysfs_cleanup(); err_dev_cleanup: fuse_dev_cleanup(); err_fs_cleanup: fuse_fs_cleanup();
/* error labels unwind in reverse registration order */
err: return res; } static void __exit fuse_exit(void) { printk(KERN_DEBUG "fuse exit\n"); fuse_ctl_cleanup(); fuse_sysfs_cleanup(); fuse_fs_cleanup(); fuse_dev_cleanup(); } module_init(fuse_init); module_exit(fuse_exit);
gpl-2.0
FrostedKernel/android_kernel_htc_msm8960
sound/soc/msm/msm-pcm-voip.c
60
31698
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/control.h> #include <asm/dma.h> #include "msm-pcm-q6.h" #include "msm-pcm-routing.h" #include "qdsp6/q6voice.h" #ifdef CONFIG_MACH_HTC #define VOIP_MAX_Q_LEN 2 #else #define VOIP_MAX_Q_LEN 10 #endif #define VOIP_MAX_VOC_PKT_SIZE 640 #define VOIP_MIN_VOC_PKT_SIZE 320 /* Length of the DSP frame info header added to the voc packet. 
*/ #define DSP_FRAME_HDR_LEN 1 #define MODE_IS127 0x2 #define MODE_4GV_NB 0x3 #define MODE_4GV_WB 0x4 #define MODE_AMR 0x5 #define MODE_AMR_WB 0xD #define MODE_PCM 0xC enum format { FORMAT_S16_LE = 2, FORMAT_SPECIAL = 31, }; enum amr_rate_type { AMR_RATE_4750, /* AMR 4.75 kbps */ AMR_RATE_5150, /* AMR 5.15 kbps */ AMR_RATE_5900, /* AMR 5.90 kbps */ AMR_RATE_6700, /* AMR 6.70 kbps */ AMR_RATE_7400, /* AMR 7.40 kbps */ AMR_RATE_7950, /* AMR 7.95 kbps */ AMR_RATE_10200, /* AMR 10.20 kbps */ AMR_RATE_12200, /* AMR 12.20 kbps */ AMR_RATE_6600, /* AMR-WB 6.60 kbps */ AMR_RATE_8850, /* AMR-WB 8.85 kbps */ AMR_RATE_12650, /* AMR-WB 12.65 kbps */ AMR_RATE_14250, /* AMR-WB 14.25 kbps */ AMR_RATE_15850, /* AMR-WB 15.85 kbps */ AMR_RATE_18250, /* AMR-WB 18.25 kbps */ AMR_RATE_19850, /* AMR-WB 19.85 kbps */ AMR_RATE_23050, /* AMR-WB 23.05 kbps */ AMR_RATE_23850, /* AMR-WB 23.85 kbps */ AMR_RATE_UNDEF }; enum voip_state { VOIP_STOPPED, VOIP_STARTED, }; struct voip_frame { union { uint32_t frame_type; uint32_t packet_rate; } header; uint32_t len; uint8_t voc_pkt[VOIP_MAX_VOC_PKT_SIZE]; }; struct voip_buf_node { struct list_head list; struct voip_frame frame; }; struct voip_drv_info { enum voip_state state; struct snd_pcm_substream *playback_substream; struct snd_pcm_substream *capture_substream; struct list_head in_queue; struct list_head free_in_queue; struct list_head out_queue; struct list_head free_out_queue; wait_queue_head_t out_wait; wait_queue_head_t in_wait; struct mutex lock; spinlock_t dsp_lock; spinlock_t dsp_ul_lock; uint32_t mode; uint32_t rate_type; uint32_t rate; uint32_t dtx_mode; uint8_t capture_start; uint8_t playback_start; uint8_t playback_instance; uint8_t capture_instance; unsigned int play_samp_rate; unsigned int cap_samp_rate; unsigned int pcm_size; unsigned int pcm_count; unsigned int pcm_playback_irq_pos; /* IRQ position */ unsigned int pcm_playback_buf_pos; /* position in buffer */ unsigned int pcm_capture_size; unsigned int pcm_capture_count; unsigned 
int pcm_capture_irq_pos; /* IRQ position */ unsigned int pcm_capture_buf_pos; /* position in buffer */ }; static int voip_get_media_type(uint32_t mode, unsigned int samp_rate); static int voip_get_rate_type(uint32_t mode, uint32_t rate, uint32_t *rate_type); static int msm_voip_mode_rate_config_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); static int msm_voip_mode_rate_config_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); static struct voip_drv_info voip_info; static struct snd_pcm_hardware msm_pcm_hardware = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED), .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_SPECIAL, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .rate_min = 8000, .rate_max = 16000, .channels_min = 1, .channels_max = 1, .buffer_bytes_max = sizeof(struct voip_buf_node) * VOIP_MAX_Q_LEN, .period_bytes_min = VOIP_MIN_VOC_PKT_SIZE, .period_bytes_max = VOIP_MAX_VOC_PKT_SIZE, .periods_min = VOIP_MAX_Q_LEN, .periods_max = VOIP_MAX_Q_LEN, .fifo_size = 0, }; static int msm_voip_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int mute = ucontrol->value.integer.value[0]; pr_debug("%s: mute=%d\n", __func__, mute); voc_set_tx_mute(voc_get_session_id(VOIP_SESSION_NAME), TX_PATH, mute); return 0; } static int msm_voip_mute_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = 0; return 0; } static int msm_voip_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int volume = ucontrol->value.integer.value[0]; pr_debug("%s: volume: %d\n", __func__, volume); voc_set_rx_vol_index(voc_get_session_id(VOIP_SESSION_NAME), RX_PATH, volume); return 0; } static int msm_voip_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = 0; return 0; } static int 
msm_voip_dtx_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); voip_info.dtx_mode = ucontrol->value.integer.value[0]; pr_debug("%s: dtx: %d\n", __func__, voip_info.dtx_mode); mutex_unlock(&voip_info.lock); return 0; } static int msm_voip_dtx_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); ucontrol->value.integer.value[0] = voip_info.dtx_mode; mutex_unlock(&voip_info.lock); return 0; } static int msm_voip_fens_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int fens_enable = ucontrol->value.integer.value[0]; pr_debug("%s: FENS_VOIP enable=%d\n", __func__, fens_enable); voc_set_pp_enable(voc_get_session_id(VOIP_SESSION_NAME), MODULE_ID_VOICE_MODULE_FENS, fens_enable); return 0; } static int msm_voip_fens_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = voc_get_pp_enable(voc_get_session_id(VOIP_SESSION_NAME), MODULE_ID_VOICE_MODULE_FENS); return 0; } static struct snd_kcontrol_new msm_voip_controls[] = { SOC_SINGLE_EXT("Voip Tx Mute", SND_SOC_NOPM, 0, 1, 0, msm_voip_mute_get, msm_voip_mute_put), SOC_SINGLE_EXT("Voip Rx Volume", SND_SOC_NOPM, 0, 5, 0, msm_voip_volume_get, msm_voip_volume_put), SOC_SINGLE_MULTI_EXT("Voip Mode Rate Config", SND_SOC_NOPM, 0, 23850, 0, 2, msm_voip_mode_rate_config_get, msm_voip_mode_rate_config_put), SOC_SINGLE_EXT("Voip Dtx Mode", SND_SOC_NOPM, 0, 1, 0, msm_voip_dtx_mode_get, msm_voip_dtx_mode_put), SOC_SINGLE_EXT("FENS_VOIP Enable", SND_SOC_NOPM, 0, 1, 0, msm_voip_fens_get, msm_voip_fens_put), }; static int msm_pcm_voip_probe(struct snd_soc_platform *platform) { snd_soc_add_platform_controls(platform, msm_voip_controls, ARRAY_SIZE(msm_voip_controls)); return 0; } /* sample rate supported */ static unsigned int supported_sample_rates[] = {8000, 16000}; /* capture path */ static void voip_process_ul_pkt(uint8_t *voc_pkt, uint32_t pkt_len, 
void *private_data)
{
	struct voip_buf_node *buf_node = NULL;
	struct voip_drv_info *prtd = private_data;
	unsigned long dsp_flags;

	if (prtd->capture_substream == NULL)
		return;

	/* Copy up-link packet into out_queue. */
	spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);

	/* Discard UL packets until capture has actually been started. */
	if (!list_empty(&prtd->free_out_queue) && prtd->capture_start) {
		buf_node = list_first_entry(&prtd->free_out_queue,
					struct voip_buf_node, list);
		list_del(&buf_node->list);
		switch (prtd->mode) {
		case MODE_AMR_WB:
		case MODE_AMR: {
			/* Remove the DSP frame info header. Header format:
			 * Bits 0-3: Frame rate
			 * Bits 4-7: Frame type
			 */
			buf_node->frame.header.frame_type =
					((*voc_pkt) & 0xF0) >> 4;
			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
			buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
			memcpy(&buf_node->frame.voc_pkt[0],
				voc_pkt,
				buf_node->frame.len);
			list_add_tail(&buf_node->list, &prtd->out_queue);
			break;
		}
		case MODE_IS127:
		case MODE_4GV_NB:
		case MODE_4GV_WB: {
			/* Remove the DSP frame info header.
			 * Header format:
			 * Bits 0-3: frame rate
			 */
			buf_node->frame.header.packet_rate = (*voc_pkt) & 0x0F;
			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
			buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
			memcpy(&buf_node->frame.voc_pkt[0],
				voc_pkt,
				buf_node->frame.len);
			list_add_tail(&buf_node->list, &prtd->out_queue);
			break;
		}
		default: {
			/* PCM and any other mode: no DSP header to strip. */
			buf_node->frame.len = pkt_len;
			memcpy(&buf_node->frame.voc_pkt[0],
				voc_pkt,
				buf_node->frame.len);
			list_add_tail(&buf_node->list, &prtd->out_queue);
		}
		}
		pr_debug("ul_pkt: pkt_len =%d, frame.len=%d\n", pkt_len,
			buf_node->frame.len);
		prtd->pcm_capture_irq_pos += prtd->pcm_capture_count;
		spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
		/* Period boundary crossed: notify ALSA outside the lock. */
		snd_pcm_period_elapsed(prtd->capture_substream);
	} else {
		spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
		pr_err("UL data dropped\n");
	}

	wake_up(&prtd->out_wait);
}

/* Playback (downlink) path: the voice driver pulls one DL packet. */
static void voip_process_dl_pkt(uint8_t *voc_pkt, uint32_t *pkt_len,
					void *private_data)
{
	struct voip_buf_node *buf_node = NULL;
	struct voip_drv_info *prtd = private_data;
	unsigned long dsp_flags;

	if (prtd->playback_substream == NULL)
		return;

	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);

	if (!list_empty(&prtd->in_queue) && prtd->playback_start) {
		buf_node = list_first_entry(&prtd->in_queue,
					struct voip_buf_node, list);
		list_del(&buf_node->list);
		switch (prtd->mode) {
		case MODE_AMR:
		case MODE_AMR_WB: {
			/* Add the DSP frame info header. Header format:
			 * Bits 0-3: Frame rate
			 * Bits 4-7: Frame type
			 */
			*voc_pkt = ((buf_node->frame.header.frame_type &
					0x0F) << 4) |
					(prtd->rate_type & 0x0F);
			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
			*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
			memcpy(voc_pkt,
				&buf_node->frame.voc_pkt[0],
				buf_node->frame.len);
			list_add_tail(&buf_node->list, &prtd->free_in_queue);
			break;
		}
		case MODE_IS127:
		case MODE_4GV_NB:
		case MODE_4GV_WB: {
			/* Add the DSP frame info header.
			 * Header format:
			 * Bits 0-3 : Frame rate
			 */
			*voc_pkt = buf_node->frame.header.packet_rate & 0x0F;
			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
			*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
			memcpy(voc_pkt,
				&buf_node->frame.voc_pkt[0],
				buf_node->frame.len);
			list_add_tail(&buf_node->list, &prtd->free_in_queue);
			break;
		}
		default: {
			*pkt_len = buf_node->frame.len;
			memcpy(voc_pkt,
				&buf_node->frame.voc_pkt[0],
				buf_node->frame.len);
			list_add_tail(&buf_node->list, &prtd->free_in_queue);
		}
		}
		pr_debug("dl_pkt: pkt_len=%d, frame_len=%d\n", *pkt_len,
			buf_node->frame.len);
		prtd->pcm_playback_irq_pos += prtd->pcm_count;
		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
		snd_pcm_period_elapsed(prtd->playback_substream);
	} else {
		/* Nothing queued: report a zero-length packet to the DSP. */
		*pkt_len = 0;
		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
		pr_err("DL data not available\n");
	}

	wake_up(&prtd->in_wait);
}

static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
	.count = ARRAY_SIZE(supported_sample_rates),
	.list = supported_sample_rates,
	.mask = 0,
};

/* Latch playback geometry (rate, buffer/period bytes), reset positions. */
static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voip_drv_info *prtd = runtime->private_data;

	prtd->play_samp_rate = runtime->rate;
	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
	prtd->pcm_playback_irq_pos = 0;
	prtd->pcm_playback_buf_pos = 0;

	return 0;
}

/* Latch capture geometry and reset positions. */
static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voip_drv_info *prtd = runtime->private_data;
	int ret = 0;

	prtd->cap_samp_rate = runtime->rate;
	prtd->pcm_capture_size = snd_pcm_lib_buffer_bytes(substream);
	prtd->pcm_capture_count = snd_pcm_lib_period_bytes(substream);
	prtd->pcm_capture_irq_pos = 0;
	prtd->pcm_capture_buf_pos = 0;

	return ret;
}

/* ALSA trigger op: start/stop flags gate the packet-exchange callbacks. */
static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	int ret = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct
voip_drv_info *prtd = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: pr_debug("%s: Trigger start\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) prtd->capture_start = 1; else prtd->playback_start = 1; break; case SNDRV_PCM_TRIGGER_STOP: pr_debug("SNDRV_PCM_TRIGGER_STOP\n"); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) prtd->playback_start = 0; else prtd->capture_start = 0; break; default: ret = -EINVAL; break; } return ret; } static int msm_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = &voip_info; int ret = 0; pr_debug("%s, VoIP\n", __func__); mutex_lock(&prtd->lock); runtime->hw = msm_pcm_hardware; ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_sample_rates); if (ret < 0) pr_debug("snd_pcm_hw_constraint_list failed\n"); ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) { pr_debug("snd_pcm_hw_constraint_integer failed\n"); goto err; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { prtd->playback_substream = substream; prtd->playback_instance++; } else { prtd->capture_substream = substream; prtd->capture_instance++; } runtime->private_data = prtd; err: mutex_unlock(&prtd->lock); return ret; } static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; struct voip_buf_node *buf_node = NULL; struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; unsigned long dsp_flags; int count = frames_to_bytes(runtime, frames); pr_debug("%s: count = %d, frames=%d\n", __func__, count, (int)frames); ret = wait_event_interruptible_timeout(prtd->in_wait, (!list_empty(&prtd->free_in_queue) || prtd->state == VOIP_STOPPED), 1 * HZ); if (ret > 0) { if (count <= VOIP_MAX_VOC_PKT_SIZE) { 
spin_lock_irqsave(&prtd->dsp_lock, dsp_flags); buf_node = list_first_entry(&prtd->free_in_queue, struct voip_buf_node, list); list_del(&buf_node->list); spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags); if (prtd->mode == MODE_PCM) { ret = copy_from_user(&buf_node->frame.voc_pkt, buf, count); buf_node->frame.len = count; } else ret = copy_from_user(&buf_node->frame, buf, count); spin_lock_irqsave(&prtd->dsp_lock, dsp_flags); list_add_tail(&buf_node->list, &prtd->in_queue); spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags); } else { pr_err("%s: Write cnt %d is > VOIP_MAX_VOC_PKT_SIZE\n", __func__, count); ret = -ENOMEM; } } else if (ret == 0) { pr_err("%s: No free DL buffs\n", __func__); ret = -ETIMEDOUT; } else { pr_err("%s: playback copy was interrupted\n", __func__); } return ret; } static int msm_pcm_capture_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; int count = 0; struct voip_buf_node *buf_node = NULL; struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; unsigned long dsp_flags; int size; count = frames_to_bytes(runtime, frames); pr_debug("%s: count = %d\n", __func__, count); ret = wait_event_interruptible_timeout(prtd->out_wait, (!list_empty(&prtd->out_queue) || prtd->state == VOIP_STOPPED), 1 * HZ); if (ret > 0) { if (count <= VOIP_MAX_VOC_PKT_SIZE) { spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags); buf_node = list_first_entry(&prtd->out_queue, struct voip_buf_node, list); list_del(&buf_node->list); spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags); if (prtd->mode == MODE_PCM) { ret = copy_to_user(buf, &buf_node->frame.voc_pkt, buf_node->frame.len); } else { size = sizeof(buf_node->frame.header) + sizeof(buf_node->frame.len) + buf_node->frame.len; ret = copy_to_user(buf, &buf_node->frame, size); } if (ret) { pr_err("%s: Copy to user retuned %d\n", __func__, ret); ret = -EFAULT; } 
spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags); list_add_tail(&buf_node->list, &prtd->free_out_queue); spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags); } else { pr_err("%s: Read count %d > VOIP_MAX_VOC_PKT_SIZE\n", __func__, count); ret = -ENOMEM; } } else if (ret == 0) { pr_err("%s: No UL data available\n", __func__); ret = -ETIMEDOUT; } else { pr_err("%s: Read was interrupted\n", __func__); ret = -ERESTARTSYS; } return ret; } static int msm_pcm_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames); return ret; } static int msm_pcm_close(struct snd_pcm_substream *substream) { int ret = 0; struct list_head *ptr = NULL; struct list_head *next = NULL; struct voip_buf_node *buf_node = NULL; struct snd_dma_buffer *p_dma_buf, *c_dma_buf; struct snd_pcm_substream *p_substream, *c_substream; struct snd_pcm_runtime *runtime; struct voip_drv_info *prtd; unsigned long dsp_flags; if (substream == NULL) { pr_err("substream is NULL\n"); return -EINVAL; } runtime = substream->runtime; prtd = runtime->private_data; wake_up(&prtd->out_wait); mutex_lock(&prtd->lock); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) prtd->playback_instance--; else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) prtd->capture_instance--; if (!prtd->playback_instance && !prtd->capture_instance) { if (prtd->state == VOIP_STARTED) { prtd->state = VOIP_STOPPED; voc_end_voice_call( voc_get_session_id(VOIP_SESSION_NAME)); voc_register_mvs_cb(NULL, NULL, prtd); } /* release all buffer */ /* release in_queue and free_in_queue */ pr_debug("release all buffer\n"); p_substream = prtd->playback_substream; if (p_substream == NULL) { pr_debug("p_substream is NULL\n"); goto capt; } p_dma_buf = 
&p_substream->dma_buffer; if (p_dma_buf == NULL) { pr_debug("p_dma_buf is NULL\n"); goto capt; } if (p_dma_buf->area != NULL) { spin_lock_irqsave(&prtd->dsp_lock, dsp_flags); list_for_each_safe(ptr, next, &prtd->in_queue) { buf_node = list_entry(ptr, struct voip_buf_node, list); list_del(&buf_node->list); } list_for_each_safe(ptr, next, &prtd->free_in_queue) { buf_node = list_entry(ptr, struct voip_buf_node, list); list_del(&buf_node->list); } spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags); dma_free_coherent(p_substream->pcm->card->dev, runtime->hw.buffer_bytes_max, p_dma_buf->area, p_dma_buf->addr); p_dma_buf->area = NULL; } /* release out_queue and free_out_queue */ capt: c_substream = prtd->capture_substream; if (c_substream == NULL) { pr_debug("c_substream is NULL\n"); goto done; } c_dma_buf = &c_substream->dma_buffer; if (c_substream == NULL) { pr_debug("c_dma_buf is NULL.\n"); goto done; } if (c_dma_buf->area != NULL) { spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags); list_for_each_safe(ptr, next, &prtd->out_queue) { buf_node = list_entry(ptr, struct voip_buf_node, list); list_del(&buf_node->list); } list_for_each_safe(ptr, next, &prtd->free_out_queue) { buf_node = list_entry(ptr, struct voip_buf_node, list); list_del(&buf_node->list); } spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags); dma_free_coherent(c_substream->pcm->card->dev, runtime->hw.buffer_bytes_max, c_dma_buf->area, c_dma_buf->addr); c_dma_buf->area = NULL; } done: prtd->capture_substream = NULL; prtd->playback_substream = NULL; } mutex_unlock(&prtd->lock); return ret; } static int msm_pcm_prepare(struct snd_pcm_substream *substream) { int ret = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; uint32_t media_type = 0; uint32_t rate_type = 0; mutex_lock(&prtd->lock); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_prepare(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = 
msm_pcm_capture_prepare(substream); if ((runtime->format != FORMAT_SPECIAL) && ((prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB) || (prtd->mode == MODE_IS127) || (prtd->mode == MODE_4GV_NB) || (prtd->mode == MODE_4GV_WB))) { pr_err("mode:%d and format:%u are not mached\n", prtd->mode, (uint32_t)runtime->format); ret = -EINVAL; goto done; } if ((runtime->format != FORMAT_S16_LE) && (prtd->mode == MODE_PCM)) { pr_err("mode:%d and format:%u are not mached\n", prtd->mode, (uint32_t)runtime->format); ret = -EINVAL; goto done; } if (prtd->playback_instance && prtd->capture_instance && (prtd->state != VOIP_STARTED)) { ret = voip_get_rate_type(prtd->mode, prtd->rate, &rate_type); if (ret < 0) { pr_err("fail at getting rate_type\n"); ret = -EINVAL; goto done; } prtd->rate_type = rate_type; media_type = voip_get_media_type(prtd->mode, prtd->play_samp_rate); if (media_type < 0) { pr_err("fail at getting media_type\n"); ret = -EINVAL; goto done; } pr_debug(" media_type=%d, rate_type=%d\n", media_type, rate_type); if ((prtd->play_samp_rate == 8000) && (prtd->cap_samp_rate == 8000)) voc_config_vocoder(media_type, rate_type, VSS_NETWORK_ID_VOIP_NB, voip_info.dtx_mode); else if ((prtd->play_samp_rate == 16000) && (prtd->cap_samp_rate == 16000)) voc_config_vocoder(media_type, rate_type, VSS_NETWORK_ID_VOIP_WB, voip_info.dtx_mode); else { pr_debug("%s: Invalid rate playback %d, capture %d\n", __func__, prtd->play_samp_rate, prtd->cap_samp_rate); goto done; } voc_register_mvs_cb(voip_process_ul_pkt, voip_process_dl_pkt, prtd); voc_start_voice_call(voc_get_session_id(VOIP_SESSION_NAME)); prtd->state = VOIP_STARTED; } done: mutex_unlock(&prtd->lock); return ret; } static snd_pcm_uframes_t msm_pcm_playback_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; pr_debug("%s\n", __func__); if (prtd->pcm_playback_irq_pos >= prtd->pcm_size) prtd->pcm_playback_irq_pos = 0; return 
bytes_to_frames(runtime, (prtd->pcm_playback_irq_pos)); } static snd_pcm_uframes_t msm_pcm_capture_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voip_drv_info *prtd = runtime->private_data; if (prtd->pcm_capture_irq_pos >= prtd->pcm_capture_size) prtd->pcm_capture_irq_pos = 0; return bytes_to_frames(runtime, (prtd->pcm_capture_irq_pos)); } static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream) { snd_pcm_uframes_t ret = 0; pr_debug("%s\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_pointer(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_pointer(substream); return ret; } static int msm_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; pr_debug("%s\n", __func__); dma_mmap_coherent(substream->pcm->card->dev, vma, runtime->dma_area, runtime->dma_addr, runtime->dma_bytes); return 0; } static int msm_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_dma_buffer *dma_buf = &substream->dma_buffer; struct voip_buf_node *buf_node = NULL; int i = 0, offset = 0; pr_debug("%s: voip\n", __func__); mutex_lock(&voip_info.lock); dma_buf->dev.type = SNDRV_DMA_TYPE_DEV; dma_buf->dev.dev = substream->pcm->card->dev; dma_buf->private_data = NULL; dma_buf->area = dma_alloc_coherent(substream->pcm->card->dev, runtime->hw.buffer_bytes_max, &dma_buf->addr, GFP_KERNEL); if (!dma_buf->area) { pr_err("%s:MSM VOIP dma_alloc failed\n", __func__); return -ENOMEM; } dma_buf->bytes = runtime->hw.buffer_bytes_max; memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { for (i = 0; i < VOIP_MAX_Q_LEN; i++) { buf_node = (void *)dma_buf->area + offset; list_add_tail(&buf_node->list, 
&voip_info.free_in_queue); offset = offset + sizeof(struct voip_buf_node); } } else { for (i = 0; i < VOIP_MAX_Q_LEN; i++) { buf_node = (void *) dma_buf->area + offset; list_add_tail(&buf_node->list, &voip_info.free_out_queue); offset = offset + sizeof(struct voip_buf_node); } } mutex_unlock(&voip_info.lock); snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static int msm_voip_mode_rate_config_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); ucontrol->value.integer.value[0] = voip_info.mode; ucontrol->value.integer.value[1] = voip_info.rate; mutex_unlock(&voip_info.lock); return 0; } static int msm_voip_mode_rate_config_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mutex_lock(&voip_info.lock); voip_info.mode = ucontrol->value.integer.value[0]; voip_info.rate = ucontrol->value.integer.value[1]; pr_debug("%s: mode=%d,rate=%d\n", __func__, voip_info.mode, voip_info.rate); mutex_unlock(&voip_info.lock); return 0; } static int voip_get_rate_type(uint32_t mode, uint32_t rate, uint32_t *rate_type) { int ret = 0; switch (mode) { case MODE_AMR: { switch (rate) { case 4750: *rate_type = AMR_RATE_4750; break; case 5150: *rate_type = AMR_RATE_5150; break; case 5900: *rate_type = AMR_RATE_5900; break; case 6700: *rate_type = AMR_RATE_6700; break; case 7400: *rate_type = AMR_RATE_7400; break; case 7950: *rate_type = AMR_RATE_7950; break; case 10200: *rate_type = AMR_RATE_10200; break; case 12200: *rate_type = AMR_RATE_12200; break; default: pr_err("wrong rate for AMR NB.\n"); ret = -EINVAL; break; } break; } case MODE_AMR_WB: { switch (rate) { case 6600: *rate_type = AMR_RATE_6600 - AMR_RATE_6600; break; case 8850: *rate_type = AMR_RATE_8850 - AMR_RATE_6600; break; case 12650: *rate_type = AMR_RATE_12650 - AMR_RATE_6600; break; case 14250: *rate_type = AMR_RATE_14250 - AMR_RATE_6600; break; case 15850: *rate_type = AMR_RATE_15850 - AMR_RATE_6600; break; case 18250: 
*rate_type = AMR_RATE_18250 - AMR_RATE_6600;
			break;
		case 19850:
			*rate_type = AMR_RATE_19850 - AMR_RATE_6600;
			break;
		case 23050:
			*rate_type = AMR_RATE_23050 - AMR_RATE_6600;
			break;
		case 23850:
			*rate_type = AMR_RATE_23850 - AMR_RATE_6600;
			break;
		default:
			pr_err("wrong rate for AMR_WB.\n");
			ret = -EINVAL;
			break;
		}
		break;
	}
	case MODE_PCM: {
		/* Linear PCM has no vocoder rate; the DSP ignores it. */
		*rate_type = 0;
		break;
	}
	case MODE_IS127:
	case MODE_4GV_NB:
	case MODE_4GV_WB: {
		/* CDMA vocoders pass the VOC_*_RATE value straight through. */
		switch (rate) {
		case VOC_0_RATE:
		case VOC_8_RATE:
		case VOC_4_RATE:
		case VOC_2_RATE:
		case VOC_1_RATE:
			*rate_type = rate;
			break;
		default:
			pr_err("wrong rate for IS127/4GV_NB/WB.\n");
			ret = -EINVAL;
			break;
		}
		break;
	}
	default:
		pr_err("wrong mode type.\n");
		ret = -EINVAL;
	}
	pr_debug("%s, mode=%d, rate=%u, rate_type=%d\n", __func__,
		mode, rate, *rate_type);
	return ret;
}

/* Map a vocoder mode (and, for PCM, the sample rate) to a VSS media ID.
 * Returns -EINVAL for unknown modes.
 */
static int voip_get_media_type(uint32_t mode,
				unsigned int samp_rate)
{
	uint32_t media_type;

	pr_debug("%s: mode=%d, samp_rate=%d\n", __func__,
		mode, samp_rate);
	switch (mode) {
	case MODE_AMR:
		media_type = VSS_MEDIA_ID_AMR_NB_MODEM;
		break;
	case MODE_AMR_WB:
		media_type = VSS_MEDIA_ID_AMR_WB_MODEM;
		break;
	case MODE_PCM:
		if (samp_rate == 8000)
			media_type = VSS_MEDIA_ID_PCM_NB;
		else
			media_type = VSS_MEDIA_ID_PCM_WB;
		break;
	case MODE_IS127: /* EVRC-A */
		media_type = VSS_MEDIA_ID_EVRC_MODEM;
		break;
	case MODE_4GV_NB: /* EVRC-B */
		media_type = VSS_MEDIA_ID_4GV_NB_MODEM;
		break;
	case MODE_4GV_WB: /* EVRC-WB */
		media_type = VSS_MEDIA_ID_4GV_WB_MODEM;
		break;
	default:
		pr_debug(" input mode is not supported\n");
		/* NOTE(review): media_type is unsigned, so -EINVAL is stored
		 * as a large positive value; callers must cast to int before
		 * testing for failure.
		 */
		media_type = -EINVAL;
	}

	pr_debug("%s: media_type is 0x%x\n", __func__, media_type);

	return media_type;
}

/* ALSA PCM ops table for the VoIP platform. */
static struct snd_pcm_ops msm_pcm_ops = {
	.open = msm_pcm_open,
	.copy = msm_pcm_copy,
	.hw_params = msm_pcm_hw_params,
	.close = msm_pcm_close,
	.prepare = msm_pcm_prepare,
	.trigger = msm_pcm_trigger,
	.pointer = msm_pcm_pointer,
	.mmap = msm_pcm_mmap,
};

/* PCM construction hook: ensure the card has a coherent DMA mask. */
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	int ret = 0;

	pr_debug("msm_asoc_pcm_new\n");
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
	return ret;
}

static struct snd_soc_platform_driver msm_soc_platform = {
	.ops = &msm_pcm_ops,
	.pcm_new = msm_asoc_pcm_new,
	.probe = msm_pcm_voip_probe,
};

/* Platform-device probe: register this device as an ASoC platform. */
static __devinit int msm_pcm_probe(struct platform_device *pdev)
{
	pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
	return snd_soc_register_platform(&pdev->dev,
				&msm_soc_platform);
}

static int msm_pcm_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}

static struct platform_driver msm_pcm_driver = {
	.driver = {
		.name = "msm-voip-dsp",
		.owner = THIS_MODULE,
	},
	.probe = msm_pcm_probe,
	.remove = __devexit_p(msm_pcm_remove),
};

/* Module init: reset the shared driver state, initialize locks, wait
 * queues and buffer lists, then register the platform driver.
 */
static int __init msm_soc_platform_init(void)
{
	memset(&voip_info, 0, sizeof(voip_info));
	voip_info.mode = MODE_PCM;
	mutex_init(&voip_info.lock);
	spin_lock_init(&voip_info.dsp_lock);
	spin_lock_init(&voip_info.dsp_ul_lock);

	init_waitqueue_head(&voip_info.out_wait);
	init_waitqueue_head(&voip_info.in_wait);

	INIT_LIST_HEAD(&voip_info.in_queue);
	INIT_LIST_HEAD(&voip_info.free_in_queue);
	INIT_LIST_HEAD(&voip_info.out_queue);
	INIT_LIST_HEAD(&voip_info.free_out_queue);

	return platform_driver_register(&msm_pcm_driver);
}
module_init(msm_soc_platform_init);

static void __exit msm_soc_platform_exit(void)
{
	platform_driver_unregister(&msm_pcm_driver);
}
module_exit(msm_soc_platform_exit);

MODULE_DESCRIPTION("PCM module platform driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
basanta078/linux-370
fs/nls/nls_cp1251.c
60
12691
/* * linux/fs/nls/nls_cp1251.c * * Charset cp1251 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0402, 0x0403, 0x201a, 0x0453, 0x201e, 0x2026, 0x2020, 0x2021, 0x20ac, 0x2030, 0x0409, 0x2039, 0x040a, 0x040c, 0x040b, 0x040f, /* 0x90*/ 0x0452, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0x0000, 0x2122, 0x0459, 0x203a, 0x045a, 0x045c, 0x045b, 0x045f, /* 0xa0*/ 0x00a0, 0x040e, 0x045e, 0x0408, 0x00a4, 0x0490, 0x00a6, 0x00a7, 0x0401, 0x00a9, 0x0404, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x0407, /* 0xb0*/ 0x00b0, 0x00b1, 0x0406, 0x0456, 0x0491, 0x00b5, 0x00b6, 0x00b7, 
0x0451, 0x2116, 0x0454, 0x00bb, 0x0458, 0x0405, 0x0455, 0x0457, /* 0xc0*/ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, /* 0xd0*/ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, /* 0xe0*/ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f, /* 0xf0*/ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f, }; static unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 
0x00, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */
	0xb0, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	/* Fix: U+00C0..U+00FF (Latin accented letters) have no cp1251
	 * equivalents, so every entry must be 0x00 (unmapped). The previous
	 * table repeated the 0xb8-0xbf row here, wrongly mapping U+00C3,
	 * U+00CB, ... U+00FB to 0xbb ('>>', RIGHT GUILLEMET).
	 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};

/* Unicode page 0x04 (Cyrillic) -> cp1251 */
static unsigned char page04[256] = {
	0x00, 0xa8, 0x80, 0x81, 0xaa, 0xbd, 0xb2, 0xaf, /* 0x00-0x07 */
	0xa3, 0x8a, 0x8c, 0x8e, 0x8d, 0x00, 0xa1, 0x8f, /* 0x08-0x0f */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x10-0x17 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x18-0x1f */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x20-0x27 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0x28-0x2f */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0x48-0x4f */
	0x00, 0xb8, 0x90, 0x83, 0xba, 0xbe, 0xb3, 0xbf, /* 0x50-0x57 */
	0xbc, 0x9a, 0x9c, 0x9e, 0x9d, 0x00, 0xa2, 0x9f, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0xa5, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
};

/* Unicode page 0x20 (general punctuation, euro) -> cp1251 */
static unsigned char page20[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */ 0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ }; static unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static unsigned char *page_uni2charset[256] = { page00, NULL, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, page21, NULL, NULL, NULL, NULL, NULL, NULL, }; static unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x90, 0x83, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa2, 0xa2, 0xbc, 0xa4, 0xb4, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xb8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb3, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 
0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x81, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x80, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa1, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb2, 0xa5, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xa8, 0xb9, 0xaa, 0xbb, 0xa3, 0xbd, 0xbd, 0xaf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp1251", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = 
charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp1251(void) { return register_nls(&table); } static void __exit exit_nls_cp1251(void) { unregister_nls(&table); } module_init(init_nls_cp1251) module_exit(exit_nls_cp1251) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
androidarmv6/android_kernel_lge_msm7x27
arch/arm/mach-pxa/e400.c
828
3829
/* * Hardware definitions for the Toshiba eseries PDAs * * Copyright (c) 2003 Ian Molton <spyro@f2s.com> * * This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/mfd/t7l66xb.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <asm/setup.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #include <mach/pxa25x.h> #include <mach/eseries-gpio.h> #include <mach/pxafb.h> #include <mach/udc.h> #include <mach/irqs.h> #include "generic.h" #include "eseries.h" #include "clock.h" /* ------------------------ E400 LCD definitions ------------------------ */ static struct pxafb_mode_info e400_pxafb_mode_info = { .pixclock = 140703, .xres = 240, .yres = 320, .bpp = 16, .hsync_len = 4, .left_margin = 28, .right_margin = 8, .vsync_len = 3, .upper_margin = 5, .lower_margin = 6, .sync = 0, }; static struct pxafb_mach_info e400_pxafb_mach_info = { .modes = &e400_pxafb_mode_info, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP, .lccr3 = 0, .pxafb_backlight_power = NULL, }; /* ------------------------ E400 MFP config ----------------------------- */ static unsigned long e400_pin_config[] __initdata = { /* Chip selects */ GPIO15_nCS_1, /* CS1 - Flash */ GPIO80_nCS_4, /* CS4 - TMIO */ /* Clocks */ GPIO12_32KHz, /* BTUART */ GPIO42_BTUART_RXD, GPIO43_BTUART_TXD, GPIO44_BTUART_CTS, /* TMIO controller */ GPIO19_GPIO, /* t7l66xb #PCLR */ GPIO45_GPIO, /* t7l66xb #SUSPEND (NOT BTUART!) 
*/ /* wakeup */ GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, }; /* ---------------------------------------------------------------------- */ static struct mtd_partition partition_a = { .name = "Internal NAND flash", .offset = 0, .size = MTDPART_SIZ_FULL, }; static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; static struct nand_bbt_descr e400_t7l66xb_nand_bbt = { .options = 0, .offs = 4, .len = 2, .pattern = scan_ff_pattern }; static struct tmio_nand_data e400_t7l66xb_nand_config = { .num_partitions = 1, .partition = &partition_a, .badblock_pattern = &e400_t7l66xb_nand_bbt, }; static struct t7l66xb_platform_data e400_t7l66xb_info = { .irq_base = IRQ_BOARD_START, .enable = &eseries_tmio_enable, .suspend = &eseries_tmio_suspend, .resume = &eseries_tmio_resume, .nand_data = &e400_t7l66xb_nand_config, }; static struct platform_device e400_t7l66xb_device = { .name = "t7l66xb", .id = -1, .dev = { .platform_data = &e400_t7l66xb_info, }, .num_resources = 2, .resource = eseries_tmio_resources, }; /* ---------------------------------------------------------- */ static struct platform_device *devices[] __initdata = { &e400_t7l66xb_device, }; static void __init e400_init(void) { pxa2xx_mfp_config(ARRAY_AND_SIZE(e400_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); /* Fixme - e400 may have a switched clock */ eseries_register_clks(); eseries_get_tmio_gpios(); set_pxa_fb_info(&e400_pxafb_mach_info); platform_add_devices(devices, ARRAY_SIZE(devices)); pxa_set_udc_info(&e7xx_udc_mach_info); } MACHINE_START(E400, "Toshiba e400") /* Maintainer: Ian Molton (spyro@f2s.com) */ .phys_io = 0x40000000, .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, .boot_params = 0xa0000100, .map_io = pxa_map_io, .init_irq = pxa25x_init_irq, .fixup = eseries_fixup, .init_machine = e400_init, .timer = &pxa_timer, MACHINE_END
gpl-2.0
giveme13s/android_kernel_oneplus_msm8974
arch/arm/mach-msm/spm-regulator.c
1852
14017
/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/spmi.h>
#include <linux/string.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include "spm.h"
#include "spm-regulator.h"

#define SPM_REGULATOR_DRIVER_NAME "qcom,spm-regulator"

/*
 * One selectable output range of the regulator.  min_uV is the voltage
 * corresponding to vlevel == 0; set_point_min_uV is the lowest voltage
 * callers may actually request.
 */
struct voltage_range {
	int min_uV;
	int set_point_min_uV;
	int max_uV;
	int step_uV;
};

/* Properties for FTS2 type QPNP PMIC regulators. */
static const struct voltage_range fts2_range0 = {0, 350000, 1275000, 5000};
static const struct voltage_range fts2_range1 = {0, 700000, 2040000, 10000};

/* SPMI register offsets relative to the peripheral base address. */
#define QPNP_FTS2_REG_TYPE		0x04
#define QPNP_FTS2_REG_SUBTYPE		0x05
#define QPNP_FTS2_REG_VOLTAGE_RANGE	0x40
#define QPNP_FTS2_REG_VOLTAGE_SETPOINT	0x41
#define QPNP_FTS2_REG_MODE		0x45
#define QPNP_FTS2_REG_STEP_CTRL		0x61

/* Expected TYPE/SUBTYPE register values identifying an FTS2 peripheral. */
#define QPNP_FTS2_TYPE			0x1C
#define QPNP_FTS2_SUBTYPE		0x08

#define QPNP_FTS2_MODE_PWM		0x80
#define QPNP_FTS2_MODE_AUTO		0x40

#define QPNP_FTS2_STEP_CTRL_STEP_MASK	0x18
#define QPNP_FTS2_STEP_CTRL_STEP_SHIFT	3
#define QPNP_FTS2_STEP_CTRL_DELAY_MASK	0x07
#define QPNP_FTS2_STEP_CTRL_DELAY_SHIFT	0

/* Clock rate in kHz of the FTS2 regulator reference clock. */
#define QPNP_FTS2_CLOCK_RATE		19200

/* Time to delay in us to ensure that a mode change has completed. */
#define QPNP_FTS2_MODE_CHANGE_DELAY	50

/* Minimum time in us that it takes to complete a single SPMI write. */
#define QPNP_SPMI_WRITE_MIN_DELAY	8

/*
 * The ratio QPNP_FTS2_STEP_MARGIN_NUM/QPNP_FTS2_STEP_MARGIN_DEN is used to
 * adjust the step rate in order to account for oscillator variance.
 */
#define QPNP_FTS2_STEP_MARGIN_NUM	4
#define QPNP_FTS2_STEP_MARGIN_DEN	5

/* Per-regulator driver state. */
struct spm_vreg {
	struct regulator_desc		rdesc;
	struct regulator_dev		*rdev;
	struct spmi_device		*spmi_dev;
	const struct voltage_range	*range;
	int				uV;		/* requested voltage */
	int				last_set_uV;	/* voltage actually programmed */
	unsigned			vlevel;
	unsigned			last_set_vlevel;
	bool				online;
	u16				spmi_base_addr;
	u8				init_mode;	/* mode at probe / from DT */
	int				step_rate;	/* stepping rate in uV/us */
};

/* Write the FTS2 mode register (PWM/AUTO) over SPMI. */
static int qpnp_fts2_set_mode(struct spm_vreg *vreg, u8 mode)
{
	int rc;

	rc = spmi_ext_register_writel(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
		vreg->spmi_base_addr + QPNP_FTS2_REG_MODE, &mode, 1);
	if (rc)
		dev_err(&vreg->spmi_dev->dev,
			"%s: could not write to mode register, rc=%d\n",
			__func__, rc);

	return rc;
}

/*
 * Program the previously validated vlevel via the SPM.  When stepping up in
 * AUTO mode, temporarily switch to PWM so the ramp is fast, wait for the
 * ramp (and mode change) to finish, then return to AUTO to save power.
 */
static int _spm_regulator_set_voltage(struct regulator_dev *rdev)
{
	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
	int rc;

	if (vreg->vlevel == vreg->last_set_vlevel)
		return 0;

	if (!(vreg->init_mode & QPNP_FTS2_MODE_PWM)
	    && vreg->uV > vreg->last_set_uV) {
		/* Switch to PWM mode so that voltage ramping is fast. */
		rc = qpnp_fts2_set_mode(vreg, QPNP_FTS2_MODE_PWM);
		if (rc)
			return rc;
	}

	rc = msm_spm_set_vdd(0, vreg->vlevel); /* value of CPU is don't care */
	if (rc) {
		pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->rdesc.name, rc);
		return rc;
	}

	if (vreg->uV > vreg->last_set_uV) {
		/* Wait for voltage stepping to complete. */
		udelay(DIV_ROUND_UP(vreg->uV - vreg->last_set_uV,
					vreg->step_rate));
	}

	if (!(vreg->init_mode & QPNP_FTS2_MODE_PWM)
	    && vreg->uV > vreg->last_set_uV) {
		/*
		 * Wait for mode transition to complete.  The SPMI write above
		 * already consumed QPNP_SPMI_WRITE_MIN_DELAY of that time.
		 */
		udelay(QPNP_FTS2_MODE_CHANGE_DELAY - QPNP_SPMI_WRITE_MIN_DELAY);
		/* Switch to AUTO mode so that power consumption is lowered. */
		rc = qpnp_fts2_set_mode(vreg, QPNP_FTS2_MODE_AUTO);
		if (rc)
			return rc;
	}

	vreg->last_set_uV = vreg->uV;
	vreg->last_set_vlevel = vreg->vlevel;

	return rc;
}

/*
 * regulator_ops.set_voltage: map [min_uV, max_uV] onto the closest set
 * point in the active range.  The hardware is only touched if the
 * regulator is online.
 */
static int spm_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
					int max_uV, unsigned *selector)
{
	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
	const struct voltage_range *range = vreg->range;
	int uV = min_uV;
	unsigned vlevel;

	if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
		uV = range->set_point_min_uV;

	if (uV < range->set_point_min_uV || uV > range->max_uV) {
		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
			vreg->rdesc.name, min_uV, max_uV,
			range->set_point_min_uV, range->max_uV);
		return -EINVAL;
	}

	/* Round up to the next step boundary. */
	vlevel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
	uV = vlevel * range->step_uV + range->min_uV;

	if (uV > max_uV) {
		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
			vreg->rdesc.name, min_uV, max_uV);
		return -EINVAL;
	}

	vreg->vlevel = vlevel;
	vreg->uV = uV;
	/* Selector 0 corresponds to set_point_min_uV, not min_uV. */
	*selector = vlevel -
		(vreg->range->set_point_min_uV - vreg->range->min_uV)
			/ vreg->range->step_uV;

	if (!vreg->online)
		return 0;

	return _spm_regulator_set_voltage(rdev);
}

/* regulator_ops.get_voltage: return the cached requested voltage. */
static int spm_regulator_get_voltage(struct regulator_dev *rdev)
{
	struct spm_vreg *vreg = rdev_get_drvdata(rdev);

	return vreg->uV;
}

/* regulator_ops.list_voltage: selector -> microvolts (0 if out of range). */
static int spm_regulator_list_voltage(struct regulator_dev *rdev,
					unsigned selector)
{
	struct spm_vreg *vreg = rdev_get_drvdata(rdev);

	if (selector >= vreg->rdesc.n_voltages)
		return 0;

	return selector * vreg->range->step_uV + vreg->range->set_point_min_uV;
}

/* regulator_ops.enable: apply any pending voltage, then mark online. */
static int spm_regulator_enable(struct regulator_dev *rdev)
{
	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
	int rc;

	rc = _spm_regulator_set_voltage(rdev);

	if (!rc)
		vreg->online = true;

	return rc;
}

/*
 * regulator_ops.disable: only clears the software online flag; the
 * hardware output is not turned off here.
 */
static int spm_regulator_disable(struct regulator_dev *rdev)
{
	struct spm_vreg *vreg = rdev_get_drvdata(rdev);

	vreg->online = false;

	return 0;
}

static int spm_regulator_is_enabled(struct regulator_dev *rdev)
{
	struct spm_vreg *vreg = rdev_get_drvdata(rdev);

	return vreg->online;
}

static struct regulator_ops spm_regulator_ops = {
	.get_voltage	= spm_regulator_get_voltage,
	.set_voltage	= spm_regulator_set_voltage,
	.list_voltage	= spm_regulator_list_voltage,
	.enable		= spm_regulator_enable,
	.disable	= spm_regulator_disable,
	.is_enabled	= spm_regulator_is_enabled,
};

/* Verify the peripheral really is an FTS2 regulator (-ENODEV if not). */
static int qpnp_fts2_check_type(struct spm_vreg *vreg)
{
	int rc;
	u8 type[2];

	rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
		vreg->spmi_base_addr + QPNP_FTS2_REG_TYPE, type, 2);
	if (rc) {
		dev_err(&vreg->spmi_dev->dev,
			"%s: could not read type register, rc=%d\n",
			__func__, rc);
		return rc;
	}

	if (type[0] != QPNP_FTS2_TYPE || type[1] != QPNP_FTS2_SUBTYPE) {
		dev_err(&vreg->spmi_dev->dev,
			"%s: invalid type=0x%02X or subtype=0x%02X register value\n",
			__func__, type[0], type[1]);
		return -ENODEV;
	}

	return rc;
}

/* Read the hardware range register and select fts2_range0/fts2_range1. */
static int qpnp_fts2_init_range(struct spm_vreg *vreg)
{
	int rc;
	u8 reg = 0;

	rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
		vreg->spmi_base_addr + QPNP_FTS2_REG_VOLTAGE_RANGE, &reg, 1);
	if (rc) {
		dev_err(&vreg->spmi_dev->dev,
			"%s: could not read voltage range register, rc=%d\n",
			__func__, rc);
		return rc;
	}

	if (reg == 0x00) {
		vreg->range = &fts2_range0;
	} else if (reg == 0x01) {
		vreg->range = &fts2_range1;
	} else {
		dev_err(&vreg->spmi_dev->dev,
			"%s: voltage range=%d is invalid\n", __func__, reg);
		rc = -EINVAL;
	}

	return rc;
}

/* Read back the current setpoint so software state matches hardware. */
static int qpnp_fts2_init_voltage(struct spm_vreg *vreg)
{
	int rc;
	u8 reg = 0;

	rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
		vreg->spmi_base_addr + QPNP_FTS2_REG_VOLTAGE_SETPOINT, &reg, 1);
	if (rc) {
		dev_err(&vreg->spmi_dev->dev,
			"%s: could not read voltage setpoint register, rc=%d\n",
			__func__, rc);
		return rc;
	}

	vreg->vlevel = reg;
	vreg->uV = vreg->vlevel * vreg->range->step_uV + vreg->range->min_uV;
	vreg->last_set_uV = vreg->uV;

	return rc;
}

/*
 * Determine the initial mode: if DT specifies "qcom,mode" ("pwm"/"auto"),
 * program it; otherwise read the current mode from hardware.
 */
static int qpnp_fts2_init_mode(struct spm_vreg *vreg)
{
	const char *mode_name;
	int rc;

	rc = of_property_read_string(vreg->spmi_dev->dev.of_node, "qcom,mode",
					&mode_name);
	if (!rc) {
		if (strcmp("pwm", mode_name) == 0) {
			vreg->init_mode = QPNP_FTS2_MODE_PWM;
		} else if (strcmp("auto", mode_name) == 0) {
			vreg->init_mode = QPNP_FTS2_MODE_AUTO;
		} else {
			dev_err(&vreg->spmi_dev->dev,
				"%s: unknown regulator mode: %s\n",
				__func__, mode_name);
			return -EINVAL;
		}

		rc = spmi_ext_register_writel(vreg->spmi_dev->ctrl,
			vreg->spmi_dev->sid,
			vreg->spmi_base_addr + QPNP_FTS2_REG_MODE,
			&vreg->init_mode, 1);
		if (rc)
			dev_err(&vreg->spmi_dev->dev,
				"%s: could not write mode register, rc=%d\n",
				__func__, rc);
	} else {
		rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl,
			vreg->spmi_dev->sid,
			vreg->spmi_base_addr + QPNP_FTS2_REG_MODE,
			&vreg->init_mode, 1);
		if (rc)
			dev_err(&vreg->spmi_dev->dev,
				"%s: could not read mode register, rc=%d\n",
				__func__, rc);
	}

	return rc;
}

/*
 * Derive the voltage stepping rate (uV/us) from the step control register
 * and the reference clock, then derate it by the step margin ratio.
 */
static int qpnp_fts2_init_step_rate(struct spm_vreg *vreg)
{
	int rc;
	u8 reg = 0;
	int step, delay;

	rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
		vreg->spmi_base_addr + QPNP_FTS2_REG_STEP_CTRL, &reg, 1);
	if (rc) {
		dev_err(&vreg->spmi_dev->dev,
			"%s: could not read stepping control register, rc=%d\n",
			__func__, rc);
		return rc;
	}

	step = (reg & QPNP_FTS2_STEP_CTRL_STEP_MASK)
		>> QPNP_FTS2_STEP_CTRL_STEP_SHIFT;
	delay = (reg & QPNP_FTS2_STEP_CTRL_DELAY_MASK)
		>> QPNP_FTS2_STEP_CTRL_DELAY_SHIFT;

	/* step_rate has units of uV/us. */
	vreg->step_rate = QPNP_FTS2_CLOCK_RATE * vreg->range->step_uV
				* (1 << step);
	vreg->step_rate /= 1000 * (8 << delay);
	vreg->step_rate = vreg->step_rate * QPNP_FTS2_STEP_MARGIN_NUM
				/ QPNP_FTS2_STEP_MARGIN_DEN;

	/* Ensure that the stepping rate is greater than 0. */
	vreg->step_rate = max(vreg->step_rate, 1);

	return rc;
}

/*
 * Probe: validate the peripheral, read its hardware state (range, setpoint,
 * mode, step rate), then register with the regulator framework.
 */
static int __devinit spm_regulator_probe(struct spmi_device *spmi)
{
	struct device_node *node = spmi->dev.of_node;
	struct regulator_init_data *init_data;
	struct spm_vreg *vreg;
	struct resource *res;
	int rc;

	if (!node) {
		dev_err(&spmi->dev, "%s: device node missing\n", __func__);
		return -ENODEV;
	}

	vreg = devm_kzalloc(&spmi->dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg) {
		pr_err("allocation failed.\n");
		return -ENOMEM;
	}
	vreg->spmi_dev = spmi;

	res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&spmi->dev, "%s: node is missing base address\n",
			__func__);
		return -EINVAL;
	}
	vreg->spmi_base_addr = res->start;

	rc = qpnp_fts2_check_type(vreg);
	if (rc)
		return rc;

	/*
	 * The FTS2 regulator must be initialized to range 0 or range 1 during
	 * PMIC power on sequence. Once it is set, it cannot be changed
	 * dynamically.
	 */
	rc = qpnp_fts2_init_range(vreg);
	if (rc)
		return rc;

	rc = qpnp_fts2_init_voltage(vreg);
	if (rc)
		return rc;

	rc = qpnp_fts2_init_mode(vreg);
	if (rc)
		return rc;

	rc = qpnp_fts2_init_step_rate(vreg);
	if (rc)
		return rc;

	init_data = of_get_regulator_init_data(&spmi->dev, node);
	if (!init_data) {
		dev_err(&spmi->dev, "%s: unable to allocate memory\n",
				__func__);
		return -ENOMEM;
	}
	init_data->constraints.input_uV = init_data->constraints.max_uV;
	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
						| REGULATOR_CHANGE_VOLTAGE;

	if (!init_data->constraints.name) {
		dev_err(&spmi->dev, "%s: node is missing regulator name\n",
			__func__);
		return -EINVAL;
	}

	vreg->rdesc.name	= init_data->constraints.name;
	vreg->rdesc.type	= REGULATOR_VOLTAGE;
	vreg->rdesc.owner	= THIS_MODULE;
	vreg->rdesc.ops		= &spm_regulator_ops;
	vreg->rdesc.n_voltages
		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
			/ vreg->range->step_uV + 1;

	vreg->rdev = regulator_register(&vreg->rdesc, &spmi->dev,
					init_data, vreg, node);
	if (IS_ERR(vreg->rdev)) {
		rc = PTR_ERR(vreg->rdev);
		dev_err(&spmi->dev, "%s: regulator_register failed, rc=%d\n",
			__func__, rc);
		return rc;
	}

	dev_set_drvdata(&spmi->dev, vreg);

	pr_info("name=%s, range=%s, voltage=%d uV, mode=%s, step rate=%d uV/us\n",
		vreg->rdesc.name, vreg->range == &fts2_range0 ? "LV" : "MV",
		vreg->uV,
		vreg->init_mode & QPNP_FTS2_MODE_PWM ? "PWM" :
			(vreg->init_mode & QPNP_FTS2_MODE_AUTO ? "AUTO"
								: "PFM"),
		vreg->step_rate);

	return rc;
}

static int __devexit spm_regulator_remove(struct spmi_device *spmi)
{
	struct spm_vreg *vreg = dev_get_drvdata(&spmi->dev);

	regulator_unregister(vreg->rdev);

	return 0;
}

static struct of_device_id spm_regulator_match_table[] = {
	{ .compatible = SPM_REGULATOR_DRIVER_NAME, },
	{}
};

static const struct spmi_device_id spm_regulator_id[] = {
	{ SPM_REGULATOR_DRIVER_NAME, 0 },
	{}
};
MODULE_DEVICE_TABLE(spmi, spm_regulator_id);

static struct spmi_driver spm_regulator_driver = {
	.driver = {
		.name		= SPM_REGULATOR_DRIVER_NAME,
		.of_match_table	= spm_regulator_match_table,
		.owner		= THIS_MODULE,
	},
	.probe		= spm_regulator_probe,
	.remove		= __devexit_p(spm_regulator_remove),
	.id_table	= spm_regulator_id,
};

/**
 * spm_regulator_init() - register spmi driver for spm-regulator
 *
 * This initialization function should be called in systems in which driver
 * registration ordering must be controlled precisely.
 *
 * Returns 0 on success or errno on failure.
 */
int __init spm_regulator_init(void)
{
	/* Guard against double registration: also called via arch_initcall. */
	static bool has_registered;

	if (has_registered)
		return 0;
	else
		has_registered = true;

	return spmi_driver_register(&spm_regulator_driver);
}
EXPORT_SYMBOL(spm_regulator_init);

static void __exit spm_regulator_exit(void)
{
	spmi_driver_unregister(&spm_regulator_driver);
}

arch_initcall(spm_regulator_init);
module_exit(spm_regulator_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SPM regulator driver");
MODULE_ALIAS("platform:spm-regulator");
gpl-2.0
BOOTMGR/GT-I9070_kernel
fs/jfs/acl.c
2620
4739
/*
 *   Copyright (C) International Business Machines Corp., 2002-2004
 *   Copyright (C) Andreas Gruenbacher, 2001
 *   Copyright (C) Linus Torvalds, 1991, 1992
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include "jfs_incore.h"
#include "jfs_txnmgr.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"

/*
 * Fetch the access or default POSIX ACL of @inode.
 *
 * Checks the inode's ACL cache first; on a miss the ACL is read from the
 * corresponding extended attribute, decoded, and cached.  Returns NULL if
 * no ACL is set (xattr absent), or an ERR_PTR on failure.
 */
static struct posix_acl *jfs_get_acl(struct inode *inode, int type)
{
	struct posix_acl *acl;
	char *ea_name;
	int size;
	char *value = NULL;

	acl = get_cached_acl(inode, type);
	if (acl != ACL_NOT_CACHED)
		return acl;

	switch(type) {
	case ACL_TYPE_ACCESS:
		ea_name = POSIX_ACL_XATTR_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		ea_name = POSIX_ACL_XATTR_DEFAULT;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/* First call sizes the xattr; second call actually reads it. */
	size = __jfs_getxattr(inode, ea_name, NULL, 0);

	if (size > 0) {
		value = kmalloc(size, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		size = __jfs_getxattr(inode, ea_name, value, size);
	}

	if (size < 0) {
		if (size == -ENODATA)
			acl = NULL;	/* no ACL xattr -> no ACL */
		else
			acl = ERR_PTR(size);
	} else {
		acl = posix_acl_from_xattr(value, size);
	}
	kfree(value);
	if (!IS_ERR(acl))
		set_cached_acl(inode, type, acl);
	return acl;
}

/*
 * Store @acl on @inode as the corresponding extended attribute, inside
 * transaction @tid.  A NULL @acl removes the xattr (zero-length write).
 * The ACL cache is updated only when the write succeeds.
 */
static int jfs_set_acl(tid_t tid, struct inode *inode, int type,
		       struct posix_acl *acl)
{
	char *ea_name;
	int rc;
	int size = 0;
	char *value = NULL;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;

	switch(type) {
	case ACL_TYPE_ACCESS:
		ea_name = POSIX_ACL_XATTR_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		ea_name = POSIX_ACL_XATTR_DEFAULT;
		/* Default ACLs only make sense on directories. */
		if (!S_ISDIR(inode->i_mode))
			return acl ? -EACCES : 0;
		break;
	default:
		return -EINVAL;
	}

	if (acl) {
		size = posix_acl_xattr_size(acl->a_count);
		value = kmalloc(size, GFP_KERNEL);
		if (!value)
			return -ENOMEM;
		rc = posix_acl_to_xattr(acl, value, size);
		if (rc < 0)
			goto out;
	}
	rc = __jfs_setxattr(tid, inode, ea_name, value, size, 0);
out:
	kfree(value);

	if (!rc)
		set_cached_acl(inode, type, acl);

	return rc;
}

/*
 * Permission-check helper: evaluate @mask against the inode's access ACL.
 * Returns -ECHILD under RCU walk (ACL lookup may block), -EAGAIN when no
 * ACL exists so the caller falls back to the standard mode-bit check.
 */
int jfs_check_acl(struct inode *inode, int mask, unsigned int flags)
{
	struct posix_acl *acl;

	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;

	acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl) {
		int error = posix_acl_permission(inode, acl, mask);
		posix_acl_release(acl);
		return error;
	}

	return -EAGAIN;
}

/*
 * Initialize the ACLs of a newly created @inode from the default ACL of
 * its parent @dir (within transaction @tid).  Without a default ACL, the
 * current umask is applied to i_mode instead.  Also mirrors the final
 * mode bits into the on-disk mode2 field.
 */
int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir)
{
	struct posix_acl *acl = NULL;
	struct posix_acl *clone;
	mode_t mode;
	int rc = 0;

	if (S_ISLNK(inode->i_mode))
		return 0;

	acl = jfs_get_acl(dir, ACL_TYPE_DEFAULT);
	if (IS_ERR(acl))
		return PTR_ERR(acl);

	if (acl) {
		if (S_ISDIR(inode->i_mode)) {
			/* New directories inherit the default ACL itself. */
			rc = jfs_set_acl(tid, inode, ACL_TYPE_DEFAULT, acl);
			if (rc)
				goto cleanup;
		}
		clone = posix_acl_clone(acl, GFP_KERNEL);
		if (!clone) {
			rc = -ENOMEM;
			goto cleanup;
		}
		mode = inode->i_mode;
		rc = posix_acl_create_masq(clone, &mode);
		if (rc >= 0) {
			inode->i_mode = mode;
			/* rc > 0 means the masked ACL is non-trivial. */
			if (rc > 0)
				rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS,
						 clone);
		}
		posix_acl_release(clone);
cleanup:
		posix_acl_release(acl);
	} else
		inode->i_mode &= ~current_umask();

	JFS_IP(inode)->mode2 = (JFS_IP(inode)->mode2 & 0xffff0000) |
			       inode->i_mode;

	return rc;
}

/*
 * Propagate a chmod into the inode's access ACL.  The ACL update is done
 * inside its own jfs transaction under the inode's commit_mutex.
 */
int jfs_acl_chmod(struct inode *inode)
{
	struct posix_acl *acl, *clone;
	int rc;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;

	acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
	/* PTR_ERR(NULL) is 0: no ACL means nothing to update. */
	if (IS_ERR(acl) || !acl)
		return PTR_ERR(acl);

	clone = posix_acl_clone(acl, GFP_KERNEL);
	posix_acl_release(acl);
	if (!clone)
		return -ENOMEM;

	rc = posix_acl_chmod_masq(clone, inode->i_mode);
	if (!rc) {
		tid_t tid = txBegin(inode->i_sb, 0);
		mutex_lock(&JFS_IP(inode)->commit_mutex);
		rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, clone);
		if (!rc)
			rc = txCommit(tid, 1, &inode, 0);
		txEnd(tid);
		mutex_unlock(&JFS_IP(inode)->commit_mutex);
	}

	posix_acl_release(clone);
	return rc;
}
gpl-2.0
01org/KVMGT-kernel
drivers/mtd/mtdoops.c
4668
12101
/* * MTD Oops/Panic logger * * Copyright © 2007 Nokia Corporation. All rights reserved. * * Author: Richard Purdie <rpurdie@openedhand.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/console.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/mtd/mtd.h> #include <linux/kmsg_dump.h> /* Maximum MTD partition size */ #define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024) #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00 #define MTDOOPS_HEADER_SIZE 8 static unsigned long record_size = 4096; module_param(record_size, ulong, 0400); MODULE_PARM_DESC(record_size, "record size for MTD OOPS pages in bytes (default 4096)"); static char mtddev[80]; module_param_string(mtddev, mtddev, 80, 0400); MODULE_PARM_DESC(mtddev, "name or index number of the MTD device to use"); static int dump_oops = 1; module_param(dump_oops, int, 0600); MODULE_PARM_DESC(dump_oops, "set to 1 to dump oopses, 0 to only dump panics (default 1)"); static struct mtdoops_context { struct kmsg_dumper dump; int mtd_index; struct work_struct work_erase; struct work_struct work_write; struct mtd_info *mtd; int oops_pages; int nextpage; int nextcount; unsigned long *oops_page_used; void *oops_buf; } oops_cxt; static void mark_page_used(struct 
mtdoops_context *cxt, int page) { set_bit(page, cxt->oops_page_used); } static void mark_page_unused(struct mtdoops_context *cxt, int page) { clear_bit(page, cxt->oops_page_used); } static int page_is_used(struct mtdoops_context *cxt, int page) { return test_bit(page, cxt->oops_page_used); } static void mtdoops_erase_callback(struct erase_info *done) { wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; wake_up(wait_q); } static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) { struct mtd_info *mtd = cxt->mtd; u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize; u32 start_page = start_page_offset / record_size; u32 erase_pages = mtd->erasesize / record_size; struct erase_info erase; DECLARE_WAITQUEUE(wait, current); wait_queue_head_t wait_q; int ret; int page; init_waitqueue_head(&wait_q); erase.mtd = mtd; erase.callback = mtdoops_erase_callback; erase.addr = offset; erase.len = mtd->erasesize; erase.priv = (u_long)&wait_q; set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&wait_q, &wait); ret = mtd_erase(mtd, &erase); if (ret) { set_current_state(TASK_RUNNING); remove_wait_queue(&wait_q, &wait); printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n", (unsigned long long)erase.addr, (unsigned long long)erase.len, mtddev); return ret; } schedule(); /* Wait for erase to finish. 
*/ remove_wait_queue(&wait_q, &wait); /* Mark pages as unused */ for (page = start_page; page < start_page + erase_pages; page++) mark_page_unused(cxt, page); return 0; } static void mtdoops_inc_counter(struct mtdoops_context *cxt) { cxt->nextpage++; if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; cxt->nextcount++; if (cxt->nextcount == 0xffffffff) cxt->nextcount = 0; if (page_is_used(cxt, cxt->nextpage)) { schedule_work(&cxt->work_erase); return; } printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n", cxt->nextpage, cxt->nextcount); } /* Scheduled work - when we can't proceed without erasing a block */ static void mtdoops_workfunc_erase(struct work_struct *work) { struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_erase); struct mtd_info *mtd = cxt->mtd; int i = 0, j, ret, mod; /* We were unregistered */ if (!mtd) return; mod = (cxt->nextpage * record_size) % mtd->erasesize; if (mod != 0) { cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; } while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) { badblock: printk(KERN_WARNING "mtdoops: bad block at %08lx\n", cxt->nextpage * record_size); i++; cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; if (i == cxt->oops_pages / (mtd->erasesize / record_size)) { printk(KERN_ERR "mtdoops: all blocks bad!\n"); return; } } if (ret < 0) { printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n"); return; } for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); if (ret >= 0) { printk(KERN_DEBUG "mtdoops: ready %d, %d\n", cxt->nextpage, cxt->nextcount); return; } if (ret == -EIO) { ret = mtd_block_markbad(mtd, cxt->nextpage * record_size); if (ret < 0 && ret != -EOPNOTSUPP) { printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); return; } } goto badblock; } static 
void mtdoops_write(struct mtdoops_context *cxt, int panic) { struct mtd_info *mtd = cxt->mtd; size_t retlen; u32 *hdr; int ret; /* Add mtdoops header to the buffer */ hdr = cxt->oops_buf; hdr[0] = cxt->nextcount; hdr[1] = MTDOOPS_KERNMSG_MAGIC; if (panic) { ret = mtd_panic_write(mtd, cxt->nextpage * record_size, record_size, &retlen, cxt->oops_buf); if (ret == -EOPNOTSUPP) { printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); return; } } else ret = mtd_write(mtd, cxt->nextpage * record_size, record_size, &retlen, cxt->oops_buf); if (retlen != record_size || ret < 0) printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n", cxt->nextpage * record_size, retlen, record_size, ret); mark_page_used(cxt, cxt->nextpage); memset(cxt->oops_buf, 0xff, record_size); mtdoops_inc_counter(cxt); } static void mtdoops_workfunc_write(struct work_struct *work) { struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_write); mtdoops_write(cxt, 0); } static void find_next_position(struct mtdoops_context *cxt) { struct mtd_info *mtd = cxt->mtd; int ret, page, maxpos = 0; u32 count[2], maxcount = 0xffffffff; size_t retlen; for (page = 0; page < cxt->oops_pages; page++) { if (mtd_block_isbad(mtd, page * record_size)) continue; /* Assume the page is used */ mark_page_used(cxt, page); ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, &retlen, (u_char *)&count[0]); if (retlen != MTDOOPS_HEADER_SIZE || (ret < 0 && !mtd_is_bitflip(ret))) { printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n", page * record_size, retlen, MTDOOPS_HEADER_SIZE, ret); continue; } if (count[0] == 0xffffffff && count[1] == 0xffffffff) mark_page_unused(cxt, page); if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC) continue; if (maxcount == 0xffffffff) { maxcount = count[0]; maxpos = page; } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) { maxcount = count[0]; maxpos = page; } else if (count[0] 
> maxcount && count[0] < 0xc0000000) { maxcount = count[0]; maxpos = page; } else if (count[0] > maxcount && count[0] > 0xc0000000 && maxcount > 0x80000000) { maxcount = count[0]; maxpos = page; } } if (maxcount == 0xffffffff) { cxt->nextpage = cxt->oops_pages - 1; cxt->nextcount = 0; } else { cxt->nextpage = maxpos; cxt->nextcount = maxcount; } mtdoops_inc_counter(cxt); } static void mtdoops_do_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) { struct mtdoops_context *cxt = container_of(dumper, struct mtdoops_context, dump); /* Only dump oopses if dump_oops is set */ if (reason == KMSG_DUMP_OOPS && !dump_oops) return; kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, record_size - MTDOOPS_HEADER_SIZE, NULL); /* Panics must be written immediately */ if (reason != KMSG_DUMP_OOPS) mtdoops_write(cxt, 1); /* For other cases, schedule work to write it "nicely" */ schedule_work(&cxt->work_write); } static void mtdoops_notify_add(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; u64 mtdoops_pages = div_u64(mtd->size, record_size); int err; if (!strcmp(mtd->name, mtddev)) cxt->mtd_index = mtd->index; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; if (mtd->size < mtd->erasesize * 2) { printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n", mtd->index); return; } if (mtd->erasesize < record_size) { printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n", mtd->index); return; } if (mtd->size > MTDOOPS_MAX_MTD_SIZE) { printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n", mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024); return; } /* oops_page_used is a bit field */ cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG) * sizeof(unsigned long)); if (!cxt->oops_page_used) { printk(KERN_ERR "mtdoops: could not allocate page array\n"); return; } cxt->dump.max_reason = KMSG_DUMP_OOPS; cxt->dump.dump = mtdoops_do_dump; err = 
kmsg_dump_register(&cxt->dump); if (err) { printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err); vfree(cxt->oops_page_used); cxt->oops_page_used = NULL; return; } cxt->mtd = mtd; cxt->oops_pages = (int)mtd->size / record_size; find_next_position(cxt); printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index); } static void mtdoops_notify_remove(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; if (kmsg_dump_unregister(&cxt->dump) < 0) printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n"); cxt->mtd = NULL; flush_work(&cxt->work_erase); flush_work(&cxt->work_write); } static struct mtd_notifier mtdoops_notifier = { .add = mtdoops_notify_add, .remove = mtdoops_notify_remove, }; static int __init mtdoops_init(void) { struct mtdoops_context *cxt = &oops_cxt; int mtd_index; char *endp; if (strlen(mtddev) == 0) { printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n"); return -EINVAL; } if ((record_size & 4095) != 0) { printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n"); return -EINVAL; } if (record_size < 4096) { printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n"); return -EINVAL; } /* Setup the MTD device to use */ cxt->mtd_index = -1; mtd_index = simple_strtoul(mtddev, &endp, 0); if (*endp == '\0') cxt->mtd_index = mtd_index; cxt->oops_buf = vmalloc(record_size); if (!cxt->oops_buf) { printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n"); return -ENOMEM; } memset(cxt->oops_buf, 0xff, record_size); INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); register_mtd_user(&mtdoops_notifier); return 0; } static void __exit mtdoops_exit(void) { struct mtdoops_context *cxt = &oops_cxt; unregister_mtd_user(&mtdoops_notifier); vfree(cxt->oops_buf); vfree(cxt->oops_page_used); } module_init(mtdoops_init); module_exit(mtdoops_exit); 
MODULE_LICENSE("GPL"); MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>"); MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");
gpl-2.0
s0627js/android_kernel_SHV-E300S
arch/arm/mach-omap2/dpll3xxx.c
4924
15825
/* * OMAP3/4 - specific DPLL control functions * * Copyright (C) 2009-2010 Texas Instruments, Inc. * Copyright (C) 2009-2010 Nokia Corporation * * Written by Paul Walmsley * Testing and integration fixes by Jouni Högander * * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth * Menon * * Parts of this code are based on code written by * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/bitops.h> #include <linux/clkdev.h> #include <plat/cpu.h> #include <plat/clock.h> #include "clock.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-34xx.h" /* CM_AUTOIDLE_PLL*.AUTO_* bit values */ #define DPLL_AUTOIDLE_DISABLE 0x0 #define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1 #define MAX_DPLL_WAIT_TRIES 1000000 /* Private functions */ /* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */ static void _omap3_dpll_write_clken(struct clk *clk, u8 clken_bits) { const struct dpll_data *dd; u32 v; dd = clk->dpll_data; v = __raw_readl(dd->control_reg); v &= ~dd->enable_mask; v |= clken_bits << __ffs(dd->enable_mask); __raw_writel(v, dd->control_reg); } /* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */ static int _omap3_wait_dpll_status(struct clk *clk, u8 state) { const struct dpll_data *dd; int i = 0; int ret = -EINVAL; dd = clk->dpll_data; state <<= __ffs(dd->idlest_mask); while (((__raw_readl(dd->idlest_reg) & dd->idlest_mask) != state) && i < MAX_DPLL_WAIT_TRIES) { i++; udelay(1); } if (i == MAX_DPLL_WAIT_TRIES) { printk(KERN_ERR "clock: %s failed transition to '%s'\n", clk->name, (state) ? 
"locked" : "bypassed"); } else { pr_debug("clock: %s transition to '%s' in %d loops\n", clk->name, (state) ? "locked" : "bypassed", i); ret = 0; } return ret; } /* From 3430 TRM ES2 4.7.6.2 */ static u16 _omap3_dpll_compute_freqsel(struct clk *clk, u8 n) { unsigned long fint; u16 f = 0; fint = clk->dpll_data->clk_ref->rate / n; pr_debug("clock: fint is %lu\n", fint); if (fint >= 750000 && fint <= 1000000) f = 0x3; else if (fint > 1000000 && fint <= 1250000) f = 0x4; else if (fint > 1250000 && fint <= 1500000) f = 0x5; else if (fint > 1500000 && fint <= 1750000) f = 0x6; else if (fint > 1750000 && fint <= 2100000) f = 0x7; else if (fint > 7500000 && fint <= 10000000) f = 0xB; else if (fint > 10000000 && fint <= 12500000) f = 0xC; else if (fint > 12500000 && fint <= 15000000) f = 0xD; else if (fint > 15000000 && fint <= 17500000) f = 0xE; else if (fint > 17500000 && fint <= 21000000) f = 0xF; else pr_debug("clock: unknown freqsel setting for %d\n", n); return f; } /* * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness * @clk: pointer to a DPLL struct clk * * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report * readiness before returning. Will save and restore the DPLL's * autoidle state across the enable, per the CDP code. If the DPLL * locked successfully, return 0; if the DPLL did not lock in the time * allotted, or DPLL3 was passed in, return -EINVAL. */ static int _omap3_noncore_dpll_lock(struct clk *clk) { u8 ai; int r; pr_debug("clock: locking DPLL %s\n", clk->name); ai = omap3_dpll_autoidle_read(clk); omap3_dpll_deny_idle(clk); _omap3_dpll_write_clken(clk, DPLL_LOCKED); r = _omap3_wait_dpll_status(clk, 1); if (ai) omap3_dpll_allow_idle(clk); return r; } /* * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness * @clk: pointer to a DPLL struct clk * * Instructs a non-CORE DPLL to enter low-power bypass mode. In * bypass mode, the DPLL's rate is set equal to its parent clock's * rate. 
Waits for the DPLL to report readiness before returning. * Will save and restore the DPLL's autoidle state across the enable, * per the CDP code. If the DPLL entered bypass mode successfully, * return 0; if the DPLL did not enter bypass in the time allotted, or * DPLL3 was passed in, or the DPLL does not support low-power bypass, * return -EINVAL. */ static int _omap3_noncore_dpll_bypass(struct clk *clk) { int r; u8 ai; if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) return -EINVAL; pr_debug("clock: configuring DPLL %s for low-power bypass\n", clk->name); ai = omap3_dpll_autoidle_read(clk); _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS); r = _omap3_wait_dpll_status(clk, 0); if (ai) omap3_dpll_allow_idle(clk); else omap3_dpll_deny_idle(clk); return r; } /* * _omap3_noncore_dpll_stop - instruct a DPLL to stop * @clk: pointer to a DPLL struct clk * * Instructs a non-CORE DPLL to enter low-power stop. Will save and * restore the DPLL's autoidle state across the stop, per the CDP * code. If DPLL3 was passed in, or the DPLL does not support * low-power stop, return -EINVAL; otherwise, return 0. */ static int _omap3_noncore_dpll_stop(struct clk *clk) { u8 ai; if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP))) return -EINVAL; pr_debug("clock: stopping DPLL %s\n", clk->name); ai = omap3_dpll_autoidle_read(clk); _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP); if (ai) omap3_dpll_allow_idle(clk); else omap3_dpll_deny_idle(clk); return 0; } /** * _lookup_dco - Lookup DCO used by j-type DPLL * @clk: pointer to a DPLL struct clk * @dco: digital control oscillator selector * @m: DPLL multiplier to set * @n: DPLL divider to set * * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)" * * XXX This code is not needed for 3430/AM35xx; can it be optimized * out in non-multi-OMAP builds for those chips? 
*/ static void _lookup_dco(struct clk *clk, u8 *dco, u16 m, u8 n) { unsigned long fint, clkinp; /* watch out for overflow */ clkinp = clk->parent->rate; fint = (clkinp / n) * m; if (fint < 1000000000) *dco = 2; else *dco = 4; } /** * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL * @clk: pointer to a DPLL struct clk * @sd_div: target sigma-delta divider * @m: DPLL multiplier to set * @n: DPLL divider to set * * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)" * * XXX This code is not needed for 3430/AM35xx; can it be optimized * out in non-multi-OMAP builds for those chips? */ static void _lookup_sddiv(struct clk *clk, u8 *sd_div, u16 m, u8 n) { unsigned long clkinp, sd; /* watch out for overflow */ int mod1, mod2; clkinp = clk->parent->rate; /* * target sigma-delta to near 250MHz * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)] */ clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */ mod1 = (clkinp * m) % (250 * n); sd = (clkinp * m) / (250 * n); mod2 = sd % 10; sd /= 10; if (mod1 || mod2) sd++; *sd_div = sd; } /* * _omap3_noncore_dpll_program - set non-core DPLL M,N values directly * @clk: struct clk * of DPLL to set * @m: DPLL multiplier to set * @n: DPLL divider to set * @freqsel: FREQSEL value to set * * Program the DPLL with the supplied M, N values, and wait for the DPLL to * lock.. Returns -EINVAL upon error, or 0 upon success. */ static int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel) { struct dpll_data *dd = clk->dpll_data; u8 dco, sd_div; u32 v; /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */ _omap3_noncore_dpll_bypass(clk); /* * Set jitter correction. 
No jitter correction for OMAP4 and 3630 * since freqsel field is no longer present */ if (!cpu_is_omap44xx() && !cpu_is_omap3630()) { v = __raw_readl(dd->control_reg); v &= ~dd->freqsel_mask; v |= freqsel << __ffs(dd->freqsel_mask); __raw_writel(v, dd->control_reg); } /* Set DPLL multiplier, divider */ v = __raw_readl(dd->mult_div1_reg); v &= ~(dd->mult_mask | dd->div1_mask); v |= m << __ffs(dd->mult_mask); v |= (n - 1) << __ffs(dd->div1_mask); /* Configure dco and sd_div for dplls that have these fields */ if (dd->dco_mask) { _lookup_dco(clk, &dco, m, n); v &= ~(dd->dco_mask); v |= dco << __ffs(dd->dco_mask); } if (dd->sddiv_mask) { _lookup_sddiv(clk, &sd_div, m, n); v &= ~(dd->sddiv_mask); v |= sd_div << __ffs(dd->sddiv_mask); } __raw_writel(v, dd->mult_div1_reg); /* We let the clock framework set the other output dividers later */ /* REVISIT: Set ramp-up delay? */ _omap3_noncore_dpll_lock(clk); return 0; } /* Public functions */ /** * omap3_dpll_recalc - recalculate DPLL rate * @clk: DPLL struct clk * * Recalculate and propagate the DPLL rate. */ unsigned long omap3_dpll_recalc(struct clk *clk) { return omap2_get_dpll_rate(clk); } /* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */ /** * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode * @clk: pointer to a DPLL struct clk * * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock. * The choice of modes depends on the DPLL's programmed rate: if it is * the same as the DPLL's parent clock, it will enter bypass; * otherwise, it will enter lock. This code will wait for the DPLL to * indicate readiness before returning, unless the DPLL takes too long * to enter the target state. Intended to be used as the struct clk's * enable function. If DPLL3 was passed in, or the DPLL does not * support low-power stop, or if the DPLL took too long to enter * bypass or lock, return -EINVAL; otherwise, return 0. 
*/ int omap3_noncore_dpll_enable(struct clk *clk) { int r; struct dpll_data *dd; dd = clk->dpll_data; if (!dd) return -EINVAL; if (clk->rate == dd->clk_bypass->rate) { WARN_ON(clk->parent != dd->clk_bypass); r = _omap3_noncore_dpll_bypass(clk); } else { WARN_ON(clk->parent != dd->clk_ref); r = _omap3_noncore_dpll_lock(clk); } /* *FIXME: this is dubious - if clk->rate has changed, what about * propagating? */ if (!r) clk->rate = (clk->recalc) ? clk->recalc(clk) : omap2_get_dpll_rate(clk); return r; } /** * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop * @clk: pointer to a DPLL struct clk * * Instructs a non-CORE DPLL to enter low-power stop. This function is * intended for use in struct clkops. No return value. */ void omap3_noncore_dpll_disable(struct clk *clk) { _omap3_noncore_dpll_stop(clk); } /* Non-CORE DPLL rate set code */ /** * omap3_noncore_dpll_set_rate - set non-core DPLL rate * @clk: struct clk * of DPLL to set * @rate: rounded target rate * * Set the DPLL CLKOUT to the target rate. If the DPLL can enter * low-power bypass, and the target rate is the bypass source clock * rate, then configure the DPLL for bypass. Otherwise, round the * target rate if it hasn't been done already, then program and lock * the DPLL. Returns -EINVAL upon error, or 0 upon success. */ int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate) { struct clk *new_parent = NULL; unsigned long hw_rate; u16 freqsel = 0; struct dpll_data *dd; int ret; if (!clk || !rate) return -EINVAL; dd = clk->dpll_data; if (!dd) return -EINVAL; hw_rate = (clk->recalc) ? clk->recalc(clk) : omap2_get_dpll_rate(clk); if (rate == hw_rate) return 0; /* * Ensure both the bypass and ref clocks are enabled prior to * doing anything; we need the bypass clock running to reprogram * the DPLL. 
*/ omap2_clk_enable(dd->clk_bypass); omap2_clk_enable(dd->clk_ref); if (dd->clk_bypass->rate == rate && (clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) { pr_debug("clock: %s: set rate: entering bypass.\n", clk->name); ret = _omap3_noncore_dpll_bypass(clk); if (!ret) new_parent = dd->clk_bypass; } else { if (dd->last_rounded_rate != rate) rate = clk->round_rate(clk, rate); if (dd->last_rounded_rate == 0) return -EINVAL; /* No freqsel on OMAP4 and OMAP3630 */ if (!cpu_is_omap44xx() && !cpu_is_omap3630()) { freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); if (!freqsel) WARN_ON(1); } pr_debug("clock: %s: set rate: locking rate to %lu.\n", clk->name, rate); ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m, dd->last_rounded_n, freqsel); if (!ret) new_parent = dd->clk_ref; } if (!ret) { /* * Switch the parent clock in the hierarchy, and make sure * that the new parent's usecount is correct. Note: we * enable the new parent before disabling the old to avoid * any unnecessary hardware disable->enable transitions. */ if (clk->usecount) { omap2_clk_enable(new_parent); omap2_clk_disable(clk->parent); } clk_reparent(clk, new_parent); clk->rate = rate; } omap2_clk_disable(dd->clk_ref); omap2_clk_disable(dd->clk_bypass); return 0; } /* DPLL autoidle read/set code */ /** * omap3_dpll_autoidle_read - read a DPLL's autoidle bits * @clk: struct clk * of the DPLL to read * * Return the DPLL's autoidle bits, shifted down to bit 0. Returns * -EINVAL if passed a null pointer or if the struct clk does not * appear to refer to a DPLL. */ u32 omap3_dpll_autoidle_read(struct clk *clk) { const struct dpll_data *dd; u32 v; if (!clk || !clk->dpll_data) return -EINVAL; dd = clk->dpll_data; v = __raw_readl(dd->autoidle_reg); v &= dd->autoidle_mask; v >>= __ffs(dd->autoidle_mask); return v; } /** * omap3_dpll_allow_idle - enable DPLL autoidle bits * @clk: struct clk * of the DPLL to operate on * * Enable DPLL automatic idle control. 
This automatic idle mode * switching takes effect only when the DPLL is locked, at least on * OMAP3430. The DPLL will enter low-power stop when its downstream * clocks are gated. No return value. */ void omap3_dpll_allow_idle(struct clk *clk) { const struct dpll_data *dd; u32 v; if (!clk || !clk->dpll_data) return; dd = clk->dpll_data; /* * REVISIT: CORE DPLL can optionally enter low-power bypass * by writing 0x5 instead of 0x1. Add some mechanism to * optionally enter this mode. */ v = __raw_readl(dd->autoidle_reg); v &= ~dd->autoidle_mask; v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask); __raw_writel(v, dd->autoidle_reg); } /** * omap3_dpll_deny_idle - prevent DPLL from automatically idling * @clk: struct clk * of the DPLL to operate on * * Disable DPLL automatic idle control. No return value. */ void omap3_dpll_deny_idle(struct clk *clk) { const struct dpll_data *dd; u32 v; if (!clk || !clk->dpll_data) return; dd = clk->dpll_data; v = __raw_readl(dd->autoidle_reg); v &= ~dd->autoidle_mask; v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask); __raw_writel(v, dd->autoidle_reg); } /* Clock control for DPLL outputs */ /** * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate * @clk: DPLL output struct clk * * Using parent clock DPLL data, look up DPLL state. If locked, set our * rate to the dpll_clk * 2; otherwise, just use dpll_clk. */ unsigned long omap3_clkoutx2_recalc(struct clk *clk) { const struct dpll_data *dd; unsigned long rate; u32 v; struct clk *pclk; /* Walk up the parents of clk, looking for a DPLL */ pclk = clk->parent; while (pclk && !pclk->dpll_data) pclk = pclk->parent; /* clk does not have a DPLL as a parent? */ WARN_ON(!pclk); dd = pclk->dpll_data; WARN_ON(!dd->enable_mask); v = __raw_readl(dd->control_reg) & dd->enable_mask; v >>= __ffs(dd->enable_mask); if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE)) rate = clk->parent->rate; else rate = clk->parent->rate * 2; return rate; }
gpl-2.0
SimpleAOSP-Kernel/kernel_hammerhead
net/sctp/proc.c
5180
13873
/* SCTP kernel implementation * Copyright (c) 2003 International Business Machines, Corp. * * This file is part of the SCTP kernel implementation * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * Sridhar Samudrala <sri@us.ibm.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. 
 */

#include <linux/types.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/export.h>
#include <net/sctp/sctp.h>
#include <net/ip.h> /* for snmp_fold_field */

/* Table mapping /proc/net/sctp/snmp row names to SCTP MIB counters. */
static const struct snmp_mib sctp_snmp_list[] = {
	SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
	SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
	SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
	SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS),
	SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS),
	SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES),
	SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS),
	SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS),
	SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS),
	SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS),
	SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS),
	SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS),
	SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS),
	SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS),
	SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
	SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
	SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
	SNMP_MIB_ITEM("SctpT1InitExpireds", SCTP_MIB_T1_INIT_EXPIREDS),
	SNMP_MIB_ITEM("SctpT1CookieExpireds", SCTP_MIB_T1_COOKIE_EXPIREDS),
	SNMP_MIB_ITEM("SctpT2ShutdownExpireds", SCTP_MIB_T2_SHUTDOWN_EXPIREDS),
	SNMP_MIB_ITEM("SctpT3RtxExpireds", SCTP_MIB_T3_RTX_EXPIREDS),
	SNMP_MIB_ITEM("SctpT4RtoExpireds", SCTP_MIB_T4_RTO_EXPIREDS),
	SNMP_MIB_ITEM("SctpT5ShutdownGuardExpireds", SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS),
	SNMP_MIB_ITEM("SctpDelaySackExpireds", SCTP_MIB_DELAY_SACK_EXPIREDS),
	SNMP_MIB_ITEM("SctpAutocloseExpireds", SCTP_MIB_AUTOCLOSE_EXPIREDS),
	SNMP_MIB_ITEM("SctpT3Retransmits", SCTP_MIB_T3_RETRANSMITS),
	SNMP_MIB_ITEM("SctpPmtudRetransmits", SCTP_MIB_PMTUD_RETRANSMITS),
	SNMP_MIB_ITEM("SctpFastRetransmits", SCTP_MIB_FAST_RETRANSMITS),
	SNMP_MIB_ITEM("SctpInPktSoftirq", SCTP_MIB_IN_PKT_SOFTIRQ),
	SNMP_MIB_ITEM("SctpInPktBacklog", SCTP_MIB_IN_PKT_BACKLOG),
	SNMP_MIB_ITEM("SctpInPktDiscards", SCTP_MIB_IN_PKT_DISCARDS),
	SNMP_MIB_ITEM("SctpInDataChunkDiscards", SCTP_MIB_IN_DATA_CHUNK_DISCARDS),
	SNMP_MIB_SENTINEL
};

/* Display sctp snmp mib statistics(/proc/net/sctp/snmp).
 * One "name<TAB>value" line per counter, summed across all CPUs. */
static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
{
	int i;

	for (i = 0; sctp_snmp_list[i].name != NULL; i++)
		seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
			   snmp_fold_field((void __percpu **)sctp_statistics,
				      sctp_snmp_list[i].entry));

	return 0;
}

/* Initialize the seq file operations for 'snmp' object. */
static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, sctp_snmp_seq_show, NULL);
}

static const struct file_operations sctp_snmp_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = sctp_snmp_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

/* Set up the proc fs entry for 'snmp' object. */
int __init sctp_snmp_proc_init(void)
{
	struct proc_dir_entry *p;

	p = proc_create("snmp", S_IRUGO, proc_net_sctp, &sctp_snmp_seq_fops);
	if (!p)
		return -ENOMEM;

	return 0;
}

/* Cleanup the proc fs entry for 'snmp' object. */
void sctp_snmp_proc_exit(void)
{
	remove_proc_entry("snmp", proc_net_sctp);
}

/* Dump local addresses of an association/endpoint.
 * The primary path's address (associations only) is prefixed with '*'. */
static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb)
{
	struct sctp_association *asoc;
	struct sctp_sockaddr_entry *laddr;
	struct sctp_transport *peer;
	union sctp_addr *addr, *primary = NULL;
	struct sctp_af *af;

	if (epb->type == SCTP_EP_TYPE_ASSOCIATION) {
		asoc = sctp_assoc(epb);
		peer = asoc->peer.primary_path;
		primary = &peer->saddr;
	}

	list_for_each_entry(laddr, &epb->bind_addr.address_list, list) {
		addr = &laddr->a;
		af = sctp_get_af_specific(addr->sa.sa_family);
		if (primary && af->cmp_addr(addr, primary)) {
			seq_printf(seq, "*");
		}
		af->seq_dump_addr(seq, addr);
	}
}

/* Dump remote addresses of an association.
 * The primary peer address is prefixed with '*'. */
static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_association *assoc)
{
	struct sctp_transport *transport;
	union sctp_addr *addr, *primary;
	struct sctp_af *af;

	primary = &assoc->peer.primary_addr;
	list_for_each_entry(transport, &assoc->peer.transport_addr_list,
			transports) {
		addr = &transport->ipaddr;
		af = sctp_get_af_specific(addr->sa.sa_family);
		if (af->cmp_addr(addr, primary)) {
			seq_printf(seq, "*");
		}
		af->seq_dump_addr(seq, addr);
	}
}

/* seq_file start: *pos iterates over endpoint hash buckets; the header
 * line is emitted once, at position 0. */
static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= sctp_ep_hashsize)
		return NULL;

	if (*pos < 0)
		*pos = 0;

	if (*pos == 0)
		seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n");

	return (void *)pos;
}

static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
{
	/* Nothing to release; locking is done per-bucket in show() */
}

static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	if (++*pos >= sctp_ep_hashsize)
		return NULL;

	return pos;
}


/* Display sctp endpoints (/proc/net/sctp/eps): walk one hash bucket per
 * show() call under BH-disabled read_lock. */
static int sctp_eps_seq_show(struct seq_file *seq, void *v)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_endpoint *ep;
	struct sock *sk;
	struct hlist_node *node;
	int hash = *(loff_t *)v;

	/* NOTE(review): -ENOMEM is an odd errno for an out-of-range
	 * bucket index; kept as-is for compatibility. */
	if (hash >= sctp_ep_hashsize)
		return -ENOMEM;

	head = &sctp_ep_hashtable[hash];
	sctp_local_bh_disable();
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		ep = sctp_ep(epb);
		sk = epb->sk;
		seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
			   sctp_sk(sk)->type, sk->sk_state, hash,
			   epb->bind_addr.port,
			   sock_i_uid(sk), sock_i_ino(sk));

		sctp_seq_dump_local_addrs(seq, epb);
		seq_printf(seq, "\n");
	}
	read_unlock(&head->lock);
	sctp_local_bh_enable();

	return 0;
}

static const struct seq_operations sctp_eps_ops = {
	.start = sctp_eps_seq_start,
	.next  = sctp_eps_seq_next,
	.stop  = sctp_eps_seq_stop,
	.show  = sctp_eps_seq_show,
};


/* Initialize the seq file operations for 'eps' object. */
static int sctp_eps_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &sctp_eps_ops);
}

static const struct file_operations sctp_eps_seq_fops = {
	.open	 = sctp_eps_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

/* Set up the proc fs entry for 'eps' object. */
int __init sctp_eps_proc_init(void)
{
	struct proc_dir_entry *p;

	p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops);
	if (!p)
		return -ENOMEM;

	return 0;
}

/* Cleanup the proc fs entry for 'eps' object. */
void sctp_eps_proc_exit(void)
{
	remove_proc_entry("eps", proc_net_sctp);
}


/* seq_file start: *pos iterates over association hash buckets. */
static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= sctp_assoc_hashsize)
		return NULL;

	if (*pos < 0)
		*pos = 0;

	if (*pos == 0)
		seq_printf(seq, " ASSOC SOCK STY SST ST HBKT "
				"ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
				"RPORT LADDRS <-> RADDRS "
				"HBINT INS OUTS MAXRT T1X T2X RTXC\n");

	return (void *)pos;
}

static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
{
	/* Nothing to release; locking is done per-bucket in show() */
}

static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	if (++*pos >= sctp_assoc_hashsize)
		return NULL;

	return pos;
}


/* Display sctp associations (/proc/net/sctp/assocs): one bucket per
 * show() call under BH-disabled read_lock. */
static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_association *assoc;
	struct sock *sk;
	struct hlist_node *node;
	int hash = *(loff_t *)v;

	/* NOTE(review): -ENOMEM is an odd errno here too; kept as-is. */
	if (hash >= sctp_assoc_hashsize)
		return -ENOMEM;

	head = &sctp_assoc_hashtable[hash];
	sctp_local_bh_disable();
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		assoc = sctp_assoc(epb);
		sk = epb->sk;
		seq_printf(seq,
			   "%8pK %8pK %-3d %-3d %-2d %-4d "
			   "%4d %8d %8d %7d %5lu %-5d %5d ",
			   assoc, sk, sctp_sk(sk)->type, sk->sk_state,
			   assoc->state, hash,
			   assoc->assoc_id,
			   assoc->sndbuf_used,
			   atomic_read(&assoc->rmem_alloc),
			   sock_i_uid(sk), sock_i_ino(sk),
			   epb->bind_addr.port,
			   assoc->peer.port);
		seq_printf(seq, " ");
		sctp_seq_dump_local_addrs(seq, epb);
		seq_printf(seq, "<-> ");
		sctp_seq_dump_remote_addrs(seq, assoc);
		seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d ",
			   assoc->hbinterval, assoc->c.sinit_max_instreams,
			   assoc->c.sinit_num_ostreams, assoc->max_retrans,
			   assoc->init_retries, assoc->shutdown_retries,
			   assoc->rtx_data_chunks);
		seq_printf(seq, "\n");
	}
	read_unlock(&head->lock);
	sctp_local_bh_enable();

	return 0;
}

static const struct seq_operations sctp_assoc_ops = {
	.start = sctp_assocs_seq_start,
	.next  = sctp_assocs_seq_next,
	.stop  = sctp_assocs_seq_stop,
	.show  = sctp_assocs_seq_show,
};

/* Initialize the seq file operations for 'assocs' object. */
static int sctp_assocs_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &sctp_assoc_ops);
}

static const struct file_operations sctp_assocs_seq_fops = {
	.open	 = sctp_assocs_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

/* Set up the proc fs entry for 'assocs' object. */
int __init sctp_assocs_proc_init(void)
{
	struct proc_dir_entry *p;

	p = proc_create("assocs", S_IRUGO, proc_net_sctp,
			&sctp_assocs_seq_fops);
	if (!p)
		return -ENOMEM;

	return 0;
}

/* Cleanup the proc fs entry for 'assocs' object. */
void sctp_assocs_proc_exit(void)
{
	remove_proc_entry("assocs", proc_net_sctp);
}

/* seq_file start for /proc/net/sctp/remaddr: iterates association
 * hash buckets, dumping one line per peer transport address. */
static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= sctp_assoc_hashsize)
		return NULL;

	if (*pos < 0)
		*pos = 0;

	if (*pos == 0)
		seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
				"REM_ADDR_RTX START\n");

	return (void *)pos;
}

static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	if (++*pos >= sctp_assoc_hashsize)
		return NULL;

	return pos;
}

static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
{
	/* Nothing to release; locking is done per-bucket in show() */
}

static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_association *assoc;
	struct hlist_node *node;
	struct sctp_transport *tsp;
	int hash = *(loff_t *)v;

	if (hash >= sctp_assoc_hashsize)
		return -ENOMEM;

	head = &sctp_assoc_hashtable[hash];
	sctp_local_bh_disable();
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		assoc = sctp_assoc(epb);
		list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
					transports) {
			/*
			 * The remote address (ADDR)
			 */
			tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr);
			seq_printf(seq, " ");

			/*
			 * The association ID (ASSOC_ID)
			 */
			seq_printf(seq, "%d ", tsp->asoc->assoc_id);

			/*
			 * If the Heartbeat is active (HB_ACT)
			 * Note: 1 = Active, 0 = Inactive
			 */
			seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer));

			/*
			 * Retransmit time out (RTO)
			 */
			seq_printf(seq, "%lu ", tsp->rto);

			/*
			 * Maximum path retransmit count (PATH_MAX_RTX)
			 */
			seq_printf(seq, "%d ", tsp->pathmaxrxt);

			/*
			 * remote address retransmit count (REM_ADDR_RTX)
			 * Note: We don't have a way to tally this at the
			 * moment so lets just leave it as zero for the moment
			 */
			seq_printf(seq, "0 ");

			/*
			 * remote address start time (START).  This is also not
			 * currently implemented, but we can record it with a
			 * jiffies marker in a subsequent patch
			 */
			seq_printf(seq, "0");

			seq_printf(seq, "\n");
		}
	}

	read_unlock(&head->lock);
	sctp_local_bh_enable();

	return 0;

}

static const struct seq_operations sctp_remaddr_ops = {
	.start = sctp_remaddr_seq_start,
	.next  = sctp_remaddr_seq_next,
	.stop  = sctp_remaddr_seq_stop,
	.show  = sctp_remaddr_seq_show,
};

/* Cleanup the proc fs entry for 'remaddr' object. */
void sctp_remaddr_proc_exit(void)
{
	remove_proc_entry("remaddr", proc_net_sctp);
}

static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &sctp_remaddr_ops);
}

static const struct file_operations sctp_remaddr_seq_fops = {
	.open	 = sctp_remaddr_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

/* Set up the proc fs entry for 'remaddr' object. */
int __init sctp_remaddr_proc_init(void)
{
	struct proc_dir_entry *p;

	p = proc_create("remaddr", S_IRUGO, proc_net_sctp,
			&sctp_remaddr_seq_fops);
	if (!p)
		return -ENOMEM;
	return 0;
}
gpl-2.0
supersonicninja/L01FJBKERNEL
net/sctp/proc.c
5180
13873
/* SCTP kernel implementation * Copyright (c) 2003 International Business Machines, Corp. * * This file is part of the SCTP kernel implementation * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * Sridhar Samudrala <sri@us.ibm.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. 
*/ #include <linux/types.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/export.h> #include <net/sctp/sctp.h> #include <net/ip.h> /* for snmp_fold_field */ static const struct snmp_mib sctp_snmp_list[] = { SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB), SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS), SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS), SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS), SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS), SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES), SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS), SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS), SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS), SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS), SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS), SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS), SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS), SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS), SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS), SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS), SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS), SNMP_MIB_ITEM("SctpT1InitExpireds", SCTP_MIB_T1_INIT_EXPIREDS), SNMP_MIB_ITEM("SctpT1CookieExpireds", SCTP_MIB_T1_COOKIE_EXPIREDS), SNMP_MIB_ITEM("SctpT2ShutdownExpireds", SCTP_MIB_T2_SHUTDOWN_EXPIREDS), SNMP_MIB_ITEM("SctpT3RtxExpireds", SCTP_MIB_T3_RTX_EXPIREDS), SNMP_MIB_ITEM("SctpT4RtoExpireds", SCTP_MIB_T4_RTO_EXPIREDS), SNMP_MIB_ITEM("SctpT5ShutdownGuardExpireds", SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS), SNMP_MIB_ITEM("SctpDelaySackExpireds", SCTP_MIB_DELAY_SACK_EXPIREDS), SNMP_MIB_ITEM("SctpAutocloseExpireds", SCTP_MIB_AUTOCLOSE_EXPIREDS), SNMP_MIB_ITEM("SctpT3Retransmits", SCTP_MIB_T3_RETRANSMITS), SNMP_MIB_ITEM("SctpPmtudRetransmits", SCTP_MIB_PMTUD_RETRANSMITS), SNMP_MIB_ITEM("SctpFastRetransmits", SCTP_MIB_FAST_RETRANSMITS), SNMP_MIB_ITEM("SctpInPktSoftirq", 
SCTP_MIB_IN_PKT_SOFTIRQ), SNMP_MIB_ITEM("SctpInPktBacklog", SCTP_MIB_IN_PKT_BACKLOG), SNMP_MIB_ITEM("SctpInPktDiscards", SCTP_MIB_IN_PKT_DISCARDS), SNMP_MIB_ITEM("SctpInDataChunkDiscards", SCTP_MIB_IN_DATA_CHUNK_DISCARDS), SNMP_MIB_SENTINEL }; /* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */ static int sctp_snmp_seq_show(struct seq_file *seq, void *v) { int i; for (i = 0; sctp_snmp_list[i].name != NULL; i++) seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, snmp_fold_field((void __percpu **)sctp_statistics, sctp_snmp_list[i].entry)); return 0; } /* Initialize the seq file operations for 'snmp' object. */ static int sctp_snmp_seq_open(struct inode *inode, struct file *file) { return single_open(file, sctp_snmp_seq_show, NULL); } static const struct file_operations sctp_snmp_seq_fops = { .owner = THIS_MODULE, .open = sctp_snmp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* Set up the proc fs entry for 'snmp' object. */ int __init sctp_snmp_proc_init(void) { struct proc_dir_entry *p; p = proc_create("snmp", S_IRUGO, proc_net_sctp, &sctp_snmp_seq_fops); if (!p) return -ENOMEM; return 0; } /* Cleanup the proc fs entry for 'snmp' object. */ void sctp_snmp_proc_exit(void) { remove_proc_entry("snmp", proc_net_sctp); } /* Dump local addresses of an association/endpoint. */ static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb) { struct sctp_association *asoc; struct sctp_sockaddr_entry *laddr; struct sctp_transport *peer; union sctp_addr *addr, *primary = NULL; struct sctp_af *af; if (epb->type == SCTP_EP_TYPE_ASSOCIATION) { asoc = sctp_assoc(epb); peer = asoc->peer.primary_path; primary = &peer->saddr; } list_for_each_entry(laddr, &epb->bind_addr.address_list, list) { addr = &laddr->a; af = sctp_get_af_specific(addr->sa.sa_family); if (primary && af->cmp_addr(addr, primary)) { seq_printf(seq, "*"); } af->seq_dump_addr(seq, addr); } } /* Dump remote addresses of an association. 
*/ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_association *assoc) { struct sctp_transport *transport; union sctp_addr *addr, *primary; struct sctp_af *af; primary = &assoc->peer.primary_addr; list_for_each_entry(transport, &assoc->peer.transport_addr_list, transports) { addr = &transport->ipaddr; af = sctp_get_af_specific(addr->sa.sa_family); if (af->cmp_addr(addr, primary)) { seq_printf(seq, "*"); } af->seq_dump_addr(seq, addr); } } static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos >= sctp_ep_hashsize) return NULL; if (*pos < 0) *pos = 0; if (*pos == 0) seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n"); return (void *)pos; } static void sctp_eps_seq_stop(struct seq_file *seq, void *v) { } static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos) { if (++*pos >= sctp_ep_hashsize) return NULL; return pos; } /* Display sctp endpoints (/proc/net/sctp/eps). */ static int sctp_eps_seq_show(struct seq_file *seq, void *v) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; struct sctp_endpoint *ep; struct sock *sk; struct hlist_node *node; int hash = *(loff_t *)v; if (hash >= sctp_ep_hashsize) return -ENOMEM; head = &sctp_ep_hashtable[hash]; sctp_local_bh_disable(); read_lock(&head->lock); sctp_for_each_hentry(epb, node, &head->chain) { ep = sctp_ep(epb); sk = epb->sk; seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, sctp_sk(sk)->type, sk->sk_state, hash, epb->bind_addr.port, sock_i_uid(sk), sock_i_ino(sk)); sctp_seq_dump_local_addrs(seq, epb); seq_printf(seq, "\n"); } read_unlock(&head->lock); sctp_local_bh_enable(); return 0; } static const struct seq_operations sctp_eps_ops = { .start = sctp_eps_seq_start, .next = sctp_eps_seq_next, .stop = sctp_eps_seq_stop, .show = sctp_eps_seq_show, }; /* Initialize the seq file operations for 'eps' object. 
*/ static int sctp_eps_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &sctp_eps_ops); } static const struct file_operations sctp_eps_seq_fops = { .open = sctp_eps_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* Set up the proc fs entry for 'eps' object. */ int __init sctp_eps_proc_init(void) { struct proc_dir_entry *p; p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops); if (!p) return -ENOMEM; return 0; } /* Cleanup the proc fs entry for 'eps' object. */ void sctp_eps_proc_exit(void) { remove_proc_entry("eps", proc_net_sctp); } static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos >= sctp_assoc_hashsize) return NULL; if (*pos < 0) *pos = 0; if (*pos == 0) seq_printf(seq, " ASSOC SOCK STY SST ST HBKT " "ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " "RPORT LADDRS <-> RADDRS " "HBINT INS OUTS MAXRT T1X T2X RTXC\n"); return (void *)pos; } static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) { } static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos) { if (++*pos >= sctp_assoc_hashsize) return NULL; return pos; } /* Display sctp associations (/proc/net/sctp/assocs). 
*/ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; struct sctp_association *assoc; struct sock *sk; struct hlist_node *node; int hash = *(loff_t *)v; if (hash >= sctp_assoc_hashsize) return -ENOMEM; head = &sctp_assoc_hashtable[hash]; sctp_local_bh_disable(); read_lock(&head->lock); sctp_for_each_hentry(epb, node, &head->chain) { assoc = sctp_assoc(epb); sk = epb->sk; seq_printf(seq, "%8pK %8pK %-3d %-3d %-2d %-4d " "%4d %8d %8d %7d %5lu %-5d %5d ", assoc, sk, sctp_sk(sk)->type, sk->sk_state, assoc->state, hash, assoc->assoc_id, assoc->sndbuf_used, atomic_read(&assoc->rmem_alloc), sock_i_uid(sk), sock_i_ino(sk), epb->bind_addr.port, assoc->peer.port); seq_printf(seq, " "); sctp_seq_dump_local_addrs(seq, epb); seq_printf(seq, "<-> "); sctp_seq_dump_remote_addrs(seq, assoc); seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d ", assoc->hbinterval, assoc->c.sinit_max_instreams, assoc->c.sinit_num_ostreams, assoc->max_retrans, assoc->init_retries, assoc->shutdown_retries, assoc->rtx_data_chunks); seq_printf(seq, "\n"); } read_unlock(&head->lock); sctp_local_bh_enable(); return 0; } static const struct seq_operations sctp_assoc_ops = { .start = sctp_assocs_seq_start, .next = sctp_assocs_seq_next, .stop = sctp_assocs_seq_stop, .show = sctp_assocs_seq_show, }; /* Initialize the seq file operations for 'assocs' object. */ static int sctp_assocs_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &sctp_assoc_ops); } static const struct file_operations sctp_assocs_seq_fops = { .open = sctp_assocs_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* Set up the proc fs entry for 'assocs' object. */ int __init sctp_assocs_proc_init(void) { struct proc_dir_entry *p; p = proc_create("assocs", S_IRUGO, proc_net_sctp, &sctp_assocs_seq_fops); if (!p) return -ENOMEM; return 0; } /* Cleanup the proc fs entry for 'assocs' object. 
*/ void sctp_assocs_proc_exit(void) { remove_proc_entry("assocs", proc_net_sctp); } static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos >= sctp_assoc_hashsize) return NULL; if (*pos < 0) *pos = 0; if (*pos == 0) seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX " "REM_ADDR_RTX START\n"); return (void *)pos; } static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos) { if (++*pos >= sctp_assoc_hashsize) return NULL; return pos; } static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) { } static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; struct sctp_association *assoc; struct hlist_node *node; struct sctp_transport *tsp; int hash = *(loff_t *)v; if (hash >= sctp_assoc_hashsize) return -ENOMEM; head = &sctp_assoc_hashtable[hash]; sctp_local_bh_disable(); read_lock(&head->lock); sctp_for_each_hentry(epb, node, &head->chain) { assoc = sctp_assoc(epb); list_for_each_entry(tsp, &assoc->peer.transport_addr_list, transports) { /* * The remote address (ADDR) */ tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr); seq_printf(seq, " "); /* * The association ID (ASSOC_ID) */ seq_printf(seq, "%d ", tsp->asoc->assoc_id); /* * If the Heartbeat is active (HB_ACT) * Note: 1 = Active, 0 = Inactive */ seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer)); /* * Retransmit time out (RTO) */ seq_printf(seq, "%lu ", tsp->rto); /* * Maximum path retransmit count (PATH_MAX_RTX) */ seq_printf(seq, "%d ", tsp->pathmaxrxt); /* * remote address retransmit count (REM_ADDR_RTX) * Note: We don't have a way to tally this at the moment * so lets just leave it as zero for the moment */ seq_printf(seq, "0 "); /* * remote address start time (START). 
This is also not * currently implemented, but we can record it with a * jiffies marker in a subsequent patch */ seq_printf(seq, "0"); seq_printf(seq, "\n"); } } read_unlock(&head->lock); sctp_local_bh_enable(); return 0; } static const struct seq_operations sctp_remaddr_ops = { .start = sctp_remaddr_seq_start, .next = sctp_remaddr_seq_next, .stop = sctp_remaddr_seq_stop, .show = sctp_remaddr_seq_show, }; /* Cleanup the proc fs entry for 'remaddr' object. */ void sctp_remaddr_proc_exit(void) { remove_proc_entry("remaddr", proc_net_sctp); } static int sctp_remaddr_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &sctp_remaddr_ops); } static const struct file_operations sctp_remaddr_seq_fops = { .open = sctp_remaddr_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; int __init sctp_remaddr_proc_init(void) { struct proc_dir_entry *p; p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops); if (!p) return -ENOMEM; return 0; }
gpl-2.0
sloanyang/android_kernel_zte_u950
arch/s390/lib/uaccess_mvcos.c
8508
6062
/* * arch/s390/lib/uaccess_mvcos.c * * Optimized user space space access functions based on mvcos. * * Copyright (C) IBM Corp. 2006 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Gerald Schaefer (gerald.schaefer@de.ibm.com) */ #include <linux/errno.h> #include <linux/mm.h> #include <asm/uaccess.h> #include <asm/futex.h> #include "uaccess.h" #ifndef __s390x__ #define AHI "ahi" #define ALR "alr" #define CLR "clr" #define LHI "lhi" #define SLR "slr" #else #define AHI "aghi" #define ALR "algr" #define CLR "clgr" #define LHI "lghi" #define SLR "slgr" #endif static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) { register unsigned long reg0 asm("0") = 0x81UL; unsigned long tmp1, tmp2; tmp1 = -4096UL; asm volatile( "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" "9: jz 7f\n" "1:"ALR" %0,%3\n" " "SLR" %1,%3\n" " "SLR" %2,%3\n" " j 0b\n" "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ " "SLR" %4,%1\n" " "CLR" %0,%4\n" /* copy crosses next page boundary? 
*/ " jnh 4f\n" "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" "10:"SLR" %0,%4\n" " "ALR" %2,%4\n" "4:"LHI" %4,-1\n" " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ " bras %3,6f\n" /* memset loop */ " xc 0(1,%2),0(%2)\n" "5: xc 0(256,%2),0(%2)\n" " la %2,256(%2)\n" "6:"AHI" %4,-256\n" " jnm 5b\n" " ex %4,0(%3)\n" " j 8f\n" "7:"SLR" %0,%0\n" "8: \n" EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) : "d" (reg0) : "cc", "memory"); return size; } static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) { if (size <= 256) return copy_from_user_std(size, ptr, x); return copy_from_user_mvcos(size, ptr, x); } static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; tmp1 = -4096UL; asm volatile( "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" "6: jz 4f\n" "1:"ALR" %0,%3\n" " "SLR" %1,%3\n" " "SLR" %2,%3\n" " j 0b\n" "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ " "SLR" %4,%1\n" " "CLR" %0,%4\n" /* copy crosses next page boundary? */ " jnh 5f\n" "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" "7:"SLR" %0,%4\n" " j 5f\n" "4:"SLR" %0,%0\n" "5: \n" EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) : "d" (reg0) : "cc", "memory"); return size; } static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x) { if (size <= 256) return copy_to_user_std(size, ptr, x); return copy_to_user_mvcos(size, ptr, x); } static size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) { register unsigned long reg0 asm("0") = 0x810081UL; unsigned long tmp1, tmp2; tmp1 = -4096UL; /* FIXME: copy with reduced length. 
*/ asm volatile( "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" " jz 2f\n" "1:"ALR" %0,%3\n" " "SLR" %1,%3\n" " "SLR" %2,%3\n" " j 0b\n" "2:"SLR" %0,%0\n" "3: \n" EX_TABLE(0b,3b) : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) : "d" (reg0) : "cc", "memory"); return size; } static size_t clear_user_mvcos(size_t size, void __user *to) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; tmp1 = -4096UL; asm volatile( "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" " jz 4f\n" "1:"ALR" %0,%2\n" " "SLR" %1,%2\n" " j 0b\n" "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ " "SLR" %3,%1\n" " "CLR" %0,%3\n" /* copy crosses next page boundary? */ " jnh 5f\n" "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" " "SLR" %0,%3\n" " j 5f\n" "4:"SLR" %0,%0\n" "5: \n" EX_TABLE(0b,2b) EX_TABLE(3b,5b) : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) : "a" (empty_zero_page), "d" (reg0) : "cc", "memory"); return size; } static size_t strnlen_user_mvcos(size_t count, const char __user *src) { char buf[256]; int rc; size_t done, len, len_str; done = 0; do { len = min(count - done, (size_t) 256); rc = uaccess.copy_from_user(len, src + done, buf); if (unlikely(rc == len)) return 0; len -= rc; len_str = strnlen(buf, len); done += len_str; } while ((len_str == len) && (done < count)); return done + 1; } static size_t strncpy_from_user_mvcos(size_t count, const char __user *src, char *dst) { int rc; size_t done, len, len_str; done = 0; do { len = min(count - done, (size_t) 4096); rc = uaccess.copy_from_user(len, src + done, dst); if (unlikely(rc == len)) return -EFAULT; len -= rc; len_str = strnlen(dst, len); done += len_str; } while ((len_str == len) && (done < count)); return done; } struct uaccess_ops uaccess_mvcos = { .copy_from_user = copy_from_user_mvcos_check, .copy_from_user_small = copy_from_user_std, .copy_to_user = copy_to_user_mvcos_check, .copy_to_user_small = copy_to_user_std, .copy_in_user = 
copy_in_user_mvcos, .clear_user = clear_user_mvcos, .strnlen_user = strnlen_user_std, .strncpy_from_user = strncpy_from_user_std, .futex_atomic_op = futex_atomic_op_std, .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std, }; struct uaccess_ops uaccess_mvcos_switch = { .copy_from_user = copy_from_user_mvcos, .copy_from_user_small = copy_from_user_mvcos, .copy_to_user = copy_to_user_mvcos, .copy_to_user_small = copy_to_user_mvcos, .copy_in_user = copy_in_user_mvcos, .clear_user = clear_user_mvcos, .strnlen_user = strnlen_user_mvcos, .strncpy_from_user = strncpy_from_user_mvcos, .futex_atomic_op = futex_atomic_op_pt, .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt, };
gpl-2.0
ngvincent/android-kernel-oppo-find5
crypto/sha256_generic.c
8764
12471
/* * Cryptographic API. * * SHA-256, as specified in * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf * * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>. * * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/sha.h> #include <asm/byteorder.h> static inline u32 Ch(u32 x, u32 y, u32 z) { return z ^ (x & (y ^ z)); } static inline u32 Maj(u32 x, u32 y, u32 z) { return (x & y) | (z & (x | y)); } #define e0(x) (ror32(x, 2) ^ ror32(x,13) ^ ror32(x,22)) #define e1(x) (ror32(x, 6) ^ ror32(x,11) ^ ror32(x,25)) #define s0(x) (ror32(x, 7) ^ ror32(x,18) ^ (x >> 3)) #define s1(x) (ror32(x,17) ^ ror32(x,19) ^ (x >> 10)) static inline void LOAD_OP(int I, u32 *W, const u8 *input) { W[I] = __be32_to_cpu( ((__be32*)(input))[I] ); } static inline void BLEND_OP(int I, u32 *W) { W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; } static void sha256_transform(u32 *state, const u8 *input) { u32 a, b, c, d, e, f, g, h, t1, t2; u32 W[64]; int i; /* load the input */ for (i = 0; i < 16; i++) LOAD_OP(i, W, input); /* now blend */ for (i = 16; i < 64; i++) BLEND_OP(i, W); /* load the state into our registers */ a=state[0]; b=state[1]; c=state[2]; d=state[3]; e=state[4]; f=state[5]; g=state[6]; h=state[7]; /* now iterate */ t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1]; t2 = e0(h) 
+ Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0xe9b5dba5 + W[ 3]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x3956c25b + W[ 4]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x59f111f1 + W[ 5]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x923f82a4 + W[ 6]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0xab1c5ed5 + W[ 7]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0xd807aa98 + W[ 8]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x12835b01 + W[ 9]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x243185be + W[10]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x550c7dc3 + W[11]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x72be5d74 + W[12]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x80deb1fe + W[13]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x9bdc06a7 + W[14]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0xc19bf174 + W[15]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0xe49b69c1 + W[16]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0xefbe4786 + W[17]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x0fc19dc6 + W[18]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x240ca1cc + W[19]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x2de92c6f + W[20]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x4a7484aa + W[21]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x5cb0a9dc + W[22]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x76f988da + W[23]; t2 = e0(b) + Maj(b,c,d); 
e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0x983e5152 + W[24]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0xa831c66d + W[25]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0xb00327c8 + W[26]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0xbf597fc7 + W[27]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0xc6e00bf3 + W[28]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0xd5a79147 + W[29]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x06ca6351 + W[30]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x14292967 + W[31]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0x27b70a85 + W[32]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x2e1b2138 + W[33]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x4d2c6dfc + W[34]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x53380d13 + W[35]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x650a7354 + W[36]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x766a0abb + W[37]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x81c2c92e + W[38]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x92722c85 + W[39]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0xa2bfe8a1 + W[40]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0xa81a664b + W[41]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0xc24b8b70 + W[42]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0xc76c51a3 + W[43]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0xd192e819 + W[44]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0xd6990624 + W[45]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 
= b + e1(g) + Ch(g,h,a) + 0xf40e3585 + W[46]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x106aa070 + W[47]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0x19a4c116 + W[48]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x1e376c08 + W[49]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x2748774c + W[50]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x34b0bcb5 + W[51]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x391c0cb3 + W[52]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x4ed8aa4a + W[53]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x5b9cca4f + W[54]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x682e6ff3 + W[55]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0x748f82ee + W[56]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x78a5636f + W[57]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x84c87814 + W[58]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x8cc70208 + W[59]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x90befffa + W[60]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0xa4506ceb + W[61]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0xbef9a3f7 + W[62]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0xc67178f2 + W[63]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; /* clear any sensitive info... 
*/ a = b = c = d = e = f = g = h = t1 = t2 = 0; memset(W, 0, 64 * sizeof(u32)); } static int sha224_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA224_H0; sctx->state[1] = SHA224_H1; sctx->state[2] = SHA224_H2; sctx->state[3] = SHA224_H3; sctx->state[4] = SHA224_H4; sctx->state[5] = SHA224_H5; sctx->state[6] = SHA224_H6; sctx->state[7] = SHA224_H7; sctx->count = 0; return 0; } static int sha256_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA256_H0; sctx->state[1] = SHA256_H1; sctx->state[2] = SHA256_H2; sctx->state[3] = SHA256_H3; sctx->state[4] = SHA256_H4; sctx->state[5] = SHA256_H5; sctx->state[6] = SHA256_H6; sctx->state[7] = SHA256_H7; sctx->count = 0; return 0; } static int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha256_state *sctx = shash_desc_ctx(desc); unsigned int partial, done; const u8 *src; partial = sctx->count & 0x3f; sctx->count += len; done = 0; src = data; if ((partial + len) > 63) { if (partial) { done = -partial; memcpy(sctx->buf + partial, data, done + 64); src = sctx->buf; } do { sha256_transform(sctx->state, src); done += 64; src = data + done; } while (done + 63 < len); partial = 0; } memcpy(sctx->buf + partial, src, len - done); return 0; } static int sha256_final(struct shash_desc *desc, u8 *out) { struct sha256_state *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; __be64 bits; unsigned int index, pad_len; int i; static const u8 padding[64] = { 0x80, }; /* Save number of bits */ bits = cpu_to_be64(sctx->count << 3); /* Pad out to 56 mod 64. */ index = sctx->count & 0x3f; pad_len = (index < 56) ? (56 - index) : ((64+56) - index); sha256_update(desc, padding, pad_len); /* Append length (before padding) */ sha256_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 8; i++) dst[i] = cpu_to_be32(sctx->state[i]); /* Zeroize sensitive information. 
*/ memset(sctx, 0, sizeof(*sctx)); return 0; } static int sha224_final(struct shash_desc *desc, u8 *hash) { u8 D[SHA256_DIGEST_SIZE]; sha256_final(desc, D); memcpy(hash, D, SHA224_DIGEST_SIZE); memset(D, 0, SHA256_DIGEST_SIZE); return 0; } static int sha256_export(struct shash_desc *desc, void *out) { struct sha256_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int sha256_import(struct shash_desc *desc, const void *in) { struct sha256_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg sha256 = { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_init, .update = sha256_update, .final = sha256_final, .export = sha256_export, .import = sha256_import, .descsize = sizeof(struct sha256_state), .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name= "sha256-generic", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static struct shash_alg sha224 = { .digestsize = SHA224_DIGEST_SIZE, .init = sha224_init, .update = sha256_update, .final = sha224_final, .descsize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name= "sha224-generic", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init sha256_generic_mod_init(void) { int ret = 0; ret = crypto_register_shash(&sha224); if (ret < 0) return ret; ret = crypto_register_shash(&sha256); if (ret < 0) crypto_unregister_shash(&sha224); return ret; } static void __exit sha256_generic_mod_fini(void) { crypto_unregister_shash(&sha224); crypto_unregister_shash(&sha256); } module_init(sha256_generic_mod_init); module_exit(sha256_generic_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); MODULE_ALIAS("sha224"); MODULE_ALIAS("sha256");
gpl-2.0
NSDCars5/kernel_nanhu_ares
arch/alpha/kernel/sys_takara.c
9020
8062
/* * linux/arch/alpha/kernel/sys_takara.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the TAKARA. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/core_cia.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" #include "pc873xx.h" /* Note mask bit is true for DISABLED irqs. */ static unsigned long cached_irq_mask[2] = { -1, -1 }; static inline void takara_update_irq_hw(unsigned long irq, unsigned long mask) { int regaddr; mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30)); regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c); outl(mask & 0xffff0000UL, regaddr); } static inline void takara_enable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); takara_update_irq_hw(irq, mask); } static void takara_disable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); takara_update_irq_hw(irq, mask); } static struct irq_chip takara_irq_type = { .name = "TAKARA", .irq_unmask = takara_enable_irq, .irq_mask = takara_disable_irq, .irq_mask_ack = takara_disable_irq, }; static void takara_device_interrupt(unsigned long vector) { unsigned intstatus; /* * The PALcode will have passed us vectors 0x800 or 0x810, * which are fairly arbitrary values and serve only to tell * us whether an interrupt has come in on IRQ0 or IRQ1. If * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's * probably ISA, but PCI interrupts can come through IRQ0 * as well if the interrupt controller isn't in accelerated * mode. 
* * OTOH, the accelerator thing doesn't seem to be working * overly well, so what we'll do instead is try directly * examining the Master Interrupt Register to see if it's a * PCI interrupt, and if _not_ then we'll pass it on to the * ISA handler. */ intstatus = inw(0x500) & 15; if (intstatus) { /* * This is a PCI interrupt. Check each bit and * despatch an interrupt if it's set. */ if (intstatus & 8) handle_irq(16+3); if (intstatus & 4) handle_irq(16+2); if (intstatus & 2) handle_irq(16+1); if (intstatus & 1) handle_irq(16+0); } else { isa_device_interrupt (vector); } } static void takara_srm_device_interrupt(unsigned long vector) { int irq = (vector - 0x800) >> 4; handle_irq(irq); } static void __init takara_init_irq(void) { long i; init_i8259a_irqs(); if (alpha_using_srm) { alpha_mv.device_interrupt = takara_srm_device_interrupt; } else { unsigned int ctlreg = inl(0x500); /* Return to non-accelerated mode. */ ctlreg &= ~0x8000; outl(ctlreg, 0x500); /* Enable the PCI interrupt register. */ ctlreg = 0x05107c00; outl(ctlreg, 0x500); } for (i = 16; i < 128; i += 16) takara_update_irq_hw(i, -1); for (i = 16; i < 128; ++i) { irq_set_chip_and_handler(i, &takara_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } common_init_isa_dma(); } /* * The Takara has PCI devices 1, 2, and 3 configured to slots 20, * 19, and 18 respectively, in the default configuration. They can * also be jumpered to slots 8, 7, and 6 respectively, which is fun * because the SIO ISA bridge can also be slot 7. However, the SIO * doesn't explicitly generate PCI-type interrupts, so we can * assign it whatever the hell IRQ we like and it doesn't matter. 
*/ static int __init takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[15][5] __initdata = { { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ { -1, -1, -1, -1, -1}, /* slot 9 == nothing */ { -1, -1, -1, -1, -1}, /* slot 10 == nothing */ { -1, -1, -1, -1, -1}, /* slot 11 == nothing */ /* These are behind the bridges. */ { 12, 12, 13, 14, 15}, /* slot 12 == nothing */ { 8, 8, 9, 19, 11}, /* slot 13 == nothing */ { 4, 4, 5, 6, 7}, /* slot 14 == nothing */ { 0, 0, 1, 2, 3}, /* slot 15 == nothing */ { -1, -1, -1, -1, -1}, /* slot 16 == nothing */ {64+ 0, 64+0, 64+1, 64+2, 64+3}, /* slot 17= device 4 */ {48+ 0, 48+0, 48+1, 48+2, 48+3}, /* slot 18= device 3 */ {32+ 0, 32+0, 32+1, 32+2, 32+3}, /* slot 19= device 2 */ {16+ 0, 16+0, 16+1, 16+2, 16+3}, /* slot 20= device 1 */ }; const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5; int irq = COMMON_TABLE_LOOKUP; if (irq >= 0 && irq < 16) { /* Guess that we are behind a bridge. 
*/ unsigned int busslot = PCI_SLOT(dev->bus->self->devfn); irq += irq_tab[busslot-min_idsel][0]; } return irq; } static int __init takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[15][5] __initdata = { { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ { -1, -1, -1, -1, -1}, /* slot 9 == nothing */ { -1, -1, -1, -1, -1}, /* slot 10 == nothing */ { -1, -1, -1, -1, -1}, /* slot 11 == nothing */ { -1, -1, -1, -1, -1}, /* slot 12 == nothing */ { -1, -1, -1, -1, -1}, /* slot 13 == nothing */ { -1, -1, -1, -1, -1}, /* slot 14 == nothing */ { -1, -1, -1, -1, -1}, /* slot 15 == nothing */ { -1, -1, -1, -1, -1}, /* slot 16 == nothing */ { -1, -1, -1, -1, -1}, /* slot 17 == nothing */ { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 18 == device 3 */ { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 19 == device 2 */ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 20 == device 1 */ }; const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static u8 __init takara_swizzle(struct pci_dev *dev, u8 *pinp) { int slot = PCI_SLOT(dev->devfn); int pin = *pinp; unsigned int ctlreg = inl(0x500); unsigned int busslot; if (!dev->bus->self) return slot; busslot = PCI_SLOT(dev->bus->self->devfn); /* Check for built-in bridges. */ if (dev->bus->number != 0 && busslot > 16 && ((1<<(36-busslot)) & ctlreg)) { if (pin == 1) pin += (20 - busslot); else { printk(KERN_WARNING "takara_swizzle: can only " "handle cards with INTA IRQ pin.\n"); } } else { /* Must be a card-based bridge. 
*/ printk(KERN_WARNING "takara_swizzle: cannot handle " "card-bridge behind builtin bridge yet.\n"); } *pinp = pin; return slot; } static void __init takara_init_pci(void) { if (alpha_using_srm) alpha_mv.pci_map_irq = takara_map_irq_srm; cia_init_pci(); if (pc873xx_probe() == -1) { printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); } else { printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", pc873xx_get_model(), pc873xx_get_base()); pc873xx_enable_ide(); } } /* * The System Vector */ struct alpha_machine_vector takara_mv __initmv = { .vector_name = "Takara", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 128, .device_interrupt = takara_device_interrupt, .init_arch = cia_init_arch, .init_irq = takara_init_irq, .init_rtc = common_init_rtc, .init_pci = takara_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = takara_map_irq, .pci_swizzle = takara_swizzle, }; ALIAS_MV(takara)
gpl-2.0
lbule/android_kernel_htc_m9pw
arch/alpha/kernel/sys_sx164.c
9020
4587
/* * linux/arch/alpha/kernel/sys_sx164.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999, 2000 Richard Henderson * * Code supporting the SX164 (PCA56+PYXIS). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/core_cia.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include <asm/special_insns.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" static void __init sx164_init_irq(void) { outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); if (alpha_using_srm) alpha_mv.device_interrupt = srm_device_interrupt; init_i8259a_irqs(); /* Not interested in the bogus interrupts (0,3,4,5,40-47), NMI (1), or HALT (2). */ if (alpha_using_srm) init_srm_irqs(40, 0x3f0000); else init_pyxis_irqs(0xff00003f0000UL); setup_irq(16+6, &timer_cascade_irqaction); } /* * PCI Fixup configuration. 
* * Summary @ PYXIS_INT_REQ: * Bit Meaning * 0 RSVD * 1 NMI * 2 Halt/Reset switch * 3 MBZ * 4 RAZ * 5 RAZ * 6 Interval timer (RTC) * 7 PCI-ISA Bridge * 8 Interrupt Line A from slot 3 * 9 Interrupt Line A from slot 2 *10 Interrupt Line A from slot 1 *11 Interrupt Line A from slot 0 *12 Interrupt Line B from slot 3 *13 Interrupt Line B from slot 2 *14 Interrupt Line B from slot 1 *15 Interrupt line B from slot 0 *16 Interrupt Line C from slot 3 *17 Interrupt Line C from slot 2 *18 Interrupt Line C from slot 1 *19 Interrupt Line C from slot 0 *20 Interrupt Line D from slot 3 *21 Interrupt Line D from slot 2 *22 Interrupt Line D from slot 1 *23 Interrupt Line D from slot 0 * * IdSel * 5 32 bit PCI option slot 2 * 6 64 bit PCI option slot 0 * 7 64 bit PCI option slot 1 * 8 Cypress I/O * 9 32 bit PCI option slot 3 */ static int __init sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[5][5] __initdata = { /*INT INTA INTB INTC INTD */ { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */ { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */ { 16+10, 16+10, 16+14, 16+18, 16+22}, /* IdSel 7 slot 1 J18 */ { -1, -1, -1, -1, -1}, /* IdSel 8 SIO */ { 16+ 8, 16+ 8, 16+12, 16+16, 16+20} /* IdSel 9 slot 3 J15 */ }; const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static void __init sx164_init_pci(void) { cia_init_pci(); SMC669_Init(0); } static void __init sx164_init_arch(void) { /* * OSF palcode v1.23 forgets to enable PCA56 Motion Video * Instructions. Let's enable it. * We have to check palcode revision because CSERVE interface * is subject to change without notice. For example, it * has been changed completely since v1.16 (found in MILO * distribution). 
-ink */ struct percpu_struct *cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); if (amask(AMASK_MAX) != 0 && alpha_using_srm && (cpu->pal_revision & 0xffff) <= 0x117) { __asm__ __volatile__( "lda $16,8($31)\n" "call_pal 9\n" /* Allow PALRES insns in kernel mode */ ".long 0x64000118\n\n" /* hw_mfpr $0,icsr */ "ldah $16,(1<<(19-16))($31)\n" "or $0,$16,$0\n" /* set MVE bit */ ".long 0x74000118\n" /* hw_mtpr $0,icsr */ "lda $16,9($31)\n" "call_pal 9" /* Disable PALRES insns */ : : : "$0", "$16"); printk("PCA56 MVI set enabled\n"); } pyxis_init_arch(); } /* * The System Vector */ struct alpha_machine_vector sx164_mv __initmv = { .vector_name = "SX164", DO_EV5_MMU, DO_DEFAULT_RTC, DO_PYXIS_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = PYXIS_DAC_OFFSET, .nr_irqs = 48, .device_interrupt = pyxis_device_interrupt, .init_arch = sx164_init_arch, .init_irq = sx164_init_irq, .init_rtc = common_init_rtc, .init_pci = sx164_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = sx164_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(sx164)
gpl-2.0
vmayoral/ubuntu-vivid
arch/alpha/kernel/sys_sx164.c
9020
4587
/* * linux/arch/alpha/kernel/sys_sx164.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999, 2000 Richard Henderson * * Code supporting the SX164 (PCA56+PYXIS). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/core_cia.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include <asm/special_insns.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" static void __init sx164_init_irq(void) { outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); if (alpha_using_srm) alpha_mv.device_interrupt = srm_device_interrupt; init_i8259a_irqs(); /* Not interested in the bogus interrupts (0,3,4,5,40-47), NMI (1), or HALT (2). */ if (alpha_using_srm) init_srm_irqs(40, 0x3f0000); else init_pyxis_irqs(0xff00003f0000UL); setup_irq(16+6, &timer_cascade_irqaction); } /* * PCI Fixup configuration. 
* * Summary @ PYXIS_INT_REQ: * Bit Meaning * 0 RSVD * 1 NMI * 2 Halt/Reset switch * 3 MBZ * 4 RAZ * 5 RAZ * 6 Interval timer (RTC) * 7 PCI-ISA Bridge * 8 Interrupt Line A from slot 3 * 9 Interrupt Line A from slot 2 *10 Interrupt Line A from slot 1 *11 Interrupt Line A from slot 0 *12 Interrupt Line B from slot 3 *13 Interrupt Line B from slot 2 *14 Interrupt Line B from slot 1 *15 Interrupt line B from slot 0 *16 Interrupt Line C from slot 3 *17 Interrupt Line C from slot 2 *18 Interrupt Line C from slot 1 *19 Interrupt Line C from slot 0 *20 Interrupt Line D from slot 3 *21 Interrupt Line D from slot 2 *22 Interrupt Line D from slot 1 *23 Interrupt Line D from slot 0 * * IdSel * 5 32 bit PCI option slot 2 * 6 64 bit PCI option slot 0 * 7 64 bit PCI option slot 1 * 8 Cypress I/O * 9 32 bit PCI option slot 3 */ static int __init sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[5][5] __initdata = { /*INT INTA INTB INTC INTD */ { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */ { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */ { 16+10, 16+10, 16+14, 16+18, 16+22}, /* IdSel 7 slot 1 J18 */ { -1, -1, -1, -1, -1}, /* IdSel 8 SIO */ { 16+ 8, 16+ 8, 16+12, 16+16, 16+20} /* IdSel 9 slot 3 J15 */ }; const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static void __init sx164_init_pci(void) { cia_init_pci(); SMC669_Init(0); } static void __init sx164_init_arch(void) { /* * OSF palcode v1.23 forgets to enable PCA56 Motion Video * Instructions. Let's enable it. * We have to check palcode revision because CSERVE interface * is subject to change without notice. For example, it * has been changed completely since v1.16 (found in MILO * distribution). 
-ink */ struct percpu_struct *cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); if (amask(AMASK_MAX) != 0 && alpha_using_srm && (cpu->pal_revision & 0xffff) <= 0x117) { __asm__ __volatile__( "lda $16,8($31)\n" "call_pal 9\n" /* Allow PALRES insns in kernel mode */ ".long 0x64000118\n\n" /* hw_mfpr $0,icsr */ "ldah $16,(1<<(19-16))($31)\n" "or $0,$16,$0\n" /* set MVE bit */ ".long 0x74000118\n" /* hw_mtpr $0,icsr */ "lda $16,9($31)\n" "call_pal 9" /* Disable PALRES insns */ : : : "$0", "$16"); printk("PCA56 MVI set enabled\n"); } pyxis_init_arch(); } /* * The System Vector */ struct alpha_machine_vector sx164_mv __initmv = { .vector_name = "SX164", DO_EV5_MMU, DO_DEFAULT_RTC, DO_PYXIS_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = PYXIS_DAC_OFFSET, .nr_irqs = 48, .device_interrupt = pyxis_device_interrupt, .init_arch = sx164_init_arch, .init_irq = sx164_init_irq, .init_rtc = common_init_rtc, .init_pci = sx164_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = sx164_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(sx164)
gpl-2.0
thegreatergood/LiteKernel---Glide
drivers/input/keyboard/hilkbd.c
9788
8804
/* * linux/drivers/hil/hilkbd.c * * Copyright (C) 1998 Philip Blundell <philb@gnu.org> * Copyright (C) 1999 Matthew Wilcox <willy@bofh.ai> * Copyright (C) 1999-2007 Helge Deller <deller@gmx.de> * * Very basic HP Human Interface Loop (HIL) driver. * This driver handles the keyboard on HP300 (m68k) and on some * HP700 (parisc) series machines. * * * This file is subject to the terms and conditions of the GNU General Public * License version 2. See the file COPYING in the main directory of this * archive for more details. */ #include <linux/pci_ids.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/input.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/hil.h> #include <linux/io.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <asm/irq.h> #ifdef CONFIG_HP300 #include <asm/hwtest.h> #endif MODULE_AUTHOR("Philip Blundell, Matthew Wilcox, Helge Deller"); MODULE_DESCRIPTION("HIL keyboard driver (basic functionality)"); MODULE_LICENSE("GPL v2"); #if defined(CONFIG_PARISC) #include <asm/io.h> #include <asm/hardware.h> #include <asm/parisc-device.h> static unsigned long hil_base; /* HPA for the HIL device */ static unsigned int hil_irq; #define HILBASE hil_base /* HPPA (parisc) port address */ #define HIL_DATA 0x800 #define HIL_CMD 0x801 #define HIL_IRQ hil_irq #define hil_readb(p) gsc_readb(p) #define hil_writeb(v,p) gsc_writeb((v),(p)) #elif defined(CONFIG_HP300) #define HILBASE 0xf0428000UL /* HP300 (m68k) port address */ #define HIL_DATA 0x1 #define HIL_CMD 0x3 #define HIL_IRQ 2 #define hil_readb(p) readb(p) #define hil_writeb(v,p) writeb((v),(p)) #else #error "HIL is not supported on this platform" #endif /* HIL helper functions */ #define hil_busy() (hil_readb(HILBASE + HIL_CMD) & HIL_BUSY) #define hil_data_available() (hil_readb(HILBASE + HIL_CMD) & HIL_DATA_RDY) #define hil_status() (hil_readb(HILBASE + HIL_CMD)) #define hil_command(x) do { hil_writeb((x), HILBASE + HIL_CMD); } while 
(0) #define hil_read_data() (hil_readb(HILBASE + HIL_DATA)) #define hil_write_data(x) do { hil_writeb((x), HILBASE + HIL_DATA); } while (0) /* HIL constants */ #define HIL_BUSY 0x02 #define HIL_DATA_RDY 0x01 #define HIL_SETARD 0xA0 /* set auto-repeat delay */ #define HIL_SETARR 0xA2 /* set auto-repeat rate */ #define HIL_SETTONE 0xA3 /* set tone generator */ #define HIL_CNMT 0xB2 /* clear nmi */ #define HIL_INTON 0x5C /* Turn on interrupts. */ #define HIL_INTOFF 0x5D /* Turn off interrupts. */ #define HIL_READKBDSADR 0xF9 #define HIL_WRITEKBDSADR 0xE9 static unsigned int hphilkeyb_keycode[HIL_KEYCODES_SET1_TBLSIZE] __read_mostly = { HIL_KEYCODES_SET1 }; /* HIL structure */ static struct { struct input_dev *dev; unsigned int curdev; unsigned char s; unsigned char c; int valid; unsigned char data[16]; unsigned int ptr; spinlock_t lock; void *dev_id; /* native bus device */ } hil_dev; static void poll_finished(void) { int down; int key; unsigned char scode; switch (hil_dev.data[0]) { case 0x40: down = (hil_dev.data[1] & 1) == 0; scode = hil_dev.data[1] >> 1; key = hphilkeyb_keycode[scode]; input_report_key(hil_dev.dev, key, down); break; } hil_dev.curdev = 0; } static inline void handle_status(unsigned char s, unsigned char c) { if (c & 0x8) { /* End of block */ if (c & 0x10) poll_finished(); } else { if (c & 0x10) { if (hil_dev.curdev) poll_finished(); /* just in case */ hil_dev.curdev = c & 7; hil_dev.ptr = 0; } } } static inline void handle_data(unsigned char s, unsigned char c) { if (hil_dev.curdev) { hil_dev.data[hil_dev.ptr++] = c; hil_dev.ptr &= 15; } } /* handle HIL interrupts */ static irqreturn_t hil_interrupt(int irq, void *handle) { unsigned char s, c; s = hil_status(); c = hil_read_data(); switch (s >> 4) { case 0x5: handle_status(s, c); break; case 0x6: handle_data(s, c); break; case 0x4: hil_dev.s = s; hil_dev.c = c; mb(); hil_dev.valid = 1; break; } return IRQ_HANDLED; } /* send a command to the HIL */ static void hil_do(unsigned char cmd, unsigned 
char *data, unsigned int len) { unsigned long flags; spin_lock_irqsave(&hil_dev.lock, flags); while (hil_busy()) /* wait */; hil_command(cmd); while (len--) { while (hil_busy()) /* wait */; hil_write_data(*(data++)); } spin_unlock_irqrestore(&hil_dev.lock, flags); } /* initialize HIL */ static int __devinit hil_keyb_init(void) { unsigned char c; unsigned int i, kbid; wait_queue_head_t hil_wait; int err; if (hil_dev.dev) return -ENODEV; /* already initialized */ init_waitqueue_head(&hil_wait); spin_lock_init(&hil_dev.lock); hil_dev.dev = input_allocate_device(); if (!hil_dev.dev) return -ENOMEM; err = request_irq(HIL_IRQ, hil_interrupt, 0, "hil", hil_dev.dev_id); if (err) { printk(KERN_ERR "HIL: Can't get IRQ\n"); goto err1; } /* Turn on interrupts */ hil_do(HIL_INTON, NULL, 0); /* Look for keyboards */ hil_dev.valid = 0; /* clear any pending data */ hil_do(HIL_READKBDSADR, NULL, 0); wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3 * HZ); if (!hil_dev.valid) printk(KERN_WARNING "HIL: timed out, assuming no keyboard present\n"); c = hil_dev.c; hil_dev.valid = 0; if (c == 0) { kbid = -1; printk(KERN_WARNING "HIL: no keyboard present\n"); } else { kbid = ffz(~c); printk(KERN_INFO "HIL: keyboard found at id %d\n", kbid); } /* set it to raw mode */ c = 0; hil_do(HIL_WRITEKBDSADR, &c, 1); for (i = 0; i < HIL_KEYCODES_SET1_TBLSIZE; i++) if (hphilkeyb_keycode[i] != KEY_RESERVED) __set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit); hil_dev.dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); hil_dev.dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) | BIT_MASK(LED_SCROLLL); hil_dev.dev->keycodemax = HIL_KEYCODES_SET1_TBLSIZE; hil_dev.dev->keycodesize= sizeof(hphilkeyb_keycode[0]); hil_dev.dev->keycode = hphilkeyb_keycode; hil_dev.dev->name = "HIL keyboard"; hil_dev.dev->phys = "hpkbd/input0"; hil_dev.dev->id.bustype = BUS_HIL; hil_dev.dev->id.vendor = PCI_VENDOR_ID_HP; hil_dev.dev->id.product = 0x0001; hil_dev.dev->id.version = 0x0010; err = 
input_register_device(hil_dev.dev); if (err) { printk(KERN_ERR "HIL: Can't register device\n"); goto err2; } printk(KERN_INFO "input: %s, ID %d at 0x%08lx (irq %d) found and attached\n", hil_dev.dev->name, kbid, HILBASE, HIL_IRQ); return 0; err2: hil_do(HIL_INTOFF, NULL, 0); free_irq(HIL_IRQ, hil_dev.dev_id); err1: input_free_device(hil_dev.dev); hil_dev.dev = NULL; return err; } static void __devexit hil_keyb_exit(void) { if (HIL_IRQ) free_irq(HIL_IRQ, hil_dev.dev_id); /* Turn off interrupts */ hil_do(HIL_INTOFF, NULL, 0); input_unregister_device(hil_dev.dev); hil_dev.dev = NULL; } #if defined(CONFIG_PARISC) static int __devinit hil_probe_chip(struct parisc_device *dev) { /* Only allow one HIL keyboard */ if (hil_dev.dev) return -ENODEV; if (!dev->irq) { printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%p\n", (void *)dev->hpa.start); return -ENODEV; } hil_base = dev->hpa.start; hil_irq = dev->irq; hil_dev.dev_id = dev; printk(KERN_INFO "Found HIL bus at 0x%08lx, IRQ %d\n", hil_base, hil_irq); return hil_keyb_init(); } static int __devexit hil_remove_chip(struct parisc_device *dev) { hil_keyb_exit(); return 0; } static struct parisc_device_id hil_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00073 }, { 0, } }; #if 0 /* Disabled to avoid conflicts with the HP SDC HIL drivers */ MODULE_DEVICE_TABLE(parisc, hil_tbl); #endif static struct parisc_driver hil_driver = { .name = "hil", .id_table = hil_tbl, .probe = hil_probe_chip, .remove = __devexit_p(hil_remove_chip), }; static int __init hil_init(void) { return register_parisc_driver(&hil_driver); } static void __exit hil_exit(void) { unregister_parisc_driver(&hil_driver); } #else /* !CONFIG_PARISC */ static int __init hil_init(void) { int error; /* Only allow one HIL keyboard */ if (hil_dev.dev) return -EBUSY; if (!MACH_IS_HP300) return -ENODEV; if (!hwreg_present((void *)(HILBASE + HIL_DATA))) { printk(KERN_ERR "HIL: hardware register was not found\n"); return -ENODEV; } if 
(!request_region(HILBASE + HIL_DATA, 2, "hil")) { printk(KERN_ERR "HIL: IOPORT region already used\n"); return -EIO; } error = hil_keyb_init(); if (error) { release_region(HILBASE + HIL_DATA, 2); return error; } return 0; } static void __exit hil_exit(void) { hil_keyb_exit(); release_region(HILBASE + HIL_DATA, 2); } #endif /* CONFIG_PARISC */ module_init(hil_init); module_exit(hil_exit);
gpl-2.0
ReconInstruments/jet_kernel
net/ipv4/tcp_scalable.c
10556
1448
/* Tom Kelly's Scalable TCP * * See http://www.deneholme.net/tom/scalable/ * * John Heffner <jheffner@sc.edu> */ #include <linux/module.h> #include <net/tcp.h> /* These factors derived from the recommended values in the aer: * .01 and and 7/8. We use 50 instead of 100 to account for * delayed ack. */ #define TCP_SCALABLE_AI_CNT 50U #define TCP_SCALABLE_MD_SCALE 3 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { struct tcp_sock *tp = tcp_sk(sk); if (!tcp_is_cwnd_limited(sk, in_flight)) return; if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp); else tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)); } static u32 tcp_scalable_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); } static struct tcp_congestion_ops tcp_scalable __read_mostly = { .ssthresh = tcp_scalable_ssthresh, .cong_avoid = tcp_scalable_cong_avoid, .min_cwnd = tcp_reno_min_cwnd, .owner = THIS_MODULE, .name = "scalable", }; static int __init tcp_scalable_register(void) { return tcp_register_congestion_control(&tcp_scalable); } static void __exit tcp_scalable_unregister(void) { tcp_unregister_congestion_control(&tcp_scalable); } module_init(tcp_scalable_register); module_exit(tcp_scalable_unregister); MODULE_AUTHOR("John Heffner"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Scalable TCP");
gpl-2.0
sheshas/HGSHM
qemu-2.3.0-rc3/hw/pcmcia/pxa2xx.c
61
6925
/* * Intel XScale PXA255/270 PC Card and CompactFlash Interface. * * Copyright (c) 2006 Openedhand Ltd. * Written by Andrzej Zaborowski <balrog@zabor.org> * * This code is licensed under the GPLv2. * * Contributions after 2012-01-13 are licensed under the terms of the * GNU GPL, version 2 or (at your option) any later version. */ #include "hw/hw.h" #include "hw/sysbus.h" #include "hw/pcmcia.h" #include "hw/arm/pxa.h" #define TYPE_PXA2XX_PCMCIA "pxa2xx-pcmcia" #define PXA2XX_PCMCIA(obj) \ OBJECT_CHECK(PXA2xxPCMCIAState, obj, TYPE_PXA2XX_PCMCIA) struct PXA2xxPCMCIAState { SysBusDevice parent_obj; PCMCIASocket slot; MemoryRegion container_mem; MemoryRegion common_iomem; MemoryRegion attr_iomem; MemoryRegion iomem; qemu_irq irq; qemu_irq cd_irq; PCMCIACardState *card; }; static uint64_t pxa2xx_pcmcia_common_read(void *opaque, hwaddr offset, unsigned size) { PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; PCMCIACardClass *pcc; if (s->slot.attached) { pcc = PCMCIA_CARD_GET_CLASS(s->card); return pcc->common_read(s->card, offset); } return 0; } static void pxa2xx_pcmcia_common_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; PCMCIACardClass *pcc; if (s->slot.attached) { pcc = PCMCIA_CARD_GET_CLASS(s->card); pcc->common_write(s->card, offset, value); } } static uint64_t pxa2xx_pcmcia_attr_read(void *opaque, hwaddr offset, unsigned size) { PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; PCMCIACardClass *pcc; if (s->slot.attached) { pcc = PCMCIA_CARD_GET_CLASS(s->card); return pcc->attr_read(s->card, offset); } return 0; } static void pxa2xx_pcmcia_attr_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; PCMCIACardClass *pcc; if (s->slot.attached) { pcc = PCMCIA_CARD_GET_CLASS(s->card); pcc->attr_write(s->card, offset, value); } } static uint64_t pxa2xx_pcmcia_io_read(void *opaque, hwaddr offset, unsigned size) { 
PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; PCMCIACardClass *pcc; if (s->slot.attached) { pcc = PCMCIA_CARD_GET_CLASS(s->card); return pcc->io_read(s->card, offset); } return 0; } static void pxa2xx_pcmcia_io_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; PCMCIACardClass *pcc; if (s->slot.attached) { pcc = PCMCIA_CARD_GET_CLASS(s->card); pcc->io_write(s->card, offset, value); } } static const MemoryRegionOps pxa2xx_pcmcia_common_ops = { .read = pxa2xx_pcmcia_common_read, .write = pxa2xx_pcmcia_common_write, .endianness = DEVICE_NATIVE_ENDIAN }; static const MemoryRegionOps pxa2xx_pcmcia_attr_ops = { .read = pxa2xx_pcmcia_attr_read, .write = pxa2xx_pcmcia_attr_write, .endianness = DEVICE_NATIVE_ENDIAN }; static const MemoryRegionOps pxa2xx_pcmcia_io_ops = { .read = pxa2xx_pcmcia_io_read, .write = pxa2xx_pcmcia_io_write, .endianness = DEVICE_NATIVE_ENDIAN }; static void pxa2xx_pcmcia_set_irq(void *opaque, int line, int level) { PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; if (!s->irq) return; qemu_set_irq(s->irq, level); } PXA2xxPCMCIAState *pxa2xx_pcmcia_init(MemoryRegion *sysmem, hwaddr base) { DeviceState *dev; PXA2xxPCMCIAState *s; dev = qdev_create(NULL, TYPE_PXA2XX_PCMCIA); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); s = PXA2XX_PCMCIA(dev); qdev_init_nofail(dev); return s; } static void pxa2xx_pcmcia_initfn(Object *obj) { SysBusDevice *sbd = SYS_BUS_DEVICE(obj); PXA2xxPCMCIAState *s = PXA2XX_PCMCIA(obj); memory_region_init(&s->container_mem, obj, "container", 0x10000000); sysbus_init_mmio(sbd, &s->container_mem); /* Socket I/O Memory Space */ memory_region_init_io(&s->iomem, NULL, &pxa2xx_pcmcia_io_ops, s, "pxa2xx-pcmcia-io", 0x04000000); memory_region_add_subregion(&s->container_mem, 0x00000000, &s->iomem); /* Then next 64 MB is reserved */ /* Socket Attribute Memory Space */ memory_region_init_io(&s->attr_iomem, NULL, &pxa2xx_pcmcia_attr_ops, s, 
"pxa2xx-pcmcia-attribute", 0x04000000); memory_region_add_subregion(&s->container_mem, 0x08000000, &s->attr_iomem); /* Socket Common Memory Space */ memory_region_init_io(&s->common_iomem, NULL, &pxa2xx_pcmcia_common_ops, s, "pxa2xx-pcmcia-common", 0x04000000); memory_region_add_subregion(&s->container_mem, 0x0c000000, &s->common_iomem); s->slot.irq = qemu_allocate_irq(pxa2xx_pcmcia_set_irq, s, 0); object_property_add_link(obj, "card", TYPE_PCMCIA_CARD, (Object **)&s->card, NULL, /* read-only property */ 0, NULL); } /* Insert a new card into a slot */ int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card) { PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; PCMCIACardClass *pcc; if (s->slot.attached) { return -EEXIST; } if (s->cd_irq) { qemu_irq_raise(s->cd_irq); } s->card = card; pcc = PCMCIA_CARD_GET_CLASS(s->card); s->slot.attached = true; s->card->slot = &s->slot; pcc->attach(s->card); return 0; } /* Eject card from the slot */ int pxa2xx_pcmcia_detach(void *opaque) { PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; PCMCIACardClass *pcc; if (!s->slot.attached) { return -ENOENT; } pcc = PCMCIA_CARD_GET_CLASS(s->card); pcc->detach(s->card); s->card->slot = NULL; s->card = NULL; s->slot.attached = false; if (s->irq) { qemu_irq_lower(s->irq); } if (s->cd_irq) { qemu_irq_lower(s->cd_irq); } return 0; } /* Who to notify on card events */ void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq) { PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; s->irq = irq; s->cd_irq = cd_irq; } static const TypeInfo pxa2xx_pcmcia_type_info = { .name = TYPE_PXA2XX_PCMCIA, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(PXA2xxPCMCIAState), .instance_init = pxa2xx_pcmcia_initfn, }; static void pxa2xx_pcmcia_register_types(void) { type_register_static(&pxa2xx_pcmcia_type_info); } type_init(pxa2xx_pcmcia_register_types)
gpl-2.0
frustreated/linux
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
317
13513
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/random.h> #include <net/vxlan.h> #include "reg.h" #include "spectrum.h" #include "spectrum_nve.h" #define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \ VXLAN_F_LEARN) #define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \ VXLAN_F_UDP_ZERO_CSUM6_TX | \ VXLAN_F_UDP_ZERO_CSUM6_RX) static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg, struct netlink_ext_ack *extack) { if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX"); return false; } if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag"); return false; } return true; } static bool mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config *cfg, struct netlink_ext_ack *extack) { if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX"); return false; } if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for RX"); return false; } if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag"); return false; } return true; } static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(params->dev); struct vxlan_config *cfg = &vxlan->cfg; if (vxlan_addr_multicast(&cfg->remote_ip)) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported"); return false; } if (vxlan_addr_any(&cfg->saddr)) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified"); return false; } if (cfg->remote_ifindex) { 
NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported"); return false; } if (cfg->port_min || cfg->port_max) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported"); return false; } if (cfg->tos != 1) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit"); return false; } if (cfg->flags & VXLAN_F_TTL_INHERIT) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit"); return false; } switch (cfg->saddr.sa.sa_family) { case AF_INET: if (!mlxsw_sp_nve_vxlan_ipv4_flags_check(cfg, extack)) return false; break; case AF_INET6: if (!mlxsw_sp_nve_vxlan_ipv6_flags_check(cfg, extack)) return false; break; } if (cfg->ttl == 0) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0"); return false; } if (cfg->label != 0) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0"); return false; } return true; } static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack) { if (params->ethertype == ETH_P_8021AD) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: 802.1ad bridge is not supported with VxLAN"); return false; } return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack); } static void mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config *cfg, struct mlxsw_sp_nve_config *config) { switch (cfg->saddr.sa.sa_family) { case AF_INET: config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; break; case AF_INET6: config->ul_proto = MLXSW_SP_L3_PROTO_IPV6; config->ul_sip.addr6 = cfg->saddr.sin6.sin6_addr; break; } } static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_params *params, struct mlxsw_sp_nve_config *config) { struct vxlan_dev *vxlan = netdev_priv(params->dev); struct vxlan_config *cfg = &vxlan->cfg; config->type = MLXSW_SP_NVE_TYPE_VXLAN; config->ttl = cfg->ttl; config->flowlabel = 
cfg->label; config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0; config->ul_tb_id = RT_TABLE_MAIN; mlxsw_sp_nve_vxlan_ul_proto_sip_config(cfg, config); config->udp_dport = cfg->dst_port; } static void mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, const struct mlxsw_sp_nve_config *config) { struct in6_addr addr6; u8 udp_sport; mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true, config->ttl); /* VxLAN driver's default UDP source port range is 32768 (0x8000) * to 60999 (0xee47). Set the upper 8 bits of the UDP source port * to a random number between 0x80 and 0xee */ get_random_bytes(&udp_sport, sizeof(udp_sport)); udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80; mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport); switch (config->ul_proto) { case MLXSW_SP_L3_PROTO_IPV4: mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4)); break; case MLXSW_SP_L3_PROTO_IPV6: addr6 = config->ul_sip.addr6; mlxsw_reg_tngcr_usipv6_memcpy_to(tngcr_pl, (const char *)&addr6); break; } } static int mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_nve_config *config) { char tngcr_pl[MLXSW_REG_TNGCR_LEN]; u16 ul_vr_id; int err; err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id, &ul_vr_id); if (err) return err; mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config); mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en); mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); } static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp) { char tngcr_pl[MLXSW_REG_TNGCR_LEN]; mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); } static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp, unsigned int tunnel_index) { char rtdp_pl[MLXSW_REG_RTDP_LEN]; mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index); return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); } static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_config *config) { struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; int err; err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport); if (err) return err; err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); if (err) goto err_parsing_depth_inc; err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config); if (err) goto err_config_set; err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index); if (err) goto err_rtdp_set; err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id, config->ul_proto, &config->ul_sip, nve->tunnel_index); if (err) goto err_promote_decap; return 0; err_promote_decap: err_rtdp_set: mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); err_config_set: mlxsw_sp_parsing_depth_dec(mlxsw_sp); err_parsing_depth_inc: mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); return err; } static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve) { struct mlxsw_sp_nve_config *config = &nve->config; struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, config->ul_proto, &config->ul_sip); mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); mlxsw_sp_parsing_depth_dec(mlxsw_sp); mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); } static int mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni, struct netlink_ext_ack *extack) { if (WARN_ON(!netif_is_vxlan(nve_dev))) return -EINVAL; return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier, extack); } static void mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni) { if (WARN_ON(!netif_is_vxlan(nve_dev))) return; vxlan_fdb_clear_offload(nve_dev, vni); } const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { .type = MLXSW_SP_NVE_TYPE_VXLAN, .can_offload = mlxsw_sp1_nve_vxlan_can_offload, .nve_config = mlxsw_sp_nve_vxlan_config, .init = mlxsw_sp1_nve_vxlan_init, 
.fini = mlxsw_sp1_nve_vxlan_fini, .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay, .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload, }; static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, bool learning_en) { char tnpc_pl[MLXSW_REG_TNPC_LEN]; mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE, learning_en); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl); } static int mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp) { char spvid_pl[MLXSW_REG_SPVID_LEN] = {}; mlxsw_reg_spvid_tport_set(spvid_pl, true); mlxsw_reg_spvid_local_port_set(spvid_pl, MLXSW_REG_TUNNEL_PORT_NVE); mlxsw_reg_spvid_egr_et_set_set(spvid_pl, true); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); } static int mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_nve_config *config) { char tngcr_pl[MLXSW_REG_TNGCR_LEN]; char spvtr_pl[MLXSW_REG_SPVTR_LEN]; u16 ul_rif_index; int err; err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id, &ul_rif_index); if (err) return err; mlxsw_sp->nve->ul_rif_index = ul_rif_index; err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en); if (err) goto err_vxlan_learning_set; mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config); mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); if (err) goto err_tngcr_write; mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); if (err) goto err_spvtr_write; err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp); if (err) goto err_decap_ethertype_set; return 0; err_decap_ethertype_set: mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); err_spvtr_write: mlxsw_reg_tngcr_pack(tngcr_pl, 
MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); err_tngcr_write: mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false); err_vxlan_learning_set: mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index); return err; } static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp) { char spvtr_pl[MLXSW_REG_SPVTR_LEN]; char tngcr_pl[MLXSW_REG_TNGCR_LEN]; mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false); mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index); } static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp, unsigned int tunnel_index, u16 ul_rif_index) { char rtdp_pl[MLXSW_REG_RTDP_LEN]; mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index); mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); } static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_config *config) { struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; int err; err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport); if (err) return err; err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); if (err) goto err_parsing_depth_inc; err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config); if (err) goto err_config_set; err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index, nve->ul_rif_index); if (err) goto err_rtdp_set; err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id, config->ul_proto, &config->ul_sip, nve->tunnel_index); if (err) goto err_promote_decap; return 0; err_promote_decap: err_rtdp_set: mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); err_config_set: 
mlxsw_sp_parsing_depth_dec(mlxsw_sp); err_parsing_depth_inc: mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); return err; } static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve) { struct mlxsw_sp_nve_config *config = &nve->config; struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, config->ul_proto, &config->ul_sip); mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); mlxsw_sp_parsing_depth_dec(mlxsw_sp); mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); } const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = { .type = MLXSW_SP_NVE_TYPE_VXLAN, .can_offload = mlxsw_sp_nve_vxlan_can_offload, .nve_config = mlxsw_sp_nve_vxlan_config, .init = mlxsw_sp2_nve_vxlan_init, .fini = mlxsw_sp2_nve_vxlan_fini, .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay, .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload, };
gpl-2.0
quinte17/linux-stable
arch/arm/mach-pxa/mfp-pxa2xx.c
573
10074
/* * linux/arch/arm/mach-pxa/mfp-pxa2xx.c * * PXA2xx pin mux configuration support * * The GPIOs on PXA2xx can be configured as one of many alternate * functions, this is by concept samilar to the MFP configuration * on PXA3xx, what's more important, the low power pin state and * wakeup detection are also supported by the same framework. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/gpio-pxa.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/syscore_ops.h> #include <mach/pxa2xx-regs.h> #include "mfp-pxa2xx.h" #include "generic.h" #define PGSR(x) __REG2(0x40F00020, (x) << 2) #define __GAFR(u, x) __REG2((u) ? 0x40E00058 : 0x40E00054, (x) << 3) #define GAFR_L(x) __GAFR(0, x) #define GAFR_U(x) __GAFR(1, x) #define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2)) #define GPLR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5)) #define GPDR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x0c) #define GPSR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x18) #define GPCR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x24) #define PWER_WE35 (1 << 24) struct gpio_desc { unsigned valid : 1; unsigned can_wakeup : 1; unsigned keypad_gpio : 1; unsigned dir_inverted : 1; unsigned int mask; /* bit mask in PWER or PKWR */ unsigned int mux_mask; /* bit mask of muxed gpio bits, 0 if no mux */ unsigned long config; }; static struct gpio_desc gpio_desc[MFP_PIN_GPIO127 + 1]; static unsigned long gpdr_lpm[4]; static int __mfp_config_gpio(unsigned gpio, unsigned long c) { unsigned long gafr, mask = GPIO_bit(gpio); int bank = gpio_to_bank(gpio); int uorl = !!(gpio & 0x10); /* GAFRx_U or GAFRx_L ? */ int shft = (gpio & 0xf) << 1; int fn = MFP_AF(c); int is_out = (c & MFP_DIR_OUT) ? 
1 : 0; if (fn > 3) return -EINVAL; /* alternate function and direction at run-time */ gafr = (uorl == 0) ? GAFR_L(bank) : GAFR_U(bank); gafr = (gafr & ~(0x3 << shft)) | (fn << shft); if (uorl == 0) GAFR_L(bank) = gafr; else GAFR_U(bank) = gafr; if (is_out ^ gpio_desc[gpio].dir_inverted) GPDR(gpio) |= mask; else GPDR(gpio) &= ~mask; /* alternate function and direction at low power mode */ switch (c & MFP_LPM_STATE_MASK) { case MFP_LPM_DRIVE_HIGH: PGSR(bank) |= mask; is_out = 1; break; case MFP_LPM_DRIVE_LOW: PGSR(bank) &= ~mask; is_out = 1; break; case MFP_LPM_INPUT: case MFP_LPM_DEFAULT: break; default: /* warning and fall through, treat as MFP_LPM_DEFAULT */ pr_warn("%s: GPIO%d: unsupported low power mode\n", __func__, gpio); break; } if (is_out ^ gpio_desc[gpio].dir_inverted) gpdr_lpm[bank] |= mask; else gpdr_lpm[bank] &= ~mask; /* give early warning if MFP_LPM_CAN_WAKEUP is set on the * configurations of those pins not able to wakeup */ if ((c & MFP_LPM_CAN_WAKEUP) && !gpio_desc[gpio].can_wakeup) { pr_warn("%s: GPIO%d unable to wakeup\n", __func__, gpio); return -EINVAL; } if ((c & MFP_LPM_CAN_WAKEUP) && is_out) { pr_warn("%s: output GPIO%d unable to wakeup\n", __func__, gpio); return -EINVAL; } return 0; } static inline int __mfp_validate(int mfp) { int gpio = mfp_to_gpio(mfp); if ((mfp > MFP_PIN_GPIO127) || !gpio_desc[gpio].valid) { pr_warn("%s: GPIO%d is invalid pin\n", __func__, gpio); return -1; } return gpio; } void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num) { unsigned long flags; unsigned long *c; int i, gpio; for (i = 0, c = mfp_cfgs; i < num; i++, c++) { gpio = __mfp_validate(MFP_PIN(*c)); if (gpio < 0) continue; local_irq_save(flags); gpio_desc[gpio].config = *c; __mfp_config_gpio(gpio, *c); local_irq_restore(flags); } } void pxa2xx_mfp_set_lpm(int mfp, unsigned long lpm) { unsigned long flags, c; int gpio; gpio = __mfp_validate(mfp); if (gpio < 0) return; local_irq_save(flags); c = gpio_desc[gpio].config; c = (c & ~MFP_LPM_STATE_MASK) | lpm; 
__mfp_config_gpio(gpio, c); local_irq_restore(flags); } int gpio_set_wake(unsigned int gpio, unsigned int on) { struct gpio_desc *d; unsigned long c, mux_taken; if (gpio > mfp_to_gpio(MFP_PIN_GPIO127)) return -EINVAL; d = &gpio_desc[gpio]; c = d->config; if (!d->valid) return -EINVAL; /* Allow keypad GPIOs to wakeup system when * configured as generic GPIOs. */ if (d->keypad_gpio && (MFP_AF(d->config) == 0) && (d->config & MFP_LPM_CAN_WAKEUP)) { if (on) PKWR |= d->mask; else PKWR &= ~d->mask; return 0; } mux_taken = (PWER & d->mux_mask) & (~d->mask); if (on && mux_taken) return -EBUSY; if (d->can_wakeup && (c & MFP_LPM_CAN_WAKEUP)) { if (on) { PWER = (PWER & ~d->mux_mask) | d->mask; if (c & MFP_LPM_EDGE_RISE) PRER |= d->mask; else PRER &= ~d->mask; if (c & MFP_LPM_EDGE_FALL) PFER |= d->mask; else PFER &= ~d->mask; } else { PWER &= ~d->mask; PRER &= ~d->mask; PFER &= ~d->mask; } } return 0; } #ifdef CONFIG_PXA25x static void __init pxa25x_mfp_init(void) { int i; /* running before pxa_gpio_probe() */ #ifdef CONFIG_CPU_PXA26x pxa_last_gpio = 89; #else pxa_last_gpio = 84; #endif for (i = 0; i <= pxa_last_gpio; i++) gpio_desc[i].valid = 1; for (i = 0; i <= 15; i++) { gpio_desc[i].can_wakeup = 1; gpio_desc[i].mask = GPIO_bit(i); } /* PXA26x has additional 4 GPIOs (86/87/88/89) which has the * direction bit inverted in GPDR2. See PXA26x DM 4.1.1. 
*/ for (i = 86; i <= pxa_last_gpio; i++) gpio_desc[i].dir_inverted = 1; } #else static inline void pxa25x_mfp_init(void) {} #endif /* CONFIG_PXA25x */ #ifdef CONFIG_PXA27x static int pxa27x_pkwr_gpio[] = { 13, 16, 17, 34, 36, 37, 38, 39, 90, 91, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102 }; int keypad_set_wake(unsigned int on) { unsigned int i, gpio, mask = 0; struct gpio_desc *d; for (i = 0; i < ARRAY_SIZE(pxa27x_pkwr_gpio); i++) { gpio = pxa27x_pkwr_gpio[i]; d = &gpio_desc[gpio]; /* skip if configured as generic GPIO */ if (MFP_AF(d->config) == 0) continue; if (d->config & MFP_LPM_CAN_WAKEUP) mask |= gpio_desc[gpio].mask; } if (on) PKWR |= mask; else PKWR &= ~mask; return 0; } #define PWER_WEMUX2_GPIO38 (1 << 16) #define PWER_WEMUX2_GPIO53 (2 << 16) #define PWER_WEMUX2_GPIO40 (3 << 16) #define PWER_WEMUX2_GPIO36 (4 << 16) #define PWER_WEMUX2_MASK (7 << 16) #define PWER_WEMUX3_GPIO31 (1 << 19) #define PWER_WEMUX3_GPIO113 (2 << 19) #define PWER_WEMUX3_MASK (3 << 19) #define INIT_GPIO_DESC_MUXED(mux, gpio) \ do { \ gpio_desc[(gpio)].can_wakeup = 1; \ gpio_desc[(gpio)].mask = PWER_ ## mux ## _GPIO ##gpio; \ gpio_desc[(gpio)].mux_mask = PWER_ ## mux ## _MASK; \ } while (0) static void __init pxa27x_mfp_init(void) { int i, gpio; pxa_last_gpio = 120; /* running before pxa_gpio_probe() */ for (i = 0; i <= pxa_last_gpio; i++) { /* skip GPIO2, 5, 6, 7, 8, they are not * valid pins allow configuration */ if (i == 2 || i == 5 || i == 6 || i == 7 || i == 8) continue; gpio_desc[i].valid = 1; } /* Keypad GPIOs */ for (i = 0; i < ARRAY_SIZE(pxa27x_pkwr_gpio); i++) { gpio = pxa27x_pkwr_gpio[i]; gpio_desc[gpio].can_wakeup = 1; gpio_desc[gpio].keypad_gpio = 1; gpio_desc[gpio].mask = 1 << i; } /* Overwrite GPIO13 as a PWER wakeup source */ for (i = 0; i <= 15; i++) { /* skip GPIO2, 5, 6, 7, 8 */ if (GPIO_bit(i) & 0x1e4) continue; gpio_desc[i].can_wakeup = 1; gpio_desc[i].mask = GPIO_bit(i); } gpio_desc[35].can_wakeup = 1; gpio_desc[35].mask = PWER_WE35; INIT_GPIO_DESC_MUXED(WEMUX3, 
31); INIT_GPIO_DESC_MUXED(WEMUX3, 113); INIT_GPIO_DESC_MUXED(WEMUX2, 38); INIT_GPIO_DESC_MUXED(WEMUX2, 53); INIT_GPIO_DESC_MUXED(WEMUX2, 40); INIT_GPIO_DESC_MUXED(WEMUX2, 36); } #else static inline void pxa27x_mfp_init(void) {} #endif /* CONFIG_PXA27x */ #ifdef CONFIG_PM static unsigned long saved_gafr[2][4]; static unsigned long saved_gpdr[4]; static unsigned long saved_gplr[4]; static unsigned long saved_pgsr[4]; static int pxa2xx_mfp_suspend(void) { int i; /* set corresponding PGSR bit of those marked MFP_LPM_KEEP_OUTPUT */ for (i = 0; i < pxa_last_gpio; i++) { if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) && (GPDR(i) & GPIO_bit(i))) { if (GPLR(i) & GPIO_bit(i)) PGSR(gpio_to_bank(i)) |= GPIO_bit(i); else PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i); } } for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) { saved_gafr[0][i] = GAFR_L(i); saved_gafr[1][i] = GAFR_U(i); saved_gpdr[i] = GPDR(i * 32); saved_gplr[i] = GPLR(i * 32); saved_pgsr[i] = PGSR(i); GPSR(i * 32) = PGSR(i); GPCR(i * 32) = ~PGSR(i); } /* set GPDR bits taking into account MFP_LPM_KEEP_OUTPUT */ for (i = 0; i < pxa_last_gpio; i++) { if ((gpdr_lpm[gpio_to_bank(i)] & GPIO_bit(i)) || ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) && (saved_gpdr[gpio_to_bank(i)] & GPIO_bit(i)))) GPDR(i) |= GPIO_bit(i); else GPDR(i) &= ~GPIO_bit(i); } return 0; } static void pxa2xx_mfp_resume(void) { int i; for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) { GAFR_L(i) = saved_gafr[0][i]; GAFR_U(i) = saved_gafr[1][i]; GPSR(i * 32) = saved_gplr[i]; GPCR(i * 32) = ~saved_gplr[i]; GPDR(i * 32) = saved_gpdr[i]; PGSR(i) = saved_pgsr[i]; } PSSR = PSSR_RDH | PSSR_PH; } #else #define pxa2xx_mfp_suspend NULL #define pxa2xx_mfp_resume NULL #endif struct syscore_ops pxa2xx_mfp_syscore_ops = { .suspend = pxa2xx_mfp_suspend, .resume = pxa2xx_mfp_resume, }; static int __init pxa2xx_mfp_init(void) { int i; if (!cpu_is_pxa2xx()) return 0; if (cpu_is_pxa25x()) pxa25x_mfp_init(); if (cpu_is_pxa27x()) pxa27x_mfp_init(); /* clear RDH bit to enable 
GPIO receivers after reset/sleep exit */ PSSR = PSSR_RDH; /* initialize gafr_run[], pgsr_lpm[] from existing values */ for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) gpdr_lpm[i] = GPDR(i * 32); return 0; } postcore_initcall(pxa2xx_mfp_init);
gpl-2.0
mer-hybris/android_kernel_motorola_titan-OLD
drivers/acpi/processor_idle.c
829
33939
/* * processor_idle - idle state submodule to the ACPI processor driver * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * - Added processor hotplug support * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * - Added support for C3 on SMP * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/moduleparam.h> #include <linux/sched.h> /* need_resched() */ #include <linux/pm_qos.h> #include <linux/clockchips.h> #include <linux/cpuidle.h> #include <linux/irqflags.h> /* * Include the apic definitions for x86 to have the APIC timer related defines * available also for UP (on SMP it gets magically included via linux/smp.h). * asm/acpi.h is not an option, as it would require more include magic. 
Also * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. */ #ifdef CONFIG_X86 #include <asm/apic.h> #endif #include <asm/io.h> #include <asm/uaccess.h> #include <acpi/acpi_bus.h> #include <acpi/processor.h> #include <asm/processor.h> #define PREFIX "ACPI: " #define ACPI_PROCESSOR_CLASS "processor" #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_idle"); #define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY) #define C2_OVERHEAD 1 /* 1us */ #define C3_OVERHEAD 1 /* 1us */ #define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000)) static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; module_param(max_cstate, uint, 0000); static unsigned int nocst __read_mostly; module_param(nocst, uint, 0000); static int bm_check_disable __read_mostly; module_param(bm_check_disable, uint, 0000); static unsigned int latency_factor __read_mostly = 2; module_param(latency_factor, uint, 0644); static int disabled_by_idle_boot_param(void) { return boot_option_idle_override == IDLE_POLL || boot_option_idle_override == IDLE_FORCE_MWAIT || boot_option_idle_override == IDLE_HALT; } /* * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. * For now disable this. Probably a bug somewhere else. * * To skip this limit, boot/load with a large max_cstate limit. */ static int set_max_cstate(const struct dmi_system_id *id) { if (max_cstate > ACPI_PROCESSOR_MAX_POWER) return 0; printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate." 
" Override with \"processor.max_cstate=%d\"\n", id->ident, (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); max_cstate = (long)id->driver_data; return 0; } /* Actually this shouldn't be __cpuinitdata, would be better to fix the callers to only run once -AK */ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { { set_max_cstate, "Clevo 5600D", { DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, (void *)2}, { set_max_cstate, "Pavilion zv5000", { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, (void *)1}, { set_max_cstate, "Asus L8400B", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, (void *)1}, {}, }; /* * Callers should disable interrupts before the call and enable * interrupts after return. */ static void acpi_safe_halt(void) { current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we * test NEED_RESCHED: */ smp_mb(); if (!need_resched()) { safe_halt(); local_irq_disable(); } current_thread_info()->status |= TS_POLLING; } #ifdef ARCH_APICTIMER_STOPS_ON_C3 /* * Some BIOS implementations switch to C3 in the published C2 state. * This seems to be a common problem on AMD boxen, but other vendors * are affected too. We pick the most conservative approach: we assume * that the local APIC stops in both C2 and C3. */ static void lapic_timer_check_state(int state, struct acpi_processor *pr, struct acpi_processor_cx *cx) { struct acpi_processor_power *pwr = &pr->power; u8 type = local_apic_timer_c2_ok ? 
ACPI_STATE_C3 : ACPI_STATE_C2; if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) return; if (amd_e400_c1e_detected) type = ACPI_STATE_C1; /* * Check, if one of the previous states already marked the lapic * unstable */ if (pwr->timer_broadcast_on_state < state) return; if (cx->type >= type) pr->power.timer_broadcast_on_state = state; } static void __lapic_timer_propagate_broadcast(void *arg) { struct acpi_processor *pr = (struct acpi_processor *) arg; unsigned long reason; reason = pr->power.timer_broadcast_on_state < INT_MAX ? CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; clockevents_notify(reason, &pr->id); } static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast, (void *)pr, 1); } /* Power(C) State timer broadcast control */ static void lapic_timer_state_broadcast(struct acpi_processor *pr, struct acpi_processor_cx *cx, int broadcast) { int state = cx - pr->power.states; if (state >= pr->power.timer_broadcast_on_state) { unsigned long reason; reason = broadcast ? 
CLOCK_EVT_NOTIFY_BROADCAST_ENTER : CLOCK_EVT_NOTIFY_BROADCAST_EXIT; clockevents_notify(reason, &pr->id); } } #else static void lapic_timer_check_state(int state, struct acpi_processor *pr, struct acpi_processor_cx *cstate) { } static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } static void lapic_timer_state_broadcast(struct acpi_processor *pr, struct acpi_processor_cx *cx, int broadcast) { } #endif /* * Suspend / resume control */ static u32 saved_bm_rld; static void acpi_idle_bm_rld_save(void) { acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); } static void acpi_idle_bm_rld_restore(void) { u32 resumed_bm_rld; acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); if (resumed_bm_rld != saved_bm_rld) acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); } int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) { acpi_idle_bm_rld_save(); return 0; } int acpi_processor_resume(struct acpi_device * device) { acpi_idle_bm_rld_restore(); return 0; } #if defined(CONFIG_X86) static void tsc_check_state(int state) { switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: case X86_VENDOR_INTEL: /* * AMD Fam10h TSC will tick in all * C/P/S0/S1 states when this bit is set. */ if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) return; /*FALL THROUGH*/ default: /* TSC could halt in idle, so notify users */ if (state > ACPI_STATE_C1) mark_tsc_unstable("TSC halts in idle"); } } #else static void tsc_check_state(int state) { return; } #endif static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) { if (!pr) return -EINVAL; if (!pr->pblk) return -ENODEV; /* if info is obtained from pblk/fadt, type equals state */ pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; #ifndef CONFIG_HOTPLUG_CPU /* * Check for P_LVL2_UP flag before entering C2 and above on * an SMP system. 
*/ if ((num_online_cpus() > 1) && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) return -ENODEV; #endif /* determine C2 and C3 address from pblk */ pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; /* determine latencies from FADT */ pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency; pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency; /* * FADT specified C2 latency must be less than or equal to * 100 microseconds. */ if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency)); /* invalidate C2 */ pr->power.states[ACPI_STATE_C2].address = 0; } /* * FADT supplied C3 latency must be less than or equal to * 1000 microseconds. */ if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency)); /* invalidate C3 */ pr->power.states[ACPI_STATE_C3].address = 0; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "lvl2[0x%08x] lvl3[0x%08x]\n", pr->power.states[ACPI_STATE_C2].address, pr->power.states[ACPI_STATE_C3].address)); return 0; } static int acpi_processor_get_power_info_default(struct acpi_processor *pr) { if (!pr->power.states[ACPI_STATE_C1].valid) { /* set the first C-State to C1 */ /* all processors need to support C1 */ pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; pr->power.states[ACPI_STATE_C1].valid = 1; pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT; } /* the C0 state only exists as a filler in our array */ pr->power.states[ACPI_STATE_C0].valid = 1; return 0; } static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) { acpi_status status = 0; u64 count; int current_count; int i; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *cst; if (nocst) return -ENODEV; current_count = 0; status = acpi_evaluate_object(pr->handle, "_CST", NULL, 
&buffer); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); return -ENODEV; } cst = buffer.pointer; /* There must be at least 2 elements */ if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { printk(KERN_ERR PREFIX "not enough elements in _CST\n"); status = -EFAULT; goto end; } count = cst->package.elements[0].integer.value; /* Validate number of power states. */ if (count < 1 || count != cst->package.count - 1) { printk(KERN_ERR PREFIX "count given by _CST is not valid\n"); status = -EFAULT; goto end; } /* Tell driver that at least _CST is supported. */ pr->flags.has_cst = 1; for (i = 1; i <= count; i++) { union acpi_object *element; union acpi_object *obj; struct acpi_power_register *reg; struct acpi_processor_cx cx; memset(&cx, 0, sizeof(cx)); element = &(cst->package.elements[i]); if (element->type != ACPI_TYPE_PACKAGE) continue; if (element->package.count != 4) continue; obj = &(element->package.elements[0]); if (obj->type != ACPI_TYPE_BUFFER) continue; reg = (struct acpi_power_register *)obj->buffer.pointer; if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) continue; /* There should be an easy way to extract an integer... */ obj = &(element->package.elements[1]); if (obj->type != ACPI_TYPE_INTEGER) continue; cx.type = obj->integer.value; /* * Some buggy BIOSes won't list C1 in _CST - * Let acpi_processor_get_power_info_default() handle them later */ if (i == 1 && cx.type != ACPI_STATE_C1) current_count++; cx.address = reg->address; cx.index = current_count + 1; cx.entry_method = ACPI_CSTATE_SYSTEMIO; if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { if (acpi_processor_ffh_cstate_probe (pr->id, &cx, reg) == 0) { cx.entry_method = ACPI_CSTATE_FFH; } else if (cx.type == ACPI_STATE_C1) { /* * C1 is a special case where FIXED_HARDWARE * can be handled in non-MWAIT way as well. * In that case, save this _CST entry info. 
* Otherwise, ignore this info and continue. */ cx.entry_method = ACPI_CSTATE_HALT; snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); } else { continue; } if (cx.type == ACPI_STATE_C1 && (boot_option_idle_override == IDLE_NOMWAIT)) { /* * In most cases the C1 space_id obtained from * _CST object is FIXED_HARDWARE access mode. * But when the option of idle=halt is added, * the entry_method type should be changed from * CSTATE_FFH to CSTATE_HALT. * When the option of idle=nomwait is added, * the C1 entry_method type should be * CSTATE_HALT. */ cx.entry_method = ACPI_CSTATE_HALT; snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); } } else { snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", cx.address); } if (cx.type == ACPI_STATE_C1) { cx.valid = 1; } obj = &(element->package.elements[2]); if (obj->type != ACPI_TYPE_INTEGER) continue; cx.latency = obj->integer.value; obj = &(element->package.elements[3]); if (obj->type != ACPI_TYPE_INTEGER) continue; cx.power = obj->integer.value; current_count++; memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); /* * We support total ACPI_PROCESSOR_MAX_POWER - 1 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) */ if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { printk(KERN_WARNING "Limiting number of power states to max (%d)\n", ACPI_PROCESSOR_MAX_POWER); printk(KERN_WARNING "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); break; } } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", current_count)); /* Validate number of power states discovered */ if (current_count < 2) status = -EFAULT; end: kfree(buffer.pointer); return status; } static void acpi_processor_power_verify_c3(struct acpi_processor *pr, struct acpi_processor_cx *cx) { static int bm_check_flag = -1; static int bm_control_flag = -1; if (!cx->address) return; /* * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) * DMA transfers are used by any ISA device to avoid livelock. 
* Note that we could disable Type-F DMA (as recommended by * the erratum), but this is known to disrupt certain ISA * devices thus we take the conservative approach. */ else if (errata.piix4.fdma) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 not supported on PIIX4 with Type-F DMA\n")); return; } /* All the logic here assumes flags.bm_check is same across all CPUs */ if (bm_check_flag == -1) { /* Determine whether bm_check is needed based on CPU */ acpi_processor_power_init_bm_check(&(pr->flags), pr->id); bm_check_flag = pr->flags.bm_check; bm_control_flag = pr->flags.bm_control; } else { pr->flags.bm_check = bm_check_flag; pr->flags.bm_control = bm_control_flag; } if (pr->flags.bm_check) { if (!pr->flags.bm_control) { if (pr->flags.has_cst != 1) { /* bus mastering control is necessary */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 support requires BM control\n")); return; } else { /* Here we enter C3 without bus mastering */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 support without BM control\n")); } } } else { /* * WBINVD should be set in fadt, for C3 state to be * supported on when bm_check is not required. */ if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cache invalidation should work properly" " for C3 to be enabled on SMP systems\n")); return; } } /* * Otherwise we've met all of our C3 requirements. * Normalize the C3 latency to expidite policy. Enable * checking of bus mastering status (bm_check) so we can * use this in our C3 policy */ cx->valid = 1; cx->latency_ticks = cx->latency; /* * On older chipsets, BM_RLD needs to be set * in order for Bus Master activity to wake the * system from C3. Newer chipsets handle DMA * during C3 automatically and BM_RLD is a NOP. * In either case, the proper way to * handle BM_RLD is to set it and leave it set. 
*/ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1); return; } static int acpi_processor_power_verify(struct acpi_processor *pr) { unsigned int i; unsigned int working = 0; pr->power.timer_broadcast_on_state = INT_MAX; for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { struct acpi_processor_cx *cx = &pr->power.states[i]; switch (cx->type) { case ACPI_STATE_C1: cx->valid = 1; break; case ACPI_STATE_C2: if (!cx->address) break; cx->valid = 1; cx->latency_ticks = cx->latency; /* Normalize latency */ break; case ACPI_STATE_C3: acpi_processor_power_verify_c3(pr, cx); break; } if (!cx->valid) continue; lapic_timer_check_state(i, pr, cx); tsc_check_state(cx->type); working++; } lapic_timer_propagate_broadcast(pr); return (working); } static int acpi_processor_get_power_info(struct acpi_processor *pr) { unsigned int i; int result; /* NOTE: the idle thread may not be running while calling * this function */ /* Zero initialize all the C-states info. */ memset(pr->power.states, 0, sizeof(pr->power.states)); result = acpi_processor_get_power_info_cst(pr); if (result == -ENODEV) result = acpi_processor_get_power_info_fadt(pr); if (result) return result; acpi_processor_get_power_info_default(pr); pr->power.count = acpi_processor_power_verify(pr); /* * if one state of type C2 or C3 is available, mark this * CPU as being "idle manageable" */ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { if (pr->power.states[i].valid) { pr->power.count = i; if (pr->power.states[i].type >= ACPI_STATE_C2) pr->flags.power = 1; } } return 0; } /** * acpi_idle_bm_check - checks if bus master activity was detected */ static int acpi_idle_bm_check(void) { u32 bm_status = 0; if (bm_check_disable) return 0; acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); if (bm_status) acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); /* * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect * the true state of bus mastering activity; forcing us to * manually check 
the BMIDEA bit of each IDE channel. */ else if (errata.piix4.bmisx) { if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) bm_status = 1; } return bm_status; } /** * acpi_idle_do_entry - a helper function that does C2 and C3 type entry * @cx: cstate data * * Caller disables interrupt before call and enables interrupt after return. */ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) { /* Don't trace irqs off for idle */ stop_critical_timings(); if (cx->entry_method == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cx); } else if (cx->entry_method == ACPI_CSTATE_HALT) { acpi_safe_halt(); } else { /* IO port based C-state */ inb(cx->address); /* Dummy wait op - must do something useless after P_LVL2 read because chipsets cannot guarantee that STPCLK# signal gets asserted in time to freeze execution properly. */ inl(acpi_gbl_FADT.xpm_timer_block.address); } start_critical_timings(); } /** * acpi_idle_enter_c1 - enters an ACPI C1 state-type * @dev: the target CPU * @drv: cpuidle driver containing cpuidle state info * @index: index of target state * * This is equivalent to the HALT instruction. 
*/ static int acpi_idle_enter_c1(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { ktime_t kt1, kt2; s64 idle_time; struct acpi_processor *pr; struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); pr = __this_cpu_read(processors); dev->last_residency = 0; if (unlikely(!pr)) return -EINVAL; local_irq_disable(); lapic_timer_state_broadcast(pr, cx, 1); kt1 = ktime_get_real(); acpi_idle_do_entry(cx); kt2 = ktime_get_real(); idle_time = ktime_to_us(ktime_sub(kt2, kt1)); /* Update device last_residency*/ dev->last_residency = (int)idle_time; local_irq_enable(); cx->usage++; lapic_timer_state_broadcast(pr, cx, 0); return index; } /** * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) * @dev: the target CPU * @index: the index of suggested state */ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) { struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); ACPI_FLUSH_CPU_CACHE(); while (1) { if (cx->entry_method == ACPI_CSTATE_HALT) safe_halt(); else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { inb(cx->address); /* See comment in acpi_idle_do_entry() */ inl(acpi_gbl_FADT.xpm_timer_block.address); } else return -ENODEV; } /* Never reached */ return 0; } /** * acpi_idle_enter_simple - enters an ACPI state without BM handling * @dev: the target CPU * @drv: cpuidle driver with cpuidle state information * @index: the index of suggested state */ static int acpi_idle_enter_simple(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); ktime_t kt1, kt2; s64 idle_time_ns; s64 idle_time; pr = __this_cpu_read(processors); dev->last_residency = 0; if (unlikely(!pr)) return -EINVAL; 
local_irq_disable(); if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we test * NEED_RESCHED: */ smp_mb(); if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); return -EINVAL; } } /* * Must be done before busmaster disable as we might need to * access HPET ! */ lapic_timer_state_broadcast(pr, cx, 1); if (cx->type == ACPI_STATE_C3) ACPI_FLUSH_CPU_CACHE(); kt1 = ktime_get_real(); /* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); acpi_idle_do_entry(cx); kt2 = ktime_get_real(); idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); idle_time = idle_time_ns; do_div(idle_time, NSEC_PER_USEC); /* Update device last_residency*/ dev->last_residency = (int)idle_time; /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(idle_time_ns); local_irq_enable(); if (cx->entry_method != ACPI_CSTATE_FFH) current_thread_info()->status |= TS_POLLING; cx->usage++; lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; return index; } static int c3_cpu_count; static DEFINE_RAW_SPINLOCK(c3_lock); /** * acpi_idle_enter_bm - enters C3 with proper BM handling * @dev: the target CPU * @drv: cpuidle driver containing state data * @index: the index of suggested state * * If BM is detected, the deepest non-C3 idle state is entered instead. 
*/ static int acpi_idle_enter_bm(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); ktime_t kt1, kt2; s64 idle_time_ns; s64 idle_time; pr = __this_cpu_read(processors); dev->last_residency = 0; if (unlikely(!pr)) return -EINVAL; if (!cx->bm_sts_skip && acpi_idle_bm_check()) { if (drv->safe_state_index >= 0) { return drv->states[drv->safe_state_index].enter(dev, drv, drv->safe_state_index); } else { local_irq_disable(); acpi_safe_halt(); local_irq_enable(); return -EINVAL; } } local_irq_disable(); if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we test * NEED_RESCHED: */ smp_mb(); if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); return -EINVAL; } } acpi_unlazy_tlb(smp_processor_id()); /* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); /* * Must be done before busmaster disable as we might need to * access HPET ! */ lapic_timer_state_broadcast(pr, cx, 1); kt1 = ktime_get_real(); /* * disable bus master * bm_check implies we need ARB_DIS * !bm_check implies we need cache flush * bm_control implies whether we can do ARB_DIS * * That leaves a case where bm_check is set and bm_control is * not set. In that case we cannot do much, we enter C3 * without doing anything. 
*/ if (pr->flags.bm_check && pr->flags.bm_control) { raw_spin_lock(&c3_lock); c3_cpu_count++; /* Disable bus master arbitration when all CPUs are in C3 */ if (c3_cpu_count == num_online_cpus()) acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); raw_spin_unlock(&c3_lock); } else if (!pr->flags.bm_check) { ACPI_FLUSH_CPU_CACHE(); } acpi_idle_do_entry(cx); /* Re-enable bus master arbitration */ if (pr->flags.bm_check && pr->flags.bm_control) { raw_spin_lock(&c3_lock); acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); c3_cpu_count--; raw_spin_unlock(&c3_lock); } kt2 = ktime_get_real(); idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); idle_time = idle_time_ns; do_div(idle_time, NSEC_PER_USEC); /* Update device last_residency*/ dev->last_residency = (int)idle_time; /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(idle_time_ns); local_irq_enable(); if (cx->entry_method != ACPI_CSTATE_FFH) current_thread_info()->status |= TS_POLLING; cx->usage++; lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; return index; } struct cpuidle_driver acpi_idle_driver = { .name = "acpi_idle", .owner = THIS_MODULE, }; /** * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE * device i.e. 
per-cpu data * * @pr: the ACPI processor */ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr) { int i, count = CPUIDLE_DRIVER_STATE_START; struct acpi_processor_cx *cx; struct cpuidle_state_usage *state_usage; struct cpuidle_device *dev = &pr->power.dev; if (!pr->flags.power_setup_done) return -EINVAL; if (pr->flags.power == 0) { return -EINVAL; } if (!dev) return -EINVAL; dev->cpu = pr->id; if (max_cstate == 0) max_cstate = 1; for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { cx = &pr->power.states[i]; state_usage = &dev->states_usage[count]; if (!cx->valid) continue; #ifdef CONFIG_HOTPLUG_CPU if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) continue; #endif cpuidle_set_statedata(state_usage, cx); count++; if (count == CPUIDLE_STATE_MAX) break; } dev->state_count = count; if (!count) return -EINVAL; return 0; } /** * acpi_processor_setup_cpuidle states- prepares and configures cpuidle * global state data i.e. 
idle routines * * @pr: the ACPI processor */ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) { int i, count = CPUIDLE_DRIVER_STATE_START; struct acpi_processor_cx *cx; struct cpuidle_state *state; struct cpuidle_driver *drv = &acpi_idle_driver; if (!pr->flags.power_setup_done) return -EINVAL; if (pr->flags.power == 0) return -EINVAL; drv->safe_state_index = -1; for (i = 0; i < CPUIDLE_STATE_MAX; i++) { drv->states[i].name[0] = '\0'; drv->states[i].desc[0] = '\0'; } if (max_cstate == 0) max_cstate = 1; for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { cx = &pr->power.states[i]; if (!cx->valid) continue; #ifdef CONFIG_HOTPLUG_CPU if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) continue; #endif state = &drv->states[count]; snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); state->exit_latency = cx->latency; state->target_residency = cx->latency * latency_factor; state->flags = 0; switch (cx->type) { case ACPI_STATE_C1: if (cx->entry_method == ACPI_CSTATE_FFH) state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = acpi_idle_enter_c1; state->enter_dead = acpi_idle_play_dead; drv->safe_state_index = count; break; case ACPI_STATE_C2: state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = acpi_idle_enter_simple; state->enter_dead = acpi_idle_play_dead; drv->safe_state_index = count; break; case ACPI_STATE_C3: state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = pr->flags.bm_check ? 
acpi_idle_enter_bm : acpi_idle_enter_simple; break; } count++; if (count == CPUIDLE_STATE_MAX) break; } drv->state_count = count; if (!count) return -EINVAL; return 0; } int acpi_processor_hotplug(struct acpi_processor *pr) { int ret = 0; if (disabled_by_idle_boot_param()) return 0; if (!pr) return -EINVAL; if (nocst) { return -ENODEV; } if (!pr->flags.power_setup_done) return -ENODEV; cpuidle_pause_and_lock(); cpuidle_disable_device(&pr->power.dev); acpi_processor_get_power_info(pr); if (pr->flags.power) { acpi_processor_setup_cpuidle_cx(pr); ret = cpuidle_enable_device(&pr->power.dev); } cpuidle_resume_and_unlock(); return ret; } int acpi_processor_cst_has_changed(struct acpi_processor *pr) { int cpu; struct acpi_processor *_pr; if (disabled_by_idle_boot_param()) return 0; if (!pr) return -EINVAL; if (nocst) return -ENODEV; if (!pr->flags.power_setup_done) return -ENODEV; /* * FIXME: Design the ACPI notification to make it once per * system instead of once per-cpu. This condition is a hack * to make the code that updates C-States be called once. 
*/ if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { cpuidle_pause_and_lock(); /* Protect against cpu-hotplug */ get_online_cpus(); /* Disable all cpuidle devices */ for_each_online_cpu(cpu) { _pr = per_cpu(processors, cpu); if (!_pr || !_pr->flags.power_setup_done) continue; cpuidle_disable_device(&_pr->power.dev); } /* Populate Updated C-state information */ acpi_processor_get_power_info(pr); acpi_processor_setup_cpuidle_states(pr); /* Enable all cpuidle devices */ for_each_online_cpu(cpu) { _pr = per_cpu(processors, cpu); if (!_pr || !_pr->flags.power_setup_done) continue; acpi_processor_get_power_info(_pr); if (_pr->flags.power) { acpi_processor_setup_cpuidle_cx(_pr); cpuidle_enable_device(&_pr->power.dev); } } put_online_cpus(); cpuidle_resume_and_unlock(); } return 0; } static int acpi_processor_registered; int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device) { acpi_status status = 0; int retval; static int first_run; if (disabled_by_idle_boot_param()) return 0; if (!first_run) { dmi_check_system(processor_power_dmi_table); max_cstate = acpi_processor_cstate_check(max_cstate); if (max_cstate < ACPI_C_STATES_MAX) printk(KERN_NOTICE "ACPI: processor limited to max C-state %d\n", max_cstate); first_run++; } if (!pr) return -EINVAL; if (acpi_gbl_FADT.cst_control && !nocst) { status = acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Notifying BIOS of _CST ability failed")); } } acpi_processor_get_power_info(pr); pr->flags.power_setup_done = 1; /* * Install the idle handler if processor power management is supported. * Note that we use previously set idle handler will be used on * platforms that only support C1. 
*/ if (pr->flags.power) { /* Register acpi_idle_driver if not already registered */ if (!acpi_processor_registered) { acpi_processor_setup_cpuidle_states(pr); retval = cpuidle_register_driver(&acpi_idle_driver); if (retval) return retval; printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", acpi_idle_driver.name); } /* Register per-cpu cpuidle_device. Cpuidle driver * must already be registered before registering device */ acpi_processor_setup_cpuidle_cx(pr); retval = cpuidle_register_device(&pr->power.dev); if (retval) { if (acpi_processor_registered == 0) cpuidle_unregister_driver(&acpi_idle_driver); return retval; } acpi_processor_registered++; } return 0; } int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device) { if (disabled_by_idle_boot_param()) return 0; if (pr->flags.power) { cpuidle_unregister_device(&pr->power.dev); acpi_processor_registered--; if (acpi_processor_registered == 0) cpuidle_unregister_driver(&acpi_idle_driver); } pr->flags.power_setup_done = 0; return 0; }
gpl-2.0
jderrick/linux-torvalds
arch/mips/ralink/rt3883.c
829
5064
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Parts of this file are based on Ralink's 2.6.21 BSP * * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> * Copyright (C) 2013 John Crispin <blogic@openwrt.org> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <asm/mipsregs.h> #include <asm/mach-ralink/ralink_regs.h> #include <asm/mach-ralink/rt3883.h> #include <asm/mach-ralink/pinmux.h> #include "common.h" static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; static struct rt2880_pmx_func uartf_func[] = { FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8), FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8), FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8), FUNC("i2s uartf", RT3883_GPIO_MODE_I2S_UARTF, 7, 8), FUNC("pcm gpio", RT3883_GPIO_MODE_PCM_GPIO, 11, 4), FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4), FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4), }; static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; static struct rt2880_pmx_func pci_func[] = { FUNC("pci-dev", 0, 40, 32), FUNC("pci-host2", 1, 40, 32), FUNC("pci-host1", 2, 40, 32), FUNC("pci-fnc", 3, 40, 32) }; static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; static struct rt2880_pmx_group rt3883_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), GRP("spi", spi_func, 1, 
RT3883_GPIO_MODE_SPI), GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK, RT3883_GPIO_MODE_UART0_SHIFT), GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1), GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG), GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO), GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A), GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G), GRP("pci", pci_func, RT3883_GPIO_MODE_PCI_MASK, RT3883_GPIO_MODE_PCI_SHIFT), GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1), GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2), { 0 } }; static void rt3883_wdt_reset(void) { u32 t; /* enable WDT reset output on GPIO 2 */ t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1); t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT; rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1); } void __init ralink_clk_init(void) { unsigned long cpu_rate, sys_rate; u32 syscfg0; u32 clksel; u32 ddr2; syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0); clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) & RT3883_SYSCFG0_CPUCLK_MASK); ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2; switch (clksel) { case RT3883_SYSCFG0_CPUCLK_250: cpu_rate = 250000000; sys_rate = (ddr2) ? 125000000 : 83000000; break; case RT3883_SYSCFG0_CPUCLK_384: cpu_rate = 384000000; sys_rate = (ddr2) ? 128000000 : 96000000; break; case RT3883_SYSCFG0_CPUCLK_480: cpu_rate = 480000000; sys_rate = (ddr2) ? 160000000 : 120000000; break; case RT3883_SYSCFG0_CPUCLK_500: cpu_rate = 500000000; sys_rate = (ddr2) ? 
166000000 : 125000000; break; } ralink_clk_add("cpu", cpu_rate); ralink_clk_add("10000100.timer", sys_rate); ralink_clk_add("10000120.watchdog", sys_rate); ralink_clk_add("10000500.uart", 40000000); ralink_clk_add("10000b00.spi", sys_rate); ralink_clk_add("10000c00.uartlite", 40000000); ralink_clk_add("10100000.ethernet", sys_rate); ralink_clk_add("10180000.wmac", 40000000); } void __init ralink_of_remap(void) { rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc"); rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc"); if (!rt_sysc_membase || !rt_memc_membase) panic("Failed to remap core resources"); } void prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE); const char *name; u32 n0; u32 n1; u32 id; n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3); n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7); id = __raw_readl(sysc + RT3883_SYSC_REG_REVID); if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) { soc_info->compatible = "ralink,rt3883-soc"; name = "RT3883"; } else { panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1); } snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, "Ralink %s ver:%u eco:%u", name, (id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK, (id & RT3883_REVID_ECO_ID_MASK)); soc_info->mem_base = RT3883_SDRAM_BASE; soc_info->mem_size_min = RT3883_MEM_SIZE_MIN; soc_info->mem_size_max = RT3883_MEM_SIZE_MAX; rt2880_pinmux_data = rt3883_pinmux_data; }
gpl-2.0
ntrdma/ntrdma
arch/mips/alchemy/common/platform.c
1853
12223
/* * Platform device support for Au1x00 SoCs. * * Copyright 2004, Matt Porter <mporter@kernel.crashing.org> * * (C) Copyright Embedded Alley Solutions, Inc 2005 * Author: Pantelis Antoniou <pantelis@embeddedalley.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/slab.h> #include <linux/usb/ehci_pdriver.h> #include <linux/usb/ohci_pdriver.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_dbdma.h> #include <asm/mach-au1x00/au1100_mmc.h> #include <asm/mach-au1x00/au1xxx_eth.h> #include <prom.h> static void alchemy_8250_pm(struct uart_port *port, unsigned int state, unsigned int old_state) { #ifdef CONFIG_SERIAL_8250 switch (state) { case 0: alchemy_uart_enable(CPHYSADDR(port->membase)); serial8250_do_pm(port, state, old_state); break; case 3: /* power off */ serial8250_do_pm(port, state, old_state); alchemy_uart_disable(CPHYSADDR(port->membase)); break; default: serial8250_do_pm(port, state, old_state); break; } #endif } #define PORT(_base, _irq) \ { \ .mapbase = _base, \ .irq = _irq, \ .regshift = 2, \ .iotype = UPIO_AU, \ .flags = UPF_SKIP_TEST | UPF_IOREMAP | \ UPF_FIXED_TYPE, \ .type = PORT_16550A, \ .pm = alchemy_8250_pm, \ } static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = { [ALCHEMY_CPU_AU1000] = { PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT), PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT), }, [ALCHEMY_CPU_AU1500] = { PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT), }, [ALCHEMY_CPU_AU1100] = { PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT), 
PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT), }, [ALCHEMY_CPU_AU1550] = { PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT), }, [ALCHEMY_CPU_AU1200] = { PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT), }, [ALCHEMY_CPU_AU1300] = { PORT(AU1300_UART0_PHYS_ADDR, AU1300_UART0_INT), PORT(AU1300_UART1_PHYS_ADDR, AU1300_UART1_INT), PORT(AU1300_UART2_PHYS_ADDR, AU1300_UART2_INT), PORT(AU1300_UART3_PHYS_ADDR, AU1300_UART3_INT), }, }; static struct platform_device au1xx0_uart_device = { .name = "serial8250", .id = PLAT8250_DEV_AU1X00, }; static void __init alchemy_setup_uarts(int ctype) { long uartclk; int s = sizeof(struct plat_serial8250_port); int c = alchemy_get_uarts(ctype); struct plat_serial8250_port *ports; struct clk *clk = clk_get(NULL, ALCHEMY_PERIPH_CLK); if (IS_ERR(clk)) return; if (clk_prepare_enable(clk)) { clk_put(clk); return; } uartclk = clk_get_rate(clk); clk_put(clk); ports = kzalloc(s * (c + 1), GFP_KERNEL); if (!ports) { printk(KERN_INFO "Alchemy: no memory for UART data\n"); return; } memcpy(ports, au1x00_uart_data[ctype], s * c); au1xx0_uart_device.dev.platform_data = ports; /* Fill up uartclk. 
*/ for (s = 0; s < c; s++) ports[s].uartclk = uartclk; if (platform_device_register(&au1xx0_uart_device)) printk(KERN_INFO "Alchemy: failed to register UARTs\n"); } /* The dmamask must be set for OHCI/EHCI to work */ static u64 alchemy_ohci_dmamask = DMA_BIT_MASK(32); static u64 __maybe_unused alchemy_ehci_dmamask = DMA_BIT_MASK(32); /* Power on callback for the ehci platform driver */ static int alchemy_ehci_power_on(struct platform_device *pdev) { return alchemy_usb_control(ALCHEMY_USB_EHCI0, 1); } /* Power off/suspend callback for the ehci platform driver */ static void alchemy_ehci_power_off(struct platform_device *pdev) { alchemy_usb_control(ALCHEMY_USB_EHCI0, 0); } static struct usb_ehci_pdata alchemy_ehci_pdata = { .no_io_watchdog = 1, .power_on = alchemy_ehci_power_on, .power_off = alchemy_ehci_power_off, .power_suspend = alchemy_ehci_power_off, }; /* Power on callback for the ohci platform driver */ static int alchemy_ohci_power_on(struct platform_device *pdev) { int unit; unit = (pdev->id == 1) ? ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0; return alchemy_usb_control(unit, 1); } /* Power off/suspend callback for the ohci platform driver */ static void alchemy_ohci_power_off(struct platform_device *pdev) { int unit; unit = (pdev->id == 1) ? 
ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0; alchemy_usb_control(unit, 0); } static struct usb_ohci_pdata alchemy_ohci_pdata = { .power_on = alchemy_ohci_power_on, .power_off = alchemy_ohci_power_off, .power_suspend = alchemy_ohci_power_off, }; static unsigned long alchemy_ohci_data[][2] __initdata = { [ALCHEMY_CPU_AU1000] = { AU1000_USB_OHCI_PHYS_ADDR, AU1000_USB_HOST_INT }, [ALCHEMY_CPU_AU1500] = { AU1000_USB_OHCI_PHYS_ADDR, AU1500_USB_HOST_INT }, [ALCHEMY_CPU_AU1100] = { AU1000_USB_OHCI_PHYS_ADDR, AU1100_USB_HOST_INT }, [ALCHEMY_CPU_AU1550] = { AU1550_USB_OHCI_PHYS_ADDR, AU1550_USB_HOST_INT }, [ALCHEMY_CPU_AU1200] = { AU1200_USB_OHCI_PHYS_ADDR, AU1200_USB_INT }, [ALCHEMY_CPU_AU1300] = { AU1300_USB_OHCI0_PHYS_ADDR, AU1300_USB_INT }, }; static unsigned long alchemy_ehci_data[][2] __initdata = { [ALCHEMY_CPU_AU1200] = { AU1200_USB_EHCI_PHYS_ADDR, AU1200_USB_INT }, [ALCHEMY_CPU_AU1300] = { AU1300_USB_EHCI_PHYS_ADDR, AU1300_USB_INT }, }; static int __init _new_usbres(struct resource **r, struct platform_device **d) { *r = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL); if (!*r) return -ENOMEM; *d = kzalloc(sizeof(struct platform_device), GFP_KERNEL); if (!*d) { kfree(*r); return -ENOMEM; } (*d)->dev.coherent_dma_mask = DMA_BIT_MASK(32); (*d)->num_resources = 2; (*d)->resource = *r; return 0; } static void __init alchemy_setup_usb(int ctype) { struct resource *res; struct platform_device *pdev; /* setup OHCI0. 
Every variant has one */ if (_new_usbres(&res, &pdev)) return; res[0].start = alchemy_ohci_data[ctype][0]; res[0].end = res[0].start + 0x100 - 1; res[0].flags = IORESOURCE_MEM; res[1].start = alchemy_ohci_data[ctype][1]; res[1].end = res[1].start; res[1].flags = IORESOURCE_IRQ; pdev->name = "ohci-platform"; pdev->id = 0; pdev->dev.dma_mask = &alchemy_ohci_dmamask; pdev->dev.platform_data = &alchemy_ohci_pdata; if (platform_device_register(pdev)) printk(KERN_INFO "Alchemy USB: cannot add OHCI0\n"); /* setup EHCI0: Au1200/Au1300 */ if ((ctype == ALCHEMY_CPU_AU1200) || (ctype == ALCHEMY_CPU_AU1300)) { if (_new_usbres(&res, &pdev)) return; res[0].start = alchemy_ehci_data[ctype][0]; res[0].end = res[0].start + 0x100 - 1; res[0].flags = IORESOURCE_MEM; res[1].start = alchemy_ehci_data[ctype][1]; res[1].end = res[1].start; res[1].flags = IORESOURCE_IRQ; pdev->name = "ehci-platform"; pdev->id = 0; pdev->dev.dma_mask = &alchemy_ehci_dmamask; pdev->dev.platform_data = &alchemy_ehci_pdata; if (platform_device_register(pdev)) printk(KERN_INFO "Alchemy USB: cannot add EHCI0\n"); } /* Au1300: OHCI1 */ if (ctype == ALCHEMY_CPU_AU1300) { if (_new_usbres(&res, &pdev)) return; res[0].start = AU1300_USB_OHCI1_PHYS_ADDR; res[0].end = res[0].start + 0x100 - 1; res[0].flags = IORESOURCE_MEM; res[1].start = AU1300_USB_INT; res[1].end = res[1].start; res[1].flags = IORESOURCE_IRQ; pdev->name = "ohci-platform"; pdev->id = 1; pdev->dev.dma_mask = &alchemy_ohci_dmamask; pdev->dev.platform_data = &alchemy_ohci_pdata; if (platform_device_register(pdev)) printk(KERN_INFO "Alchemy USB: cannot add OHCI1\n"); } } /* Macro to help defining the Ethernet MAC resources */ #define MAC_RES_COUNT 4 /* MAC regs, MAC en, MAC INT, MACDMA regs */ #define MAC_RES(_base, _enable, _irq, _macdma) \ { \ .start = _base, \ .end = _base + 0xffff, \ .flags = IORESOURCE_MEM, \ }, \ { \ .start = _enable, \ .end = _enable + 0x3, \ .flags = IORESOURCE_MEM, \ }, \ { \ .start = _irq, \ .end = _irq, \ .flags = 
IORESOURCE_IRQ \ }, \ { \ .start = _macdma, \ .end = _macdma + 0x1ff, \ .flags = IORESOURCE_MEM, \ } static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = { [ALCHEMY_CPU_AU1000] = { MAC_RES(AU1000_MAC0_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR, AU1000_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, [ALCHEMY_CPU_AU1500] = { MAC_RES(AU1500_MAC0_PHYS_ADDR, AU1500_MACEN_PHYS_ADDR, AU1500_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, [ALCHEMY_CPU_AU1100] = { MAC_RES(AU1000_MAC0_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR, AU1100_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, [ALCHEMY_CPU_AU1550] = { MAC_RES(AU1000_MAC0_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR, AU1550_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, }; static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { .phy1_search_mac0 = 1, }; static struct platform_device au1xxx_eth0_device = { .name = "au1000-eth", .id = 0, .num_resources = MAC_RES_COUNT, .dev.platform_data = &au1xxx_eth0_platform_data, }; static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = { [ALCHEMY_CPU_AU1000] = { MAC_RES(AU1000_MAC1_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR + 4, AU1000_MAC1_DMA_INT, AU1000_MACDMA1_PHYS_ADDR) }, [ALCHEMY_CPU_AU1500] = { MAC_RES(AU1500_MAC1_PHYS_ADDR, AU1500_MACEN_PHYS_ADDR + 4, AU1500_MAC1_DMA_INT, AU1000_MACDMA1_PHYS_ADDR) }, [ALCHEMY_CPU_AU1550] = { MAC_RES(AU1000_MAC1_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR + 4, AU1550_MAC1_DMA_INT, AU1000_MACDMA1_PHYS_ADDR) }, }; static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { .phy1_search_mac0 = 1, }; static struct platform_device au1xxx_eth1_device = { .name = "au1000-eth", .id = 1, .num_resources = MAC_RES_COUNT, .dev.platform_data = &au1xxx_eth1_platform_data, }; void __init au1xxx_override_eth_cfg(unsigned int port, struct au1000_eth_platform_data *eth_data) { if (!eth_data || port > 1) return; if (port == 0) memcpy(&au1xxx_eth0_platform_data, eth_data, sizeof(struct au1000_eth_platform_data)); else memcpy(&au1xxx_eth1_platform_data, eth_data, 
sizeof(struct au1000_eth_platform_data)); } static void __init alchemy_setup_macs(int ctype) { int ret, i; unsigned char ethaddr[6]; struct resource *macres; /* Handle 1st MAC */ if (alchemy_get_macs(ctype) < 1) return; macres = kmemdup(au1xxx_eth0_resources[ctype], sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); if (!macres) { printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n"); return; } au1xxx_eth0_device.resource = macres; i = prom_get_ethernet_addr(ethaddr); if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); ret = platform_device_register(&au1xxx_eth0_device); if (ret) printk(KERN_INFO "Alchemy: failed to register MAC0\n"); /* Handle 2nd MAC */ if (alchemy_get_macs(ctype) < 2) return; macres = kmemdup(au1xxx_eth1_resources[ctype], sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); if (!macres) { printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n"); return; } au1xxx_eth1_device.resource = macres; ethaddr[5] += 1; /* next addr for 2nd MAC */ if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); /* Register second MAC if enabled in pinfunc */ if (!(alchemy_rdsys(AU1000_SYS_PINFUNC) & SYS_PF_NI2)) { ret = platform_device_register(&au1xxx_eth1_device); if (ret) printk(KERN_INFO "Alchemy: failed to register MAC1\n"); } } static int __init au1xxx_platform_init(void) { int ctype = alchemy_get_cputype(); alchemy_setup_uarts(ctype); alchemy_setup_macs(ctype); alchemy_setup_usb(ctype); return 0; } arch_initcall(au1xxx_platform_init);
gpl-2.0
synexxus/synnix
net/mac80211/tkip.c
1853
10828
/* * Copyright 2002-2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/netdevice.h> #include <linux/export.h> #include <asm/unaligned.h> #include <net/mac80211.h> #include "driver-ops.h" #include "key.h" #include "tkip.h" #include "wep.h" #define PHASE1_LOOP_COUNT 8 /* * 2-byte by 2-byte subset of the full AES S-box table; second part of this * table is identical to first part but byte-swapped */ static const u16 tkip_sbox[256] = { 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 
0xBCE2, 0x161D, 0xAD76, 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, }; static u16 tkipS(u16 val) { return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]); } static u8 *write_tkip_iv(u8 *pos, u16 iv16) { *pos++ = iv16 >> 8; *pos++ = ((iv16 >> 8) | 0x20) & 0x7f; *pos++ = iv16 & 0xFF; return pos; } /* * P1K := Phase1(TA, TK, TSC) * TA = transmitter address (48 bits) * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits) * TSC = TKIP sequence counter (48 bits, only 32 msb bits used) * P1K: 80 bits */ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx, const u8 *ta, u32 tsc_IV32) { int i, j; u16 *p1k = ctx->p1k; p1k[0] = tsc_IV32 & 0xFFFF; p1k[1] = tsc_IV32 >> 16; p1k[2] = get_unaligned_le16(ta + 0); p1k[3] = get_unaligned_le16(ta + 2); p1k[4] = get_unaligned_le16(ta + 4); for (i = 0; i < PHASE1_LOOP_COUNT; i++) { j = 2 * (i & 1); p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j)); p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j)); p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j)); p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j)); p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; } ctx->state = TKIP_STATE_PHASE1_DONE; ctx->p1k_iv32 = tsc_IV32; } static void tkip_mixing_phase2(const u8 *tk, 
struct tkip_ctx *ctx, u16 tsc_IV16, u8 *rc4key) { u16 ppk[6]; const u16 *p1k = ctx->p1k; int i; ppk[0] = p1k[0]; ppk[1] = p1k[1]; ppk[2] = p1k[2]; ppk[3] = p1k[3]; ppk[4] = p1k[4]; ppk[5] = p1k[4] + tsc_IV16; ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0)); ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2)); ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4)); ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6)); ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8)); ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10)); ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1); ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1); ppk[2] += ror16(ppk[1], 1); ppk[3] += ror16(ppk[2], 1); ppk[4] += ror16(ppk[3], 1); ppk[5] += ror16(ppk[4], 1); rc4key = write_tkip_iv(rc4key, tsc_IV16); *rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF; for (i = 0; i < 6; i++) put_unaligned_le16(ppk[i], rc4key + 2 * i); } /* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets * of the IV. Returns pointer to the octet following IVs (i.e., beginning of * the packet payload). */ u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key) { lockdep_assert_held(&key->u.tkip.txlock); pos = write_tkip_iv(pos, key->u.tkip.tx.iv16); *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */; put_unaligned_le32(key->u.tkip.tx.iv32, pos); return pos + 4; } static void ieee80211_compute_tkip_p1k(struct ieee80211_key *key, u32 iv32) { struct ieee80211_sub_if_data *sdata = key->sdata; struct tkip_ctx *ctx = &key->u.tkip.tx; const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; lockdep_assert_held(&key->u.tkip.txlock); /* * Update the P1K when the IV32 is different from the value it * had when we last computed it (or when not initialised yet). * This might flip-flop back and forth if packets are processed * out-of-order due to the different ACs, but then we have to * just compute the P1K more often. 
*/ if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT) tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32); } void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf, u32 iv32, u16 *p1k) { struct ieee80211_key *key = (struct ieee80211_key *) container_of(keyconf, struct ieee80211_key, conf); struct tkip_ctx *ctx = &key->u.tkip.tx; spin_lock_bh(&key->u.tkip.txlock); ieee80211_compute_tkip_p1k(key, iv32); memcpy(p1k, ctx->p1k, sizeof(ctx->p1k)); spin_unlock_bh(&key->u.tkip.txlock); } EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv); void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf, const u8 *ta, u32 iv32, u16 *p1k) { const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; struct tkip_ctx ctx; tkip_mixing_phase1(tk, &ctx, ta, iv32); memcpy(p1k, ctx.p1k, sizeof(ctx.p1k)); } EXPORT_SYMBOL(ieee80211_get_tkip_rx_p1k); void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, struct sk_buff *skb, u8 *p2k) { struct ieee80211_key *key = (struct ieee80211_key *) container_of(keyconf, struct ieee80211_key, conf); const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; struct tkip_ctx *ctx = &key->u.tkip.tx; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); u32 iv32 = get_unaligned_le32(&data[4]); u16 iv16 = data[2] | (data[0] << 8); spin_lock(&key->u.tkip.txlock); ieee80211_compute_tkip_p1k(key, iv32); tkip_mixing_phase2(tk, ctx, iv16, p2k); spin_unlock(&key->u.tkip.txlock); } EXPORT_SYMBOL(ieee80211_get_tkip_p2k); /* * Encrypt packet payload with TKIP using @key. @pos is a pointer to the * beginning of the buffer containing payload. This payload must include * the IV/Ext.IV and space for (taildroom) four octets for ICV. * @payload_len is the length of payload (_not_ including IV/ICV length). * @ta is the transmitter addresses. 
*/ int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, struct ieee80211_key *key, struct sk_buff *skb, u8 *payload, size_t payload_len) { u8 rc4key[16]; ieee80211_get_tkip_p2k(&key->conf, skb, rc4key); return ieee80211_wep_encrypt_data(tfm, rc4key, 16, payload, payload_len); } /* Decrypt packet payload with TKIP using @key. @pos is a pointer to the * beginning of the buffer containing IEEE 802.11 header payload, i.e., * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the * length of payload, including IV, Ext. IV, MIC, ICV. */ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm, struct ieee80211_key *key, u8 *payload, size_t payload_len, u8 *ta, u8 *ra, int only_iv, int queue, u32 *out_iv32, u16 *out_iv16) { u32 iv32; u32 iv16; u8 rc4key[16], keyid, *pos = payload; int res; const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; if (payload_len < 12) return -1; iv16 = (pos[0] << 8) | pos[2]; keyid = pos[3]; iv32 = get_unaligned_le32(pos + 4); pos += 8; if (!(keyid & (1 << 5))) return TKIP_DECRYPT_NO_EXT_IV; if ((keyid >> 6) != key->conf.keyidx) return TKIP_DECRYPT_INVALID_KEYIDX; if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT && (iv32 < key->u.tkip.rx[queue].iv32 || (iv32 == key->u.tkip.rx[queue].iv32 && iv16 <= key->u.tkip.rx[queue].iv16))) return TKIP_DECRYPT_REPLAY; if (only_iv) { res = TKIP_DECRYPT_OK; key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED; goto done; } if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT || key->u.tkip.rx[queue].iv32 != iv32) { /* IV16 wrapped around - perform TKIP phase 1 */ tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); } if (key->local->ops->update_tkip_key && key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) { struct ieee80211_sub_if_data *sdata = key->sdata; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(key->sdata->bss, struct ieee80211_sub_if_data, u.ap); 
drv_update_tkip_key(key->local, sdata, &key->conf, key->sta, iv32, key->u.tkip.rx[queue].p1k); key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED; } tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); done: if (res == TKIP_DECRYPT_OK) { /* * Record previously received IV, will be copied into the * key information after MIC verification. It is possible * that we don't catch replays of fragments but that's ok * because the Michael MIC verication will then fail. */ *out_iv32 = iv32; *out_iv16 = iv16; } return res; }
gpl-2.0
MoKee/android_kernel_motorola_apq8084
drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
2877
4530
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <core/gpuobj.h> #include <core/class.h> #include <subdev/fb.h> #include <engine/dmaobj.h> struct nv50_dmaeng_priv { struct nouveau_dmaeng base; }; static int nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, struct nouveau_object *parent, struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { u32 flags0 = nv_mclass(dmaobj); u32 flags5 = 0x00000000; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { case NV50_CHANNEL_DMA_CLASS: case NV84_CHANNEL_DMA_CLASS: case NV50_CHANNEL_IND_CLASS: case NV84_CHANNEL_IND_CLASS: case NV50_DISP_MAST_CLASS: case NV84_DISP_MAST_CLASS: case NV94_DISP_MAST_CLASS: case NVA0_DISP_MAST_CLASS: case NVA3_DISP_MAST_CLASS: case NV50_DISP_SYNC_CLASS: case NV84_DISP_SYNC_CLASS: case NV94_DISP_SYNC_CLASS: case NVA0_DISP_SYNC_CLASS: case NVA3_DISP_SYNC_CLASS: case NV50_DISP_OVLY_CLASS: case NV84_DISP_OVLY_CLASS: case NV94_DISP_OVLY_CLASS: case NVA0_DISP_OVLY_CLASS: case NVA3_DISP_OVLY_CLASS: break; default: return -EINVAL; } } if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) { if (dmaobj->target == NV_MEM_TARGET_VM) { dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM; dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM; dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM; dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM; } else { dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US; dmaobj->conf0 |= NV50_DMA_CONF0_PART_256; dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE; dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR; } } flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22; flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22; flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV); flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART); switch (dmaobj->target) { case NV_MEM_TARGET_VM: flags0 |= 0x00000000; break; case NV_MEM_TARGET_VRAM: flags0 |= 0x00010000; break; case NV_MEM_TARGET_PCI: flags0 |= 0x00020000; break; case NV_MEM_TARGET_PCI_NOSNOOP: flags0 |= 0x00030000; break; default: return -EINVAL; } switch (dmaobj->access) { case 
NV_MEM_ACCESS_VM: break; case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break; case NV_MEM_ACCESS_WO: case NV_MEM_ACCESS_RW: flags0 |= 0x00080000; break; } ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); if (ret == 0) { nv_wo32(*pgpuobj, 0x00, flags0); nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | upper_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x10, 0x00000000); nv_wo32(*pgpuobj, 0x14, flags5); } return ret; } static int nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv50_dmaeng_priv *priv; int ret; ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_engine(priv)->sclass = nouveau_dmaobj_sclass; priv->base.bind = nv50_dmaobj_bind; return 0; } struct nouveau_oclass nv50_dmaeng_oclass = { .handle = NV_ENGINE(DMAOBJ, 0x50), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv50_dmaeng_ctor, .dtor = _nouveau_dmaeng_dtor, .init = _nouveau_dmaeng_init, .fini = _nouveau_dmaeng_fini, }, };
gpl-2.0
IKGapirov/android_kernel_SM-G800H
drivers/mfd/max8998.c
4157
8584
/* * max8998.c - mfd core driver for the Maxim 8998 * * Copyright (C) 2009-2010 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> * Marek Szyprowski <m.szyprowski@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <linux/mutex.h> #include <linux/mfd/core.h> #include <linux/mfd/max8998.h> #include <linux/mfd/max8998-private.h> #define RTC_I2C_ADDR (0x0c >> 1) static struct mfd_cell max8998_devs[] = { { .name = "max8998-pmic", }, { .name = "max8998-rtc", }, { .name = "max8998-battery", }, }; static struct mfd_cell lp3974_devs[] = { { .name = "lp3974-pmic", }, { .name = "lp3974-rtc", }, }; int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest) { struct max8998_dev *max8998 = i2c_get_clientdata(i2c); int ret; mutex_lock(&max8998->iolock); ret = i2c_smbus_read_byte_data(i2c, reg); mutex_unlock(&max8998->iolock); if (ret < 0) return ret; ret &= 0xff; *dest = ret; return 0; } EXPORT_SYMBOL(max8998_read_reg); int max8998_bulk_read(struct i2c_client *i2c, u8 reg, int count, u8 *buf) { struct max8998_dev *max8998 = i2c_get_clientdata(i2c); int ret; mutex_lock(&max8998->iolock); ret = 
i2c_smbus_read_i2c_block_data(i2c, reg, count, buf); mutex_unlock(&max8998->iolock); if (ret < 0) return ret; return 0; } EXPORT_SYMBOL(max8998_bulk_read); int max8998_write_reg(struct i2c_client *i2c, u8 reg, u8 value) { struct max8998_dev *max8998 = i2c_get_clientdata(i2c); int ret; mutex_lock(&max8998->iolock); ret = i2c_smbus_write_byte_data(i2c, reg, value); mutex_unlock(&max8998->iolock); return ret; } EXPORT_SYMBOL(max8998_write_reg); int max8998_bulk_write(struct i2c_client *i2c, u8 reg, int count, u8 *buf) { struct max8998_dev *max8998 = i2c_get_clientdata(i2c); int ret; mutex_lock(&max8998->iolock); ret = i2c_smbus_write_i2c_block_data(i2c, reg, count, buf); mutex_unlock(&max8998->iolock); if (ret < 0) return ret; return 0; } EXPORT_SYMBOL(max8998_bulk_write); int max8998_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask) { struct max8998_dev *max8998 = i2c_get_clientdata(i2c); int ret; mutex_lock(&max8998->iolock); ret = i2c_smbus_read_byte_data(i2c, reg); if (ret >= 0) { u8 old_val = ret & 0xff; u8 new_val = (val & mask) | (old_val & (~mask)); ret = i2c_smbus_write_byte_data(i2c, reg, new_val); } mutex_unlock(&max8998->iolock); return ret; } EXPORT_SYMBOL(max8998_update_reg); static int max8998_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct max8998_platform_data *pdata = i2c->dev.platform_data; struct max8998_dev *max8998; int ret = 0; max8998 = kzalloc(sizeof(struct max8998_dev), GFP_KERNEL); if (max8998 == NULL) return -ENOMEM; i2c_set_clientdata(i2c, max8998); max8998->dev = &i2c->dev; max8998->i2c = i2c; max8998->irq = i2c->irq; max8998->type = id->driver_data; if (pdata) { max8998->ono = pdata->ono; max8998->irq_base = pdata->irq_base; max8998->wakeup = pdata->wakeup; } mutex_init(&max8998->iolock); max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); i2c_set_clientdata(max8998->rtc, max8998); max8998_irq_init(max8998); pm_runtime_set_active(max8998->dev); switch (id->driver_data) { case TYPE_LP3974: ret = 
mfd_add_devices(max8998->dev, -1, lp3974_devs, ARRAY_SIZE(lp3974_devs), NULL, 0); break; case TYPE_MAX8998: ret = mfd_add_devices(max8998->dev, -1, max8998_devs, ARRAY_SIZE(max8998_devs), NULL, 0); break; default: ret = -EINVAL; } if (ret < 0) goto err; device_init_wakeup(max8998->dev, max8998->wakeup); return ret; err: mfd_remove_devices(max8998->dev); max8998_irq_exit(max8998); i2c_unregister_device(max8998->rtc); kfree(max8998); return ret; } static int max8998_i2c_remove(struct i2c_client *i2c) { struct max8998_dev *max8998 = i2c_get_clientdata(i2c); mfd_remove_devices(max8998->dev); max8998_irq_exit(max8998); i2c_unregister_device(max8998->rtc); kfree(max8998); return 0; } static const struct i2c_device_id max8998_i2c_id[] = { { "max8998", TYPE_MAX8998 }, { "lp3974", TYPE_LP3974}, { } }; MODULE_DEVICE_TABLE(i2c, max8998_i2c_id); static int max8998_suspend(struct device *dev) { struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); struct max8998_dev *max8998 = i2c_get_clientdata(i2c); if (device_may_wakeup(dev)) irq_set_irq_wake(max8998->irq, 1); return 0; } static int max8998_resume(struct device *dev) { struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); struct max8998_dev *max8998 = i2c_get_clientdata(i2c); if (device_may_wakeup(dev)) irq_set_irq_wake(max8998->irq, 0); /* * In LP3974, if IRQ registers are not "read & clear" * when it's set during sleep, the interrupt becomes * disabled. 
*/ return max8998_irq_resume(i2c_get_clientdata(i2c)); } struct max8998_reg_dump { u8 addr; u8 val; }; #define SAVE_ITEM(x) { .addr = (x), .val = 0x0, } static struct max8998_reg_dump max8998_dump[] = { SAVE_ITEM(MAX8998_REG_IRQM1), SAVE_ITEM(MAX8998_REG_IRQM2), SAVE_ITEM(MAX8998_REG_IRQM3), SAVE_ITEM(MAX8998_REG_IRQM4), SAVE_ITEM(MAX8998_REG_STATUSM1), SAVE_ITEM(MAX8998_REG_STATUSM2), SAVE_ITEM(MAX8998_REG_CHGR1), SAVE_ITEM(MAX8998_REG_CHGR2), SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1), SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1), SAVE_ITEM(MAX8998_REG_BUCK_ACTIVE_DISCHARGE3), SAVE_ITEM(MAX8998_REG_ONOFF1), SAVE_ITEM(MAX8998_REG_ONOFF2), SAVE_ITEM(MAX8998_REG_ONOFF3), SAVE_ITEM(MAX8998_REG_ONOFF4), SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE1), SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE2), SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE3), SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE4), SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE1), SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE2), SAVE_ITEM(MAX8998_REG_LDO2_LDO3), SAVE_ITEM(MAX8998_REG_LDO4), SAVE_ITEM(MAX8998_REG_LDO5), SAVE_ITEM(MAX8998_REG_LDO6), SAVE_ITEM(MAX8998_REG_LDO7), SAVE_ITEM(MAX8998_REG_LDO8_LDO9), SAVE_ITEM(MAX8998_REG_LDO10_LDO11), SAVE_ITEM(MAX8998_REG_LDO12), SAVE_ITEM(MAX8998_REG_LDO13), SAVE_ITEM(MAX8998_REG_LDO14), SAVE_ITEM(MAX8998_REG_LDO15), SAVE_ITEM(MAX8998_REG_LDO16), SAVE_ITEM(MAX8998_REG_LDO17), SAVE_ITEM(MAX8998_REG_BKCHR), SAVE_ITEM(MAX8998_REG_LBCNFG1), SAVE_ITEM(MAX8998_REG_LBCNFG2), }; /* Save registers before hibernation */ static int max8998_freeze(struct device *dev) { struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); int i; for (i = 0; i < ARRAY_SIZE(max8998_dump); i++) max8998_read_reg(i2c, max8998_dump[i].addr, &max8998_dump[i].val); return 0; } /* Restore registers after hibernation */ static int max8998_restore(struct device *dev) { struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); int i; for (i = 0; i < ARRAY_SIZE(max8998_dump); i++) max8998_write_reg(i2c, max8998_dump[i].addr, 
max8998_dump[i].val); return 0; } static const struct dev_pm_ops max8998_pm = { .suspend = max8998_suspend, .resume = max8998_resume, .freeze = max8998_freeze, .restore = max8998_restore, }; static struct i2c_driver max8998_i2c_driver = { .driver = { .name = "max8998", .owner = THIS_MODULE, .pm = &max8998_pm, }, .probe = max8998_i2c_probe, .remove = max8998_i2c_remove, .id_table = max8998_i2c_id, }; static int __init max8998_i2c_init(void) { return i2c_add_driver(&max8998_i2c_driver); } /* init early so consumer devices can complete system boot */ subsys_initcall(max8998_i2c_init); static void __exit max8998_i2c_exit(void) { i2c_del_driver(&max8998_i2c_driver); } module_exit(max8998_i2c_exit); MODULE_DESCRIPTION("MAXIM 8998 multi-function core driver"); MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>"); MODULE_LICENSE("GPL");
gpl-2.0
davidmueller13/davidskernel_lt03lte_tw_5.1.1
drivers/net/wireless/ath/ath5k/qcu.c
4413
20852
/* * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ /********************************************\ Queue Control Unit, DCF Control Unit Functions \********************************************/ #include "ath5k.h" #include "reg.h" #include "debug.h" #include <linux/log2.h> /** * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions * * Here we setup parameters for the 12 available TX queues. Note that * on the various registers we can usually only map the first 10 of them so * basically we have 10 queues to play with. Each queue has a matching * QCU that controls when the queue will get triggered and multiple QCUs * can be mapped to a single DCU that controls the various DFS parameters * for the various queues. In our setup we have a 1:1 mapping between QCUs * and DCUs allowing us to have different DFS settings for each queue. 
* * When a frame goes into a TX queue, QCU decides when it'll trigger a * transmission based on various criteria (such as how many data we have inside * it's buffer or -if it's a beacon queue- if it's time to fire up the queue * based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler * (arbitrator) decides the priority of each QCU based on it's configuration * (e.g. beacons are always transmitted when they leave DCU bypassing all other * frames from other queues waiting to be transmitted). After a frame leaves * the DCU it goes to PCU for further processing and then to PHY for * the actual transmission. */ /******************\ * Helper functions * \******************/ /** * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue * @ah: The &struct ath5k_hw * @queue: One of enum ath5k_tx_queue_id */ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) { u32 pending; AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* Return if queue is declared inactive */ if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) return false; /* XXX: How about AR5K_CFG_TXCNT ? */ if (ah->ah_version == AR5K_AR5210) return false; pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue)); pending &= AR5K_QCU_STS_FRMPENDCNT; /* It's possible to have no frames pending even if TXE * is set. 
To indicate that q has not stopped return * true */ if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) return true; return pending; } /** * ath5k_hw_release_tx_queue() - Set a transmit queue inactive * @ah: The &struct ath5k_hw * @queue: One of enum ath5k_tx_queue_id */ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) { if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) return; /* This queue will be skipped in further operations */ ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE; /*For SIMR setup*/ AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue); } /** * ath5k_cw_validate() - Make sure the given cw is valid * @cw_req: The contention window value to check * * Make sure cw is a power of 2 minus 1 and smaller than 1024 */ static u16 ath5k_cw_validate(u16 cw_req) { cw_req = min(cw_req, (u16)1023); /* Check if cw_req + 1 a power of 2 */ if (is_power_of_2(cw_req + 1)) return cw_req; /* Check if cw_req is a power of 2 */ if (is_power_of_2(cw_req)) return cw_req - 1; /* If none of the above is correct * find the closest power of 2 */ cw_req = (u16) roundup_pow_of_two(cw_req) - 1; return cw_req; } /** * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue * @ah: The &struct ath5k_hw * @queue: One of enum ath5k_tx_queue_id * @queue_info: The &struct ath5k_txq_info to fill */ int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info) { memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info)); return 0; } /** * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue * @ah: The &struct ath5k_hw * @queue: One of enum ath5k_tx_queue_id * @qinfo: The &struct ath5k_txq_info to use * * Returns 0 on success or -EIO if queue is inactive */ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, const struct ath5k_txq_info *qinfo) { struct ath5k_txq_info *qi; AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); qi = &ah->ah_txq[queue]; if 
(qi->tqi_type == AR5K_TX_QUEUE_INACTIVE) return -EIO; /* copy and validate values */ qi->tqi_type = qinfo->tqi_type; qi->tqi_subtype = qinfo->tqi_subtype; qi->tqi_flags = qinfo->tqi_flags; /* * According to the docs: Although the AIFS field is 8 bit wide, * the maximum supported value is 0xFC. Setting it higher than that * will cause the DCU to hang. */ qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC); qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min); qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max); qi->tqi_cbr_period = qinfo->tqi_cbr_period; qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit; qi->tqi_burst_time = qinfo->tqi_burst_time; qi->tqi_ready_time = qinfo->tqi_ready_time; /*XXX: Is this supported on 5210 ?*/ /*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/ if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA && ((qinfo->tqi_subtype == AR5K_WME_AC_VI) || (qinfo->tqi_subtype == AR5K_WME_AC_VO))) || qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD) qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS; return 0; } /** * ath5k_hw_setup_tx_queue() - Initialize a transmit queue * @ah: The &struct ath5k_hw * @queue_type: One of enum ath5k_tx_queue * @queue_info: The &struct ath5k_txq_info to use * * Returns 0 on success, -EINVAL on invalid arguments */ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, struct ath5k_txq_info *queue_info) { unsigned int queue; int ret; /* * Get queue by type */ /* 5210 only has 2 queues */ if (ah->ah_capabilities.cap_queues.q_tx_num == 2) { switch (queue_type) { case AR5K_TX_QUEUE_DATA: queue = AR5K_TX_QUEUE_ID_NOQCU_DATA; break; case AR5K_TX_QUEUE_BEACON: case AR5K_TX_QUEUE_CAB: queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON; break; default: return -EINVAL; } } else { switch (queue_type) { case AR5K_TX_QUEUE_DATA: for (queue = AR5K_TX_QUEUE_ID_DATA_MIN; ah->ah_txq[queue].tqi_type != AR5K_TX_QUEUE_INACTIVE; queue++) { if (queue > AR5K_TX_QUEUE_ID_DATA_MAX) return -EINVAL; } break; case AR5K_TX_QUEUE_UAPSD: queue = 
AR5K_TX_QUEUE_ID_UAPSD; break; case AR5K_TX_QUEUE_BEACON: queue = AR5K_TX_QUEUE_ID_BEACON; break; case AR5K_TX_QUEUE_CAB: queue = AR5K_TX_QUEUE_ID_CAB; break; default: return -EINVAL; } } /* * Setup internal queue structure */ memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info)); ah->ah_txq[queue].tqi_type = queue_type; if (queue_info != NULL) { queue_info->tqi_type = queue_type; ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info); if (ret) return ret; } /* * We use ah_txq_status to hold a temp value for * the Secondary interrupt mask registers on 5211+ * check out ath5k_hw_reset_tx_queue */ AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue); return queue; } /*******************************\ * Single QCU/DCU initialization * \*******************************/ /** * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU * @ah: The &struct ath5k_hw * @queue: One of enum ath5k_tx_queue_id * * This function is used when initializing a queue, to set * retry limits based on ah->ah_retry_* and the chipset used. 
*/ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, unsigned int queue) { /* Single data queue on AR5210 */ if (ah->ah_version == AR5K_AR5210) { struct ath5k_txq_info *tq = &ah->ah_txq[queue]; if (queue > 0) return; ath5k_hw_reg_write(ah, (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S) | AR5K_REG_SM(ah->ah_retry_long, AR5K_NODCU_RETRY_LMT_SLG_RETRY) | AR5K_REG_SM(ah->ah_retry_short, AR5K_NODCU_RETRY_LMT_SSH_RETRY) | AR5K_REG_SM(ah->ah_retry_long, AR5K_NODCU_RETRY_LMT_LG_RETRY) | AR5K_REG_SM(ah->ah_retry_short, AR5K_NODCU_RETRY_LMT_SH_RETRY), AR5K_NODCU_RETRY_LMT); /* DCU on AR5211+ */ } else { ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_retry_long, AR5K_DCU_RETRY_LMT_RTS) | AR5K_REG_SM(ah->ah_retry_long, AR5K_DCU_RETRY_LMT_STA_RTS) | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short), AR5K_DCU_RETRY_LMT_STA_DATA), AR5K_QUEUE_DFS_RETRY_LIMIT(queue)); } } /** * ath5k_hw_reset_tx_queue() - Initialize a single hw queue * @ah: The &struct ath5k_hw * @queue: One of enum ath5k_tx_queue_id * * Set DCF properties for the given transmit queue on DCU * and configures all queue-specific parameters. */ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) { struct ath5k_txq_info *tq = &ah->ah_txq[queue]; AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); tq = &ah->ah_txq[queue]; /* Skip if queue inactive or if we are on AR5210 * that doesn't have QCU/DCU */ if ((ah->ah_version == AR5K_AR5210) || (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)) return 0; /* * Set contention window (cw_min/cw_max) * and arbitrated interframe space (aifs)... 
*/ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) | AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) | AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS), AR5K_QUEUE_DFS_LOCAL_IFS(queue)); /* * Set tx retry limits for this queue */ ath5k_hw_set_tx_retry_limits(ah, queue); /* * Set misc registers */ /* Enable DCU to wait for next fragment from QCU */ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), AR5K_DCU_MISC_FRAG_WAIT); /* On Maui and Spirit use the global seqnum on DCU */ if (ah->ah_mac_version < AR5K_SREV_AR5211) AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), AR5K_DCU_MISC_SEQNUM_CTL); /* Constant bit rate period */ if (tq->tqi_cbr_period) { ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period, AR5K_QCU_CBRCFG_INTVAL) | AR5K_REG_SM(tq->tqi_cbr_overflow_limit, AR5K_QCU_CBRCFG_ORN_THRES), AR5K_QUEUE_CBRCFG(queue)); AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), AR5K_QCU_MISC_FRSHED_CBR); if (tq->tqi_cbr_overflow_limit) AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), AR5K_QCU_MISC_CBR_THRES_ENABLE); } /* Ready time interval */ if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB)) ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time, AR5K_QCU_RDYTIMECFG_INTVAL) | AR5K_QCU_RDYTIMECFG_ENABLE, AR5K_QUEUE_RDYTIMECFG(queue)); if (tq->tqi_burst_time) { ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time, AR5K_DCU_CHAN_TIME_DUR) | AR5K_DCU_CHAN_TIME_ENABLE, AR5K_QUEUE_DFS_CHANNEL_TIME(queue)); if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE) AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), AR5K_QCU_MISC_RDY_VEOL_POLICY); } /* Enable/disable Post frame backoff */ if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE) ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS, AR5K_QUEUE_DFS_MISC(queue)); /* Enable/disable fragmentation burst backoff */ if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG, AR5K_QUEUE_DFS_MISC(queue)); /* * Set registers by 
queue type */ switch (tq->tqi_type) { case AR5K_TX_QUEUE_BEACON: AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), AR5K_QCU_MISC_FRSHED_DBA_GT | AR5K_QCU_MISC_CBREXP_BCN_DIS | AR5K_QCU_MISC_BCN_ENABLE); AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << AR5K_DCU_MISC_ARBLOCK_CTL_S) | AR5K_DCU_MISC_ARBLOCK_IGNORE | AR5K_DCU_MISC_POST_FR_BKOFF_DIS | AR5K_DCU_MISC_BCN_ENABLE); break; case AR5K_TX_QUEUE_CAB: /* XXX: use BCN_SENT_GT, if we can figure out how */ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), AR5K_QCU_MISC_FRSHED_DBA_GT | AR5K_QCU_MISC_CBREXP_DIS | AR5K_QCU_MISC_CBREXP_BCN_DIS); ath5k_hw_reg_write(ah, ((tq->tqi_ready_time - (AR5K_TUNE_SW_BEACON_RESP - AR5K_TUNE_DMA_BEACON_RESP) - AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) | AR5K_QCU_RDYTIMECFG_ENABLE, AR5K_QUEUE_RDYTIMECFG(queue)); AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << AR5K_DCU_MISC_ARBLOCK_CTL_S)); break; case AR5K_TX_QUEUE_UAPSD: AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), AR5K_QCU_MISC_CBREXP_DIS); break; case AR5K_TX_QUEUE_DATA: default: break; } /* TODO: Handle frame compression */ /* * Enable interrupts for this tx queue * in the secondary interrupt mask registers */ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE) AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue); if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE) AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue); if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE) AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue); if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE) AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue); if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE) AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue); if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE) AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue); if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE) AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue); if (tq->tqi_flags & 
AR5K_TXQ_FLAG_QTRIGINT_ENABLE) AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue); if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE) AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue); /* Update secondary interrupt mask registers */ /* Filter out inactive queues */ ah->ah_txq_imr_txok &= ah->ah_txq_status; ah->ah_txq_imr_txerr &= ah->ah_txq_status; ah->ah_txq_imr_txurn &= ah->ah_txq_status; ah->ah_txq_imr_txdesc &= ah->ah_txq_status; ah->ah_txq_imr_txeol &= ah->ah_txq_status; ah->ah_txq_imr_cbrorn &= ah->ah_txq_status; ah->ah_txq_imr_cbrurn &= ah->ah_txq_status; ah->ah_txq_imr_qtrig &= ah->ah_txq_status; ah->ah_txq_imr_nofrm &= ah->ah_txq_status; ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok, AR5K_SIMR0_QCU_TXOK) | AR5K_REG_SM(ah->ah_txq_imr_txdesc, AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0); ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr, AR5K_SIMR1_QCU_TXERR) | AR5K_REG_SM(ah->ah_txq_imr_txeol, AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1); /* Update SIMR2 but don't overwrite rest simr2 settings */ AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN); AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, AR5K_REG_SM(ah->ah_txq_imr_txurn, AR5K_SIMR2_QCU_TXURN)); ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn, AR5K_SIMR3_QCBRORN) | AR5K_REG_SM(ah->ah_txq_imr_cbrurn, AR5K_SIMR3_QCBRURN), AR5K_SIMR3); ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig, AR5K_SIMR4_QTRIG), AR5K_SIMR4); /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm, AR5K_TXNOFRM_QCU), AR5K_TXNOFRM); /* No queue has TXNOFRM enabled, disable the interrupt * by setting AR5K_TXNOFRM to zero */ if (ah->ah_txq_imr_nofrm == 0) ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM); /* Set QCU mask for this DCU to save power */ AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue); return 0; } /**************************\ * Global QCU/DCU functions * \**************************/ /** * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU * 
@ah: The &struct ath5k_hw * @slot_time: Slot time in us * * Sets the global IFS intervals on DCU (also works on AR5210) for * the given slot time and the current bwmode. */ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) { struct ieee80211_channel *channel = ah->ah_current_channel; struct ieee80211_rate *rate; u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock; u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX) return -EINVAL; sifs = ath5k_hw_get_default_sifs(ah); sifs_clock = ath5k_hw_htoclock(ah, sifs - 2); /* EIFS * Txtime of ack at lowest rate + SIFS + DIFS * (DIFS = SIFS + 2 * Slot time) * * Note: HAL has some predefined values for EIFS * Turbo: (37 + 2 * 6) * Default: (74 + 2 * 9) * Half: (149 + 2 * 13) * Quarter: (298 + 2 * 21) * * (74 + 2 * 6) for AR5210 default and turbo ! * * According to the formula we have * ack_tx_time = 25 for turbo and * ack_tx_time = 42.5 * clock multiplier * for default/half/quarter. * * This can't be right, 42 is what we would get * from ath5k_hw_get_frame_dur_for_bwmode or * ieee80211_generic_frame_duration for zero frame * length and without SIFS ! 
* * Also we have different lowest rate for 802.11a */ if (channel->band == IEEE80211_BAND_5GHZ) rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0]; else rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0]; ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false); /* ack_tx_time includes an SIFS already */ eifs = ack_tx_time + sifs + 2 * slot_time; eifs_clock = ath5k_hw_htoclock(ah, eifs); /* Set IFS settings on AR5210 */ if (ah->ah_version == AR5K_AR5210) { u32 pifs, pifs_clock, difs, difs_clock; /* Set slot time */ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME); /* Set EIFS */ eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS); /* PIFS = Slot time + SIFS */ pifs = slot_time + sifs; pifs_clock = ath5k_hw_htoclock(ah, pifs); pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS); /* DIFS = SIFS + 2 * Slot time */ difs = sifs + 2 * slot_time; difs_clock = ath5k_hw_htoclock(ah, difs); /* Set SIFS/DIFS */ ath5k_hw_reg_write(ah, (difs_clock << AR5K_IFS0_DIFS_S) | sifs_clock, AR5K_IFS0); /* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */ ath5k_hw_reg_write(ah, pifs_clock | eifs_clock | (AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S), AR5K_IFS1); return 0; } /* Set IFS slot time */ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT); /* Set EIFS interval */ ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS); /* Set SIFS interval in usecs */ AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC, AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC, sifs); /* Set SIFS interval in clock cycles */ ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS); return 0; } /** * ath5k_hw_init_queues() - Initialize tx queues * @ah: The &struct ath5k_hw * * Initializes all tx queues based on information on * ah->ah_txq* set by the driver */ int ath5k_hw_init_queues(struct ath5k_hw *ah) { int i, ret; /* TODO: HW Compression support for data queues */ /* TODO: Burst prefetch for data queues */ /* * Reset queues and start beacon timers at the end of the reset routine * 
This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping * Note: If we want we can assign multiple qcus on one dcu. */ if (ah->ah_version != AR5K_AR5210) for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) { ret = ath5k_hw_reset_tx_queue(ah, i); if (ret) { ATH5K_ERR(ah, "failed to reset TX queue #%d\n", i); return ret; } } else /* No QCU/DCU on AR5210, just set tx * retry limits. We set IFS parameters * on ath5k_hw_set_ifs_intervals */ ath5k_hw_set_tx_retry_limits(ah, 0); /* Set the turbo flag when operating on 40MHz */ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC, AR5K_DCU_GBL_IFS_MISC_TURBO_MODE); /* If we didn't set IFS timings through * ath5k_hw_set_coverage_class make sure * we set them here */ if (!ah->ah_coverage_class) { unsigned int slot_time = ath5k_hw_get_default_slottime(ah); ath5k_hw_set_ifs_intervals(ah, slot_time); } return 0; }
gpl-2.0
nixholas/msm-nexus5
drivers/rtc/rtc-pxa.c
4925
11478
/* * Real Time Clock interface for XScale PXA27x and PXA3xx * * Copyright (C) 2008 Robert Jarzmik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/rtc.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/slab.h> #include <mach/hardware.h> #define TIMER_FREQ CLOCK_TICK_RATE #define RTC_DEF_DIVIDER (32768 - 1) #define RTC_DEF_TRIM 0 #define MAXFREQ_PERIODIC 1000 /* * PXA Registers and bits definitions */ #define RTSR_PICE (1 << 15) /* Periodic interrupt count enable */ #define RTSR_PIALE (1 << 14) /* Periodic interrupt Alarm enable */ #define RTSR_PIAL (1 << 13) /* Periodic interrupt detected */ #define RTSR_SWALE2 (1 << 11) /* RTC stopwatch alarm2 enable */ #define RTSR_SWAL2 (1 << 10) /* RTC stopwatch alarm2 detected */ #define RTSR_SWALE1 (1 << 9) /* RTC stopwatch alarm1 enable */ #define RTSR_SWAL1 (1 << 8) /* RTC stopwatch alarm1 detected */ #define RTSR_RDALE2 (1 << 7) /* RTC alarm2 enable */ #define RTSR_RDAL2 (1 << 6) /* RTC alarm2 detected */ #define RTSR_RDALE1 (1 << 5) /* RTC alarm1 enable */ #define RTSR_RDAL1 (1 << 4) /* RTC alarm1 detected */ #define RTSR_HZE (1 << 3) /* HZ interrupt enable */ #define RTSR_ALE (1 << 2) /* RTC alarm interrupt enable */ #define RTSR_HZ 
(1 << 1) /* HZ rising-edge detected */ #define RTSR_AL (1 << 0) /* RTC alarm detected */ #define RTSR_TRIG_MASK (RTSR_AL | RTSR_HZ | RTSR_RDAL1 | RTSR_RDAL2\ | RTSR_SWAL1 | RTSR_SWAL2) #define RYxR_YEAR_S 9 #define RYxR_YEAR_MASK (0xfff << RYxR_YEAR_S) #define RYxR_MONTH_S 5 #define RYxR_MONTH_MASK (0xf << RYxR_MONTH_S) #define RYxR_DAY_MASK 0x1f #define RDxR_HOUR_S 12 #define RDxR_HOUR_MASK (0x1f << RDxR_HOUR_S) #define RDxR_MIN_S 6 #define RDxR_MIN_MASK (0x3f << RDxR_MIN_S) #define RDxR_SEC_MASK 0x3f #define RTSR 0x08 #define RTTR 0x0c #define RDCR 0x10 #define RYCR 0x14 #define RDAR1 0x18 #define RYAR1 0x1c #define RTCPICR 0x34 #define PIAR 0x38 #define rtc_readl(pxa_rtc, reg) \ __raw_readl((pxa_rtc)->base + (reg)) #define rtc_writel(pxa_rtc, reg, value) \ __raw_writel((value), (pxa_rtc)->base + (reg)) struct pxa_rtc { struct resource *ress; void __iomem *base; int irq_1Hz; int irq_Alrm; struct rtc_device *rtc; spinlock_t lock; /* Protects this structure */ }; static u32 ryxr_calc(struct rtc_time *tm) { return ((tm->tm_year + 1900) << RYxR_YEAR_S) | ((tm->tm_mon + 1) << RYxR_MONTH_S) | tm->tm_mday; } static u32 rdxr_calc(struct rtc_time *tm) { return (tm->tm_hour << RDxR_HOUR_S) | (tm->tm_min << RDxR_MIN_S) | tm->tm_sec; } static void tm_calc(u32 rycr, u32 rdcr, struct rtc_time *tm) { tm->tm_year = ((rycr & RYxR_YEAR_MASK) >> RYxR_YEAR_S) - 1900; tm->tm_mon = (((rycr & RYxR_MONTH_MASK) >> RYxR_MONTH_S)) - 1; tm->tm_mday = (rycr & RYxR_DAY_MASK); tm->tm_hour = (rdcr & RDxR_HOUR_MASK) >> RDxR_HOUR_S; tm->tm_min = (rdcr & RDxR_MIN_MASK) >> RDxR_MIN_S; tm->tm_sec = rdcr & RDxR_SEC_MASK; } static void rtsr_clear_bits(struct pxa_rtc *pxa_rtc, u32 mask) { u32 rtsr; rtsr = rtc_readl(pxa_rtc, RTSR); rtsr &= ~RTSR_TRIG_MASK; rtsr &= ~mask; rtc_writel(pxa_rtc, RTSR, rtsr); } static void rtsr_set_bits(struct pxa_rtc *pxa_rtc, u32 mask) { u32 rtsr; rtsr = rtc_readl(pxa_rtc, RTSR); rtsr &= ~RTSR_TRIG_MASK; rtsr |= mask; rtc_writel(pxa_rtc, RTSR, rtsr); } static irqreturn_t 
pxa_rtc_irq(int irq, void *dev_id) { struct platform_device *pdev = to_platform_device(dev_id); struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev); u32 rtsr; unsigned long events = 0; spin_lock(&pxa_rtc->lock); /* clear interrupt sources */ rtsr = rtc_readl(pxa_rtc, RTSR); rtc_writel(pxa_rtc, RTSR, rtsr); /* temporary disable rtc interrupts */ rtsr_clear_bits(pxa_rtc, RTSR_RDALE1 | RTSR_PIALE | RTSR_HZE); /* clear alarm interrupt if it has occurred */ if (rtsr & RTSR_RDAL1) rtsr &= ~RTSR_RDALE1; /* update irq data & counter */ if (rtsr & RTSR_RDAL1) events |= RTC_AF | RTC_IRQF; if (rtsr & RTSR_HZ) events |= RTC_UF | RTC_IRQF; if (rtsr & RTSR_PIAL) events |= RTC_PF | RTC_IRQF; rtc_update_irq(pxa_rtc->rtc, 1, events); /* enable back rtc interrupts */ rtc_writel(pxa_rtc, RTSR, rtsr & ~RTSR_TRIG_MASK); spin_unlock(&pxa_rtc->lock); return IRQ_HANDLED; } static int pxa_rtc_open(struct device *dev) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); int ret; ret = request_irq(pxa_rtc->irq_1Hz, pxa_rtc_irq, 0, "rtc 1Hz", dev); if (ret < 0) { dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_1Hz, ret); goto err_irq_1Hz; } ret = request_irq(pxa_rtc->irq_Alrm, pxa_rtc_irq, 0, "rtc Alrm", dev); if (ret < 0) { dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_Alrm, ret); goto err_irq_Alrm; } return 0; err_irq_Alrm: free_irq(pxa_rtc->irq_1Hz, dev); err_irq_1Hz: return ret; } static void pxa_rtc_release(struct device *dev) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); spin_lock_irq(&pxa_rtc->lock); rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE); spin_unlock_irq(&pxa_rtc->lock); free_irq(pxa_rtc->irq_Alrm, dev); free_irq(pxa_rtc->irq_1Hz, dev); } static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); spin_lock_irq(&pxa_rtc->lock); if (enabled) rtsr_set_bits(pxa_rtc, RTSR_RDALE1); else rtsr_clear_bits(pxa_rtc, RTSR_RDALE1); spin_unlock_irq(&pxa_rtc->lock); return 0; } static int 
pxa_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); u32 rycr, rdcr; rycr = rtc_readl(pxa_rtc, RYCR); rdcr = rtc_readl(pxa_rtc, RDCR); tm_calc(rycr, rdcr, tm); return 0; } static int pxa_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); rtc_writel(pxa_rtc, RYCR, ryxr_calc(tm)); rtc_writel(pxa_rtc, RDCR, rdxr_calc(tm)); return 0; } static int pxa_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); u32 rtsr, ryar, rdar; ryar = rtc_readl(pxa_rtc, RYAR1); rdar = rtc_readl(pxa_rtc, RDAR1); tm_calc(ryar, rdar, &alrm->time); rtsr = rtc_readl(pxa_rtc, RTSR); alrm->enabled = (rtsr & RTSR_RDALE1) ? 1 : 0; alrm->pending = (rtsr & RTSR_RDAL1) ? 1 : 0; return 0; } static int pxa_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); u32 rtsr; spin_lock_irq(&pxa_rtc->lock); rtc_writel(pxa_rtc, RYAR1, ryxr_calc(&alrm->time)); rtc_writel(pxa_rtc, RDAR1, rdxr_calc(&alrm->time)); rtsr = rtc_readl(pxa_rtc, RTSR); if (alrm->enabled) rtsr |= RTSR_RDALE1; else rtsr &= ~RTSR_RDALE1; rtc_writel(pxa_rtc, RTSR, rtsr); spin_unlock_irq(&pxa_rtc->lock); return 0; } static int pxa_rtc_proc(struct device *dev, struct seq_file *seq) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); seq_printf(seq, "trim/divider\t: 0x%08x\n", rtc_readl(pxa_rtc, RTTR)); seq_printf(seq, "update_IRQ\t: %s\n", (rtc_readl(pxa_rtc, RTSR) & RTSR_HZE) ? "yes" : "no"); seq_printf(seq, "periodic_IRQ\t: %s\n", (rtc_readl(pxa_rtc, RTSR) & RTSR_PIALE) ? 
"yes" : "no"); seq_printf(seq, "periodic_freq\t: %u\n", rtc_readl(pxa_rtc, PIAR)); return 0; } static const struct rtc_class_ops pxa_rtc_ops = { .open = pxa_rtc_open, .release = pxa_rtc_release, .read_time = pxa_rtc_read_time, .set_time = pxa_rtc_set_time, .read_alarm = pxa_rtc_read_alarm, .set_alarm = pxa_rtc_set_alarm, .alarm_irq_enable = pxa_alarm_irq_enable, .proc = pxa_rtc_proc, }; static int __init pxa_rtc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct pxa_rtc *pxa_rtc; int ret; u32 rttr; pxa_rtc = kzalloc(sizeof(struct pxa_rtc), GFP_KERNEL); if (!pxa_rtc) return -ENOMEM; spin_lock_init(&pxa_rtc->lock); platform_set_drvdata(pdev, pxa_rtc); ret = -ENXIO; pxa_rtc->ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!pxa_rtc->ress) { dev_err(dev, "No I/O memory resource defined\n"); goto err_ress; } pxa_rtc->irq_1Hz = platform_get_irq(pdev, 0); if (pxa_rtc->irq_1Hz < 0) { dev_err(dev, "No 1Hz IRQ resource defined\n"); goto err_ress; } pxa_rtc->irq_Alrm = platform_get_irq(pdev, 1); if (pxa_rtc->irq_Alrm < 0) { dev_err(dev, "No alarm IRQ resource defined\n"); goto err_ress; } ret = -ENOMEM; pxa_rtc->base = ioremap(pxa_rtc->ress->start, resource_size(pxa_rtc->ress)); if (!pxa_rtc->base) { dev_err(&pdev->dev, "Unable to map pxa RTC I/O memory\n"); goto err_map; } /* * If the clock divider is uninitialized then reset it to the * default value to get the 1Hz clock. 
*/ if (rtc_readl(pxa_rtc, RTTR) == 0) { rttr = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16); rtc_writel(pxa_rtc, RTTR, rttr); dev_warn(dev, "warning: initializing default clock" " divider/trim value\n"); } rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE); pxa_rtc->rtc = rtc_device_register("pxa-rtc", &pdev->dev, &pxa_rtc_ops, THIS_MODULE); ret = PTR_ERR(pxa_rtc->rtc); if (IS_ERR(pxa_rtc->rtc)) { dev_err(dev, "Failed to register RTC device -> %d\n", ret); goto err_rtc_reg; } device_init_wakeup(dev, 1); return 0; err_rtc_reg: iounmap(pxa_rtc->base); err_ress: err_map: kfree(pxa_rtc); return ret; } static int __exit pxa_rtc_remove(struct platform_device *pdev) { struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev); rtc_device_unregister(pxa_rtc->rtc); spin_lock_irq(&pxa_rtc->lock); iounmap(pxa_rtc->base); spin_unlock_irq(&pxa_rtc->lock); kfree(pxa_rtc); return 0; } #ifdef CONFIG_PM static int pxa_rtc_suspend(struct device *dev) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); if (device_may_wakeup(dev)) enable_irq_wake(pxa_rtc->irq_Alrm); return 0; } static int pxa_rtc_resume(struct device *dev) { struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); if (device_may_wakeup(dev)) disable_irq_wake(pxa_rtc->irq_Alrm); return 0; } static const struct dev_pm_ops pxa_rtc_pm_ops = { .suspend = pxa_rtc_suspend, .resume = pxa_rtc_resume, }; #endif static struct platform_driver pxa_rtc_driver = { .remove = __exit_p(pxa_rtc_remove), .driver = { .name = "pxa-rtc", #ifdef CONFIG_PM .pm = &pxa_rtc_pm_ops, #endif }, }; static int __init pxa_rtc_init(void) { if (cpu_is_pxa27x() || cpu_is_pxa3xx()) return platform_driver_probe(&pxa_rtc_driver, pxa_rtc_probe); return -ENODEV; } static void __exit pxa_rtc_exit(void) { platform_driver_unregister(&pxa_rtc_driver); } module_init(pxa_rtc_init); module_exit(pxa_rtc_exit); MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>"); MODULE_DESCRIPTION("PXA27x/PXA3xx Realtime Clock Driver (RTC)"); MODULE_LICENSE("GPL"); 
MODULE_ALIAS("platform:pxa-rtc");
gpl-2.0
z8cpaul/lsikernel-3.14
arch/arm/mach-ixp4xx/wg302v2-pci.c
4925
1431
/* * arch/arch/mach-ixp4xx/wg302v2-pci.c * * PCI setup routines for the Netgear WG302 v2 and WAG302 v2 * * Copyright (C) 2007 Imre Kaloz <kaloz@openwrt.org> * * based on coyote-pci.c: * Copyright (C) 2002 Jungo Software Technologies. * Copyright (C) 2003 MontaVista Software, Inc. * * Maintainer: Imre Kaloz <kaloz@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/mach/pci.h> void __init wg302v2_pci_preinit(void) { irq_set_irq_type(IRQ_IXP4XX_GPIO8, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IRQ_IXP4XX_GPIO9, IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static int __init wg302v2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (slot == 1) return IRQ_IXP4XX_GPIO8; else if (slot == 2) return IRQ_IXP4XX_GPIO9; else return -1; } struct hw_pci wg302v2_pci __initdata = { .nr_controllers = 1, .ops = &ixp4xx_ops, .preinit = wg302v2_pci_preinit, .setup = ixp4xx_setup, .map_irq = wg302v2_map_irq, }; int __init wg302v2_pci_init(void) { if (machine_is_wg302v2()) pci_common_init(&wg302v2_pci); return 0; } subsys_initcall(wg302v2_pci_init);
gpl-2.0
razrqcom-dev-team/android_kernel_motorola_msm8226
crypto/wp512.c
5181
61536
/* * Cryptographic API. * * Whirlpool hashing Algorithm * * The Whirlpool algorithm was developed by Paulo S. L. M. Barreto and * Vincent Rijmen. It has been selected as one of cryptographic * primitives by the NESSIE project http://www.cryptonessie.org/ * * The original authors have disclaimed all copyright interest in this * code and thus put it in the public domain. The subsequent authors * have put this under the GNU General Public License. * * By Aaron Grothe ajgrothe@yahoo.com, August 23, 2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/types.h> #define WP512_DIGEST_SIZE 64 #define WP384_DIGEST_SIZE 48 #define WP256_DIGEST_SIZE 32 #define WP512_BLOCK_SIZE 64 #define WP512_LENGTHBYTES 32 #define WHIRLPOOL_ROUNDS 10 struct wp512_ctx { u8 bitLength[WP512_LENGTHBYTES]; u8 buffer[WP512_BLOCK_SIZE]; int bufferBits; int bufferPos; u64 hash[WP512_DIGEST_SIZE/8]; }; /* * Though Whirlpool is endianness-neutral, the encryption tables are listed * in BIG-ENDIAN format, which is adopted throughout this implementation * (but little-endian notation would be equally suitable if consistently * employed). 
*/ static const u64 C0[256] = { 0x18186018c07830d8ULL, 0x23238c2305af4626ULL, 0xc6c63fc67ef991b8ULL, 0xe8e887e8136fcdfbULL, 0x878726874ca113cbULL, 0xb8b8dab8a9626d11ULL, 0x0101040108050209ULL, 0x4f4f214f426e9e0dULL, 0x3636d836adee6c9bULL, 0xa6a6a2a6590451ffULL, 0xd2d26fd2debdb90cULL, 0xf5f5f3f5fb06f70eULL, 0x7979f979ef80f296ULL, 0x6f6fa16f5fcede30ULL, 0x91917e91fcef3f6dULL, 0x52525552aa07a4f8ULL, 0x60609d6027fdc047ULL, 0xbcbccabc89766535ULL, 0x9b9b569baccd2b37ULL, 0x8e8e028e048c018aULL, 0xa3a3b6a371155bd2ULL, 0x0c0c300c603c186cULL, 0x7b7bf17bff8af684ULL, 0x3535d435b5e16a80ULL, 0x1d1d741de8693af5ULL, 0xe0e0a7e05347ddb3ULL, 0xd7d77bd7f6acb321ULL, 0xc2c22fc25eed999cULL, 0x2e2eb82e6d965c43ULL, 0x4b4b314b627a9629ULL, 0xfefedffea321e15dULL, 0x575741578216aed5ULL, 0x15155415a8412abdULL, 0x7777c1779fb6eee8ULL, 0x3737dc37a5eb6e92ULL, 0xe5e5b3e57b56d79eULL, 0x9f9f469f8cd92313ULL, 0xf0f0e7f0d317fd23ULL, 0x4a4a354a6a7f9420ULL, 0xdada4fda9e95a944ULL, 0x58587d58fa25b0a2ULL, 0xc9c903c906ca8fcfULL, 0x2929a429558d527cULL, 0x0a0a280a5022145aULL, 0xb1b1feb1e14f7f50ULL, 0xa0a0baa0691a5dc9ULL, 0x6b6bb16b7fdad614ULL, 0x85852e855cab17d9ULL, 0xbdbdcebd8173673cULL, 0x5d5d695dd234ba8fULL, 0x1010401080502090ULL, 0xf4f4f7f4f303f507ULL, 0xcbcb0bcb16c08bddULL, 0x3e3ef83eedc67cd3ULL, 0x0505140528110a2dULL, 0x676781671fe6ce78ULL, 0xe4e4b7e47353d597ULL, 0x27279c2725bb4e02ULL, 0x4141194132588273ULL, 0x8b8b168b2c9d0ba7ULL, 0xa7a7a6a7510153f6ULL, 0x7d7de97dcf94fab2ULL, 0x95956e95dcfb3749ULL, 0xd8d847d88e9fad56ULL, 0xfbfbcbfb8b30eb70ULL, 0xeeee9fee2371c1cdULL, 0x7c7ced7cc791f8bbULL, 0x6666856617e3cc71ULL, 0xdddd53dda68ea77bULL, 0x17175c17b84b2eafULL, 0x4747014702468e45ULL, 0x9e9e429e84dc211aULL, 0xcaca0fca1ec589d4ULL, 0x2d2db42d75995a58ULL, 0xbfbfc6bf9179632eULL, 0x07071c07381b0e3fULL, 0xadad8ead012347acULL, 0x5a5a755aea2fb4b0ULL, 0x838336836cb51befULL, 0x3333cc3385ff66b6ULL, 0x636391633ff2c65cULL, 0x02020802100a0412ULL, 0xaaaa92aa39384993ULL, 0x7171d971afa8e2deULL, 0xc8c807c80ecf8dc6ULL, 
0x19196419c87d32d1ULL, 0x494939497270923bULL, 0xd9d943d9869aaf5fULL, 0xf2f2eff2c31df931ULL, 0xe3e3abe34b48dba8ULL, 0x5b5b715be22ab6b9ULL, 0x88881a8834920dbcULL, 0x9a9a529aa4c8293eULL, 0x262698262dbe4c0bULL, 0x3232c8328dfa64bfULL, 0xb0b0fab0e94a7d59ULL, 0xe9e983e91b6acff2ULL, 0x0f0f3c0f78331e77ULL, 0xd5d573d5e6a6b733ULL, 0x80803a8074ba1df4ULL, 0xbebec2be997c6127ULL, 0xcdcd13cd26de87ebULL, 0x3434d034bde46889ULL, 0x48483d487a759032ULL, 0xffffdbffab24e354ULL, 0x7a7af57af78ff48dULL, 0x90907a90f4ea3d64ULL, 0x5f5f615fc23ebe9dULL, 0x202080201da0403dULL, 0x6868bd6867d5d00fULL, 0x1a1a681ad07234caULL, 0xaeae82ae192c41b7ULL, 0xb4b4eab4c95e757dULL, 0x54544d549a19a8ceULL, 0x93937693ece53b7fULL, 0x222288220daa442fULL, 0x64648d6407e9c863ULL, 0xf1f1e3f1db12ff2aULL, 0x7373d173bfa2e6ccULL, 0x12124812905a2482ULL, 0x40401d403a5d807aULL, 0x0808200840281048ULL, 0xc3c32bc356e89b95ULL, 0xecec97ec337bc5dfULL, 0xdbdb4bdb9690ab4dULL, 0xa1a1bea1611f5fc0ULL, 0x8d8d0e8d1c830791ULL, 0x3d3df43df5c97ac8ULL, 0x97976697ccf1335bULL, 0x0000000000000000ULL, 0xcfcf1bcf36d483f9ULL, 0x2b2bac2b4587566eULL, 0x7676c57697b3ece1ULL, 0x8282328264b019e6ULL, 0xd6d67fd6fea9b128ULL, 0x1b1b6c1bd87736c3ULL, 0xb5b5eeb5c15b7774ULL, 0xafaf86af112943beULL, 0x6a6ab56a77dfd41dULL, 0x50505d50ba0da0eaULL, 0x45450945124c8a57ULL, 0xf3f3ebf3cb18fb38ULL, 0x3030c0309df060adULL, 0xefef9bef2b74c3c4ULL, 0x3f3ffc3fe5c37edaULL, 0x55554955921caac7ULL, 0xa2a2b2a2791059dbULL, 0xeaea8fea0365c9e9ULL, 0x656589650fecca6aULL, 0xbabad2bab9686903ULL, 0x2f2fbc2f65935e4aULL, 0xc0c027c04ee79d8eULL, 0xdede5fdebe81a160ULL, 0x1c1c701ce06c38fcULL, 0xfdfdd3fdbb2ee746ULL, 0x4d4d294d52649a1fULL, 0x92927292e4e03976ULL, 0x7575c9758fbceafaULL, 0x06061806301e0c36ULL, 0x8a8a128a249809aeULL, 0xb2b2f2b2f940794bULL, 0xe6e6bfe66359d185ULL, 0x0e0e380e70361c7eULL, 0x1f1f7c1ff8633ee7ULL, 0x6262956237f7c455ULL, 0xd4d477d4eea3b53aULL, 0xa8a89aa829324d81ULL, 0x96966296c4f43152ULL, 0xf9f9c3f99b3aef62ULL, 0xc5c533c566f697a3ULL, 0x2525942535b14a10ULL, 
0x59597959f220b2abULL, 0x84842a8454ae15d0ULL, 0x7272d572b7a7e4c5ULL, 0x3939e439d5dd72ecULL, 0x4c4c2d4c5a619816ULL, 0x5e5e655eca3bbc94ULL, 0x7878fd78e785f09fULL, 0x3838e038ddd870e5ULL, 0x8c8c0a8c14860598ULL, 0xd1d163d1c6b2bf17ULL, 0xa5a5aea5410b57e4ULL, 0xe2e2afe2434dd9a1ULL, 0x616199612ff8c24eULL, 0xb3b3f6b3f1457b42ULL, 0x2121842115a54234ULL, 0x9c9c4a9c94d62508ULL, 0x1e1e781ef0663ceeULL, 0x4343114322528661ULL, 0xc7c73bc776fc93b1ULL, 0xfcfcd7fcb32be54fULL, 0x0404100420140824ULL, 0x51515951b208a2e3ULL, 0x99995e99bcc72f25ULL, 0x6d6da96d4fc4da22ULL, 0x0d0d340d68391a65ULL, 0xfafacffa8335e979ULL, 0xdfdf5bdfb684a369ULL, 0x7e7ee57ed79bfca9ULL, 0x242490243db44819ULL, 0x3b3bec3bc5d776feULL, 0xabab96ab313d4b9aULL, 0xcece1fce3ed181f0ULL, 0x1111441188552299ULL, 0x8f8f068f0c890383ULL, 0x4e4e254e4a6b9c04ULL, 0xb7b7e6b7d1517366ULL, 0xebeb8beb0b60cbe0ULL, 0x3c3cf03cfdcc78c1ULL, 0x81813e817cbf1ffdULL, 0x94946a94d4fe3540ULL, 0xf7f7fbf7eb0cf31cULL, 0xb9b9deb9a1676f18ULL, 0x13134c13985f268bULL, 0x2c2cb02c7d9c5851ULL, 0xd3d36bd3d6b8bb05ULL, 0xe7e7bbe76b5cd38cULL, 0x6e6ea56e57cbdc39ULL, 0xc4c437c46ef395aaULL, 0x03030c03180f061bULL, 0x565645568a13acdcULL, 0x44440d441a49885eULL, 0x7f7fe17fdf9efea0ULL, 0xa9a99ea921374f88ULL, 0x2a2aa82a4d825467ULL, 0xbbbbd6bbb16d6b0aULL, 0xc1c123c146e29f87ULL, 0x53535153a202a6f1ULL, 0xdcdc57dcae8ba572ULL, 0x0b0b2c0b58271653ULL, 0x9d9d4e9d9cd32701ULL, 0x6c6cad6c47c1d82bULL, 0x3131c43195f562a4ULL, 0x7474cd7487b9e8f3ULL, 0xf6f6fff6e309f115ULL, 0x464605460a438c4cULL, 0xacac8aac092645a5ULL, 0x89891e893c970fb5ULL, 0x14145014a04428b4ULL, 0xe1e1a3e15b42dfbaULL, 0x16165816b04e2ca6ULL, 0x3a3ae83acdd274f7ULL, 0x6969b9696fd0d206ULL, 0x09092409482d1241ULL, 0x7070dd70a7ade0d7ULL, 0xb6b6e2b6d954716fULL, 0xd0d067d0ceb7bd1eULL, 0xeded93ed3b7ec7d6ULL, 0xcccc17cc2edb85e2ULL, 0x424215422a578468ULL, 0x98985a98b4c22d2cULL, 0xa4a4aaa4490e55edULL, 0x2828a0285d885075ULL, 0x5c5c6d5cda31b886ULL, 0xf8f8c7f8933fed6bULL, 0x8686228644a411c2ULL, }; static const u64 C1[256] = { 
0xd818186018c07830ULL, 0x2623238c2305af46ULL, 0xb8c6c63fc67ef991ULL, 0xfbe8e887e8136fcdULL, 0xcb878726874ca113ULL, 0x11b8b8dab8a9626dULL, 0x0901010401080502ULL, 0x0d4f4f214f426e9eULL, 0x9b3636d836adee6cULL, 0xffa6a6a2a6590451ULL, 0x0cd2d26fd2debdb9ULL, 0x0ef5f5f3f5fb06f7ULL, 0x967979f979ef80f2ULL, 0x306f6fa16f5fcedeULL, 0x6d91917e91fcef3fULL, 0xf852525552aa07a4ULL, 0x4760609d6027fdc0ULL, 0x35bcbccabc897665ULL, 0x379b9b569baccd2bULL, 0x8a8e8e028e048c01ULL, 0xd2a3a3b6a371155bULL, 0x6c0c0c300c603c18ULL, 0x847b7bf17bff8af6ULL, 0x803535d435b5e16aULL, 0xf51d1d741de8693aULL, 0xb3e0e0a7e05347ddULL, 0x21d7d77bd7f6acb3ULL, 0x9cc2c22fc25eed99ULL, 0x432e2eb82e6d965cULL, 0x294b4b314b627a96ULL, 0x5dfefedffea321e1ULL, 0xd5575741578216aeULL, 0xbd15155415a8412aULL, 0xe87777c1779fb6eeULL, 0x923737dc37a5eb6eULL, 0x9ee5e5b3e57b56d7ULL, 0x139f9f469f8cd923ULL, 0x23f0f0e7f0d317fdULL, 0x204a4a354a6a7f94ULL, 0x44dada4fda9e95a9ULL, 0xa258587d58fa25b0ULL, 0xcfc9c903c906ca8fULL, 0x7c2929a429558d52ULL, 0x5a0a0a280a502214ULL, 0x50b1b1feb1e14f7fULL, 0xc9a0a0baa0691a5dULL, 0x146b6bb16b7fdad6ULL, 0xd985852e855cab17ULL, 0x3cbdbdcebd817367ULL, 0x8f5d5d695dd234baULL, 0x9010104010805020ULL, 0x07f4f4f7f4f303f5ULL, 0xddcbcb0bcb16c08bULL, 0xd33e3ef83eedc67cULL, 0x2d0505140528110aULL, 0x78676781671fe6ceULL, 0x97e4e4b7e47353d5ULL, 0x0227279c2725bb4eULL, 0x7341411941325882ULL, 0xa78b8b168b2c9d0bULL, 0xf6a7a7a6a7510153ULL, 0xb27d7de97dcf94faULL, 0x4995956e95dcfb37ULL, 0x56d8d847d88e9fadULL, 0x70fbfbcbfb8b30ebULL, 0xcdeeee9fee2371c1ULL, 0xbb7c7ced7cc791f8ULL, 0x716666856617e3ccULL, 0x7bdddd53dda68ea7ULL, 0xaf17175c17b84b2eULL, 0x454747014702468eULL, 0x1a9e9e429e84dc21ULL, 0xd4caca0fca1ec589ULL, 0x582d2db42d75995aULL, 0x2ebfbfc6bf917963ULL, 0x3f07071c07381b0eULL, 0xacadad8ead012347ULL, 0xb05a5a755aea2fb4ULL, 0xef838336836cb51bULL, 0xb63333cc3385ff66ULL, 0x5c636391633ff2c6ULL, 0x1202020802100a04ULL, 0x93aaaa92aa393849ULL, 0xde7171d971afa8e2ULL, 0xc6c8c807c80ecf8dULL, 0xd119196419c87d32ULL, 
0x3b49493949727092ULL, 0x5fd9d943d9869aafULL, 0x31f2f2eff2c31df9ULL, 0xa8e3e3abe34b48dbULL, 0xb95b5b715be22ab6ULL, 0xbc88881a8834920dULL, 0x3e9a9a529aa4c829ULL, 0x0b262698262dbe4cULL, 0xbf3232c8328dfa64ULL, 0x59b0b0fab0e94a7dULL, 0xf2e9e983e91b6acfULL, 0x770f0f3c0f78331eULL, 0x33d5d573d5e6a6b7ULL, 0xf480803a8074ba1dULL, 0x27bebec2be997c61ULL, 0xebcdcd13cd26de87ULL, 0x893434d034bde468ULL, 0x3248483d487a7590ULL, 0x54ffffdbffab24e3ULL, 0x8d7a7af57af78ff4ULL, 0x6490907a90f4ea3dULL, 0x9d5f5f615fc23ebeULL, 0x3d202080201da040ULL, 0x0f6868bd6867d5d0ULL, 0xca1a1a681ad07234ULL, 0xb7aeae82ae192c41ULL, 0x7db4b4eab4c95e75ULL, 0xce54544d549a19a8ULL, 0x7f93937693ece53bULL, 0x2f222288220daa44ULL, 0x6364648d6407e9c8ULL, 0x2af1f1e3f1db12ffULL, 0xcc7373d173bfa2e6ULL, 0x8212124812905a24ULL, 0x7a40401d403a5d80ULL, 0x4808082008402810ULL, 0x95c3c32bc356e89bULL, 0xdfecec97ec337bc5ULL, 0x4ddbdb4bdb9690abULL, 0xc0a1a1bea1611f5fULL, 0x918d8d0e8d1c8307ULL, 0xc83d3df43df5c97aULL, 0x5b97976697ccf133ULL, 0x0000000000000000ULL, 0xf9cfcf1bcf36d483ULL, 0x6e2b2bac2b458756ULL, 0xe17676c57697b3ecULL, 0xe68282328264b019ULL, 0x28d6d67fd6fea9b1ULL, 0xc31b1b6c1bd87736ULL, 0x74b5b5eeb5c15b77ULL, 0xbeafaf86af112943ULL, 0x1d6a6ab56a77dfd4ULL, 0xea50505d50ba0da0ULL, 0x5745450945124c8aULL, 0x38f3f3ebf3cb18fbULL, 0xad3030c0309df060ULL, 0xc4efef9bef2b74c3ULL, 0xda3f3ffc3fe5c37eULL, 0xc755554955921caaULL, 0xdba2a2b2a2791059ULL, 0xe9eaea8fea0365c9ULL, 0x6a656589650feccaULL, 0x03babad2bab96869ULL, 0x4a2f2fbc2f65935eULL, 0x8ec0c027c04ee79dULL, 0x60dede5fdebe81a1ULL, 0xfc1c1c701ce06c38ULL, 0x46fdfdd3fdbb2ee7ULL, 0x1f4d4d294d52649aULL, 0x7692927292e4e039ULL, 0xfa7575c9758fbceaULL, 0x3606061806301e0cULL, 0xae8a8a128a249809ULL, 0x4bb2b2f2b2f94079ULL, 0x85e6e6bfe66359d1ULL, 0x7e0e0e380e70361cULL, 0xe71f1f7c1ff8633eULL, 0x556262956237f7c4ULL, 0x3ad4d477d4eea3b5ULL, 0x81a8a89aa829324dULL, 0x5296966296c4f431ULL, 0x62f9f9c3f99b3aefULL, 0xa3c5c533c566f697ULL, 0x102525942535b14aULL, 0xab59597959f220b2ULL, 
0xd084842a8454ae15ULL, 0xc57272d572b7a7e4ULL, 0xec3939e439d5dd72ULL, 0x164c4c2d4c5a6198ULL, 0x945e5e655eca3bbcULL, 0x9f7878fd78e785f0ULL, 0xe53838e038ddd870ULL, 0x988c8c0a8c148605ULL, 0x17d1d163d1c6b2bfULL, 0xe4a5a5aea5410b57ULL, 0xa1e2e2afe2434dd9ULL, 0x4e616199612ff8c2ULL, 0x42b3b3f6b3f1457bULL, 0x342121842115a542ULL, 0x089c9c4a9c94d625ULL, 0xee1e1e781ef0663cULL, 0x6143431143225286ULL, 0xb1c7c73bc776fc93ULL, 0x4ffcfcd7fcb32be5ULL, 0x2404041004201408ULL, 0xe351515951b208a2ULL, 0x2599995e99bcc72fULL, 0x226d6da96d4fc4daULL, 0x650d0d340d68391aULL, 0x79fafacffa8335e9ULL, 0x69dfdf5bdfb684a3ULL, 0xa97e7ee57ed79bfcULL, 0x19242490243db448ULL, 0xfe3b3bec3bc5d776ULL, 0x9aabab96ab313d4bULL, 0xf0cece1fce3ed181ULL, 0x9911114411885522ULL, 0x838f8f068f0c8903ULL, 0x044e4e254e4a6b9cULL, 0x66b7b7e6b7d15173ULL, 0xe0ebeb8beb0b60cbULL, 0xc13c3cf03cfdcc78ULL, 0xfd81813e817cbf1fULL, 0x4094946a94d4fe35ULL, 0x1cf7f7fbf7eb0cf3ULL, 0x18b9b9deb9a1676fULL, 0x8b13134c13985f26ULL, 0x512c2cb02c7d9c58ULL, 0x05d3d36bd3d6b8bbULL, 0x8ce7e7bbe76b5cd3ULL, 0x396e6ea56e57cbdcULL, 0xaac4c437c46ef395ULL, 0x1b03030c03180f06ULL, 0xdc565645568a13acULL, 0x5e44440d441a4988ULL, 0xa07f7fe17fdf9efeULL, 0x88a9a99ea921374fULL, 0x672a2aa82a4d8254ULL, 0x0abbbbd6bbb16d6bULL, 0x87c1c123c146e29fULL, 0xf153535153a202a6ULL, 0x72dcdc57dcae8ba5ULL, 0x530b0b2c0b582716ULL, 0x019d9d4e9d9cd327ULL, 0x2b6c6cad6c47c1d8ULL, 0xa43131c43195f562ULL, 0xf37474cd7487b9e8ULL, 0x15f6f6fff6e309f1ULL, 0x4c464605460a438cULL, 0xa5acac8aac092645ULL, 0xb589891e893c970fULL, 0xb414145014a04428ULL, 0xbae1e1a3e15b42dfULL, 0xa616165816b04e2cULL, 0xf73a3ae83acdd274ULL, 0x066969b9696fd0d2ULL, 0x4109092409482d12ULL, 0xd77070dd70a7ade0ULL, 0x6fb6b6e2b6d95471ULL, 0x1ed0d067d0ceb7bdULL, 0xd6eded93ed3b7ec7ULL, 0xe2cccc17cc2edb85ULL, 0x68424215422a5784ULL, 0x2c98985a98b4c22dULL, 0xeda4a4aaa4490e55ULL, 0x752828a0285d8850ULL, 0x865c5c6d5cda31b8ULL, 0x6bf8f8c7f8933fedULL, 0xc28686228644a411ULL, }; static const u64 C2[256] = { 0x30d818186018c078ULL, 
0x462623238c2305afULL, 0x91b8c6c63fc67ef9ULL, 0xcdfbe8e887e8136fULL, 0x13cb878726874ca1ULL, 0x6d11b8b8dab8a962ULL, 0x0209010104010805ULL, 0x9e0d4f4f214f426eULL, 0x6c9b3636d836adeeULL, 0x51ffa6a6a2a65904ULL, 0xb90cd2d26fd2debdULL, 0xf70ef5f5f3f5fb06ULL, 0xf2967979f979ef80ULL, 0xde306f6fa16f5fceULL, 0x3f6d91917e91fcefULL, 0xa4f852525552aa07ULL, 0xc04760609d6027fdULL, 0x6535bcbccabc8976ULL, 0x2b379b9b569baccdULL, 0x018a8e8e028e048cULL, 0x5bd2a3a3b6a37115ULL, 0x186c0c0c300c603cULL, 0xf6847b7bf17bff8aULL, 0x6a803535d435b5e1ULL, 0x3af51d1d741de869ULL, 0xddb3e0e0a7e05347ULL, 0xb321d7d77bd7f6acULL, 0x999cc2c22fc25eedULL, 0x5c432e2eb82e6d96ULL, 0x96294b4b314b627aULL, 0xe15dfefedffea321ULL, 0xaed5575741578216ULL, 0x2abd15155415a841ULL, 0xeee87777c1779fb6ULL, 0x6e923737dc37a5ebULL, 0xd79ee5e5b3e57b56ULL, 0x23139f9f469f8cd9ULL, 0xfd23f0f0e7f0d317ULL, 0x94204a4a354a6a7fULL, 0xa944dada4fda9e95ULL, 0xb0a258587d58fa25ULL, 0x8fcfc9c903c906caULL, 0x527c2929a429558dULL, 0x145a0a0a280a5022ULL, 0x7f50b1b1feb1e14fULL, 0x5dc9a0a0baa0691aULL, 0xd6146b6bb16b7fdaULL, 0x17d985852e855cabULL, 0x673cbdbdcebd8173ULL, 0xba8f5d5d695dd234ULL, 0x2090101040108050ULL, 0xf507f4f4f7f4f303ULL, 0x8bddcbcb0bcb16c0ULL, 0x7cd33e3ef83eedc6ULL, 0x0a2d050514052811ULL, 0xce78676781671fe6ULL, 0xd597e4e4b7e47353ULL, 0x4e0227279c2725bbULL, 0x8273414119413258ULL, 0x0ba78b8b168b2c9dULL, 0x53f6a7a7a6a75101ULL, 0xfab27d7de97dcf94ULL, 0x374995956e95dcfbULL, 0xad56d8d847d88e9fULL, 0xeb70fbfbcbfb8b30ULL, 0xc1cdeeee9fee2371ULL, 0xf8bb7c7ced7cc791ULL, 0xcc716666856617e3ULL, 0xa77bdddd53dda68eULL, 0x2eaf17175c17b84bULL, 0x8e45474701470246ULL, 0x211a9e9e429e84dcULL, 0x89d4caca0fca1ec5ULL, 0x5a582d2db42d7599ULL, 0x632ebfbfc6bf9179ULL, 0x0e3f07071c07381bULL, 0x47acadad8ead0123ULL, 0xb4b05a5a755aea2fULL, 0x1bef838336836cb5ULL, 0x66b63333cc3385ffULL, 0xc65c636391633ff2ULL, 0x041202020802100aULL, 0x4993aaaa92aa3938ULL, 0xe2de7171d971afa8ULL, 0x8dc6c8c807c80ecfULL, 0x32d119196419c87dULL, 0x923b494939497270ULL, 
0xaf5fd9d943d9869aULL, 0xf931f2f2eff2c31dULL, 0xdba8e3e3abe34b48ULL, 0xb6b95b5b715be22aULL, 0x0dbc88881a883492ULL, 0x293e9a9a529aa4c8ULL, 0x4c0b262698262dbeULL, 0x64bf3232c8328dfaULL, 0x7d59b0b0fab0e94aULL, 0xcff2e9e983e91b6aULL, 0x1e770f0f3c0f7833ULL, 0xb733d5d573d5e6a6ULL, 0x1df480803a8074baULL, 0x6127bebec2be997cULL, 0x87ebcdcd13cd26deULL, 0x68893434d034bde4ULL, 0x903248483d487a75ULL, 0xe354ffffdbffab24ULL, 0xf48d7a7af57af78fULL, 0x3d6490907a90f4eaULL, 0xbe9d5f5f615fc23eULL, 0x403d202080201da0ULL, 0xd00f6868bd6867d5ULL, 0x34ca1a1a681ad072ULL, 0x41b7aeae82ae192cULL, 0x757db4b4eab4c95eULL, 0xa8ce54544d549a19ULL, 0x3b7f93937693ece5ULL, 0x442f222288220daaULL, 0xc86364648d6407e9ULL, 0xff2af1f1e3f1db12ULL, 0xe6cc7373d173bfa2ULL, 0x248212124812905aULL, 0x807a40401d403a5dULL, 0x1048080820084028ULL, 0x9b95c3c32bc356e8ULL, 0xc5dfecec97ec337bULL, 0xab4ddbdb4bdb9690ULL, 0x5fc0a1a1bea1611fULL, 0x07918d8d0e8d1c83ULL, 0x7ac83d3df43df5c9ULL, 0x335b97976697ccf1ULL, 0x0000000000000000ULL, 0x83f9cfcf1bcf36d4ULL, 0x566e2b2bac2b4587ULL, 0xece17676c57697b3ULL, 0x19e68282328264b0ULL, 0xb128d6d67fd6fea9ULL, 0x36c31b1b6c1bd877ULL, 0x7774b5b5eeb5c15bULL, 0x43beafaf86af1129ULL, 0xd41d6a6ab56a77dfULL, 0xa0ea50505d50ba0dULL, 0x8a5745450945124cULL, 0xfb38f3f3ebf3cb18ULL, 0x60ad3030c0309df0ULL, 0xc3c4efef9bef2b74ULL, 0x7eda3f3ffc3fe5c3ULL, 0xaac755554955921cULL, 0x59dba2a2b2a27910ULL, 0xc9e9eaea8fea0365ULL, 0xca6a656589650fecULL, 0x6903babad2bab968ULL, 0x5e4a2f2fbc2f6593ULL, 0x9d8ec0c027c04ee7ULL, 0xa160dede5fdebe81ULL, 0x38fc1c1c701ce06cULL, 0xe746fdfdd3fdbb2eULL, 0x9a1f4d4d294d5264ULL, 0x397692927292e4e0ULL, 0xeafa7575c9758fbcULL, 0x0c3606061806301eULL, 0x09ae8a8a128a2498ULL, 0x794bb2b2f2b2f940ULL, 0xd185e6e6bfe66359ULL, 0x1c7e0e0e380e7036ULL, 0x3ee71f1f7c1ff863ULL, 0xc4556262956237f7ULL, 0xb53ad4d477d4eea3ULL, 0x4d81a8a89aa82932ULL, 0x315296966296c4f4ULL, 0xef62f9f9c3f99b3aULL, 0x97a3c5c533c566f6ULL, 0x4a102525942535b1ULL, 0xb2ab59597959f220ULL, 0x15d084842a8454aeULL, 
0xe4c57272d572b7a7ULL, 0x72ec3939e439d5ddULL, 0x98164c4c2d4c5a61ULL, 0xbc945e5e655eca3bULL, 0xf09f7878fd78e785ULL, 0x70e53838e038ddd8ULL, 0x05988c8c0a8c1486ULL, 0xbf17d1d163d1c6b2ULL, 0x57e4a5a5aea5410bULL, 0xd9a1e2e2afe2434dULL, 0xc24e616199612ff8ULL, 0x7b42b3b3f6b3f145ULL, 0x42342121842115a5ULL, 0x25089c9c4a9c94d6ULL, 0x3cee1e1e781ef066ULL, 0x8661434311432252ULL, 0x93b1c7c73bc776fcULL, 0xe54ffcfcd7fcb32bULL, 0x0824040410042014ULL, 0xa2e351515951b208ULL, 0x2f2599995e99bcc7ULL, 0xda226d6da96d4fc4ULL, 0x1a650d0d340d6839ULL, 0xe979fafacffa8335ULL, 0xa369dfdf5bdfb684ULL, 0xfca97e7ee57ed79bULL, 0x4819242490243db4ULL, 0x76fe3b3bec3bc5d7ULL, 0x4b9aabab96ab313dULL, 0x81f0cece1fce3ed1ULL, 0x2299111144118855ULL, 0x03838f8f068f0c89ULL, 0x9c044e4e254e4a6bULL, 0x7366b7b7e6b7d151ULL, 0xcbe0ebeb8beb0b60ULL, 0x78c13c3cf03cfdccULL, 0x1ffd81813e817cbfULL, 0x354094946a94d4feULL, 0xf31cf7f7fbf7eb0cULL, 0x6f18b9b9deb9a167ULL, 0x268b13134c13985fULL, 0x58512c2cb02c7d9cULL, 0xbb05d3d36bd3d6b8ULL, 0xd38ce7e7bbe76b5cULL, 0xdc396e6ea56e57cbULL, 0x95aac4c437c46ef3ULL, 0x061b03030c03180fULL, 0xacdc565645568a13ULL, 0x885e44440d441a49ULL, 0xfea07f7fe17fdf9eULL, 0x4f88a9a99ea92137ULL, 0x54672a2aa82a4d82ULL, 0x6b0abbbbd6bbb16dULL, 0x9f87c1c123c146e2ULL, 0xa6f153535153a202ULL, 0xa572dcdc57dcae8bULL, 0x16530b0b2c0b5827ULL, 0x27019d9d4e9d9cd3ULL, 0xd82b6c6cad6c47c1ULL, 0x62a43131c43195f5ULL, 0xe8f37474cd7487b9ULL, 0xf115f6f6fff6e309ULL, 0x8c4c464605460a43ULL, 0x45a5acac8aac0926ULL, 0x0fb589891e893c97ULL, 0x28b414145014a044ULL, 0xdfbae1e1a3e15b42ULL, 0x2ca616165816b04eULL, 0x74f73a3ae83acdd2ULL, 0xd2066969b9696fd0ULL, 0x124109092409482dULL, 0xe0d77070dd70a7adULL, 0x716fb6b6e2b6d954ULL, 0xbd1ed0d067d0ceb7ULL, 0xc7d6eded93ed3b7eULL, 0x85e2cccc17cc2edbULL, 0x8468424215422a57ULL, 0x2d2c98985a98b4c2ULL, 0x55eda4a4aaa4490eULL, 0x50752828a0285d88ULL, 0xb8865c5c6d5cda31ULL, 0xed6bf8f8c7f8933fULL, 0x11c28686228644a4ULL, }; static const u64 C3[256] = { 0x7830d818186018c0ULL, 0xaf462623238c2305ULL, 
0xf991b8c6c63fc67eULL, 0x6fcdfbe8e887e813ULL, 0xa113cb878726874cULL, 0x626d11b8b8dab8a9ULL, 0x0502090101040108ULL, 0x6e9e0d4f4f214f42ULL, 0xee6c9b3636d836adULL, 0x0451ffa6a6a2a659ULL, 0xbdb90cd2d26fd2deULL, 0x06f70ef5f5f3f5fbULL, 0x80f2967979f979efULL, 0xcede306f6fa16f5fULL, 0xef3f6d91917e91fcULL, 0x07a4f852525552aaULL, 0xfdc04760609d6027ULL, 0x766535bcbccabc89ULL, 0xcd2b379b9b569bacULL, 0x8c018a8e8e028e04ULL, 0x155bd2a3a3b6a371ULL, 0x3c186c0c0c300c60ULL, 0x8af6847b7bf17bffULL, 0xe16a803535d435b5ULL, 0x693af51d1d741de8ULL, 0x47ddb3e0e0a7e053ULL, 0xacb321d7d77bd7f6ULL, 0xed999cc2c22fc25eULL, 0x965c432e2eb82e6dULL, 0x7a96294b4b314b62ULL, 0x21e15dfefedffea3ULL, 0x16aed55757415782ULL, 0x412abd15155415a8ULL, 0xb6eee87777c1779fULL, 0xeb6e923737dc37a5ULL, 0x56d79ee5e5b3e57bULL, 0xd923139f9f469f8cULL, 0x17fd23f0f0e7f0d3ULL, 0x7f94204a4a354a6aULL, 0x95a944dada4fda9eULL, 0x25b0a258587d58faULL, 0xca8fcfc9c903c906ULL, 0x8d527c2929a42955ULL, 0x22145a0a0a280a50ULL, 0x4f7f50b1b1feb1e1ULL, 0x1a5dc9a0a0baa069ULL, 0xdad6146b6bb16b7fULL, 0xab17d985852e855cULL, 0x73673cbdbdcebd81ULL, 0x34ba8f5d5d695dd2ULL, 0x5020901010401080ULL, 0x03f507f4f4f7f4f3ULL, 0xc08bddcbcb0bcb16ULL, 0xc67cd33e3ef83eedULL, 0x110a2d0505140528ULL, 0xe6ce78676781671fULL, 0x53d597e4e4b7e473ULL, 0xbb4e0227279c2725ULL, 0x5882734141194132ULL, 0x9d0ba78b8b168b2cULL, 0x0153f6a7a7a6a751ULL, 0x94fab27d7de97dcfULL, 0xfb374995956e95dcULL, 0x9fad56d8d847d88eULL, 0x30eb70fbfbcbfb8bULL, 0x71c1cdeeee9fee23ULL, 0x91f8bb7c7ced7cc7ULL, 0xe3cc716666856617ULL, 0x8ea77bdddd53dda6ULL, 0x4b2eaf17175c17b8ULL, 0x468e454747014702ULL, 0xdc211a9e9e429e84ULL, 0xc589d4caca0fca1eULL, 0x995a582d2db42d75ULL, 0x79632ebfbfc6bf91ULL, 0x1b0e3f07071c0738ULL, 0x2347acadad8ead01ULL, 0x2fb4b05a5a755aeaULL, 0xb51bef838336836cULL, 0xff66b63333cc3385ULL, 0xf2c65c636391633fULL, 0x0a04120202080210ULL, 0x384993aaaa92aa39ULL, 0xa8e2de7171d971afULL, 0xcf8dc6c8c807c80eULL, 0x7d32d119196419c8ULL, 0x70923b4949394972ULL, 0x9aaf5fd9d943d986ULL, 
0x1df931f2f2eff2c3ULL, 0x48dba8e3e3abe34bULL, 0x2ab6b95b5b715be2ULL, 0x920dbc88881a8834ULL, 0xc8293e9a9a529aa4ULL, 0xbe4c0b262698262dULL, 0xfa64bf3232c8328dULL, 0x4a7d59b0b0fab0e9ULL, 0x6acff2e9e983e91bULL, 0x331e770f0f3c0f78ULL, 0xa6b733d5d573d5e6ULL, 0xba1df480803a8074ULL, 0x7c6127bebec2be99ULL, 0xde87ebcdcd13cd26ULL, 0xe468893434d034bdULL, 0x75903248483d487aULL, 0x24e354ffffdbffabULL, 0x8ff48d7a7af57af7ULL, 0xea3d6490907a90f4ULL, 0x3ebe9d5f5f615fc2ULL, 0xa0403d202080201dULL, 0xd5d00f6868bd6867ULL, 0x7234ca1a1a681ad0ULL, 0x2c41b7aeae82ae19ULL, 0x5e757db4b4eab4c9ULL, 0x19a8ce54544d549aULL, 0xe53b7f93937693ecULL, 0xaa442f222288220dULL, 0xe9c86364648d6407ULL, 0x12ff2af1f1e3f1dbULL, 0xa2e6cc7373d173bfULL, 0x5a24821212481290ULL, 0x5d807a40401d403aULL, 0x2810480808200840ULL, 0xe89b95c3c32bc356ULL, 0x7bc5dfecec97ec33ULL, 0x90ab4ddbdb4bdb96ULL, 0x1f5fc0a1a1bea161ULL, 0x8307918d8d0e8d1cULL, 0xc97ac83d3df43df5ULL, 0xf1335b97976697ccULL, 0x0000000000000000ULL, 0xd483f9cfcf1bcf36ULL, 0x87566e2b2bac2b45ULL, 0xb3ece17676c57697ULL, 0xb019e68282328264ULL, 0xa9b128d6d67fd6feULL, 0x7736c31b1b6c1bd8ULL, 0x5b7774b5b5eeb5c1ULL, 0x2943beafaf86af11ULL, 0xdfd41d6a6ab56a77ULL, 0x0da0ea50505d50baULL, 0x4c8a574545094512ULL, 0x18fb38f3f3ebf3cbULL, 0xf060ad3030c0309dULL, 0x74c3c4efef9bef2bULL, 0xc37eda3f3ffc3fe5ULL, 0x1caac75555495592ULL, 0x1059dba2a2b2a279ULL, 0x65c9e9eaea8fea03ULL, 0xecca6a656589650fULL, 0x686903babad2bab9ULL, 0x935e4a2f2fbc2f65ULL, 0xe79d8ec0c027c04eULL, 0x81a160dede5fdebeULL, 0x6c38fc1c1c701ce0ULL, 0x2ee746fdfdd3fdbbULL, 0x649a1f4d4d294d52ULL, 0xe0397692927292e4ULL, 0xbceafa7575c9758fULL, 0x1e0c360606180630ULL, 0x9809ae8a8a128a24ULL, 0x40794bb2b2f2b2f9ULL, 0x59d185e6e6bfe663ULL, 0x361c7e0e0e380e70ULL, 0x633ee71f1f7c1ff8ULL, 0xf7c4556262956237ULL, 0xa3b53ad4d477d4eeULL, 0x324d81a8a89aa829ULL, 0xf4315296966296c4ULL, 0x3aef62f9f9c3f99bULL, 0xf697a3c5c533c566ULL, 0xb14a102525942535ULL, 0x20b2ab59597959f2ULL, 0xae15d084842a8454ULL, 0xa7e4c57272d572b7ULL, 
0xdd72ec3939e439d5ULL, 0x6198164c4c2d4c5aULL, 0x3bbc945e5e655ecaULL, 0x85f09f7878fd78e7ULL, 0xd870e53838e038ddULL, 0x8605988c8c0a8c14ULL, 0xb2bf17d1d163d1c6ULL, 0x0b57e4a5a5aea541ULL, 0x4dd9a1e2e2afe243ULL, 0xf8c24e616199612fULL, 0x457b42b3b3f6b3f1ULL, 0xa542342121842115ULL, 0xd625089c9c4a9c94ULL, 0x663cee1e1e781ef0ULL, 0x5286614343114322ULL, 0xfc93b1c7c73bc776ULL, 0x2be54ffcfcd7fcb3ULL, 0x1408240404100420ULL, 0x08a2e351515951b2ULL, 0xc72f2599995e99bcULL, 0xc4da226d6da96d4fULL, 0x391a650d0d340d68ULL, 0x35e979fafacffa83ULL, 0x84a369dfdf5bdfb6ULL, 0x9bfca97e7ee57ed7ULL, 0xb44819242490243dULL, 0xd776fe3b3bec3bc5ULL, 0x3d4b9aabab96ab31ULL, 0xd181f0cece1fce3eULL, 0x5522991111441188ULL, 0x8903838f8f068f0cULL, 0x6b9c044e4e254e4aULL, 0x517366b7b7e6b7d1ULL, 0x60cbe0ebeb8beb0bULL, 0xcc78c13c3cf03cfdULL, 0xbf1ffd81813e817cULL, 0xfe354094946a94d4ULL, 0x0cf31cf7f7fbf7ebULL, 0x676f18b9b9deb9a1ULL, 0x5f268b13134c1398ULL, 0x9c58512c2cb02c7dULL, 0xb8bb05d3d36bd3d6ULL, 0x5cd38ce7e7bbe76bULL, 0xcbdc396e6ea56e57ULL, 0xf395aac4c437c46eULL, 0x0f061b03030c0318ULL, 0x13acdc565645568aULL, 0x49885e44440d441aULL, 0x9efea07f7fe17fdfULL, 0x374f88a9a99ea921ULL, 0x8254672a2aa82a4dULL, 0x6d6b0abbbbd6bbb1ULL, 0xe29f87c1c123c146ULL, 0x02a6f153535153a2ULL, 0x8ba572dcdc57dcaeULL, 0x2716530b0b2c0b58ULL, 0xd327019d9d4e9d9cULL, 0xc1d82b6c6cad6c47ULL, 0xf562a43131c43195ULL, 0xb9e8f37474cd7487ULL, 0x09f115f6f6fff6e3ULL, 0x438c4c464605460aULL, 0x2645a5acac8aac09ULL, 0x970fb589891e893cULL, 0x4428b414145014a0ULL, 0x42dfbae1e1a3e15bULL, 0x4e2ca616165816b0ULL, 0xd274f73a3ae83acdULL, 0xd0d2066969b9696fULL, 0x2d12410909240948ULL, 0xade0d77070dd70a7ULL, 0x54716fb6b6e2b6d9ULL, 0xb7bd1ed0d067d0ceULL, 0x7ec7d6eded93ed3bULL, 0xdb85e2cccc17cc2eULL, 0x578468424215422aULL, 0xc22d2c98985a98b4ULL, 0x0e55eda4a4aaa449ULL, 0x8850752828a0285dULL, 0x31b8865c5c6d5cdaULL, 0x3fed6bf8f8c7f893ULL, 0xa411c28686228644ULL, }; static const u64 C4[256] = { 0xc07830d818186018ULL, 0x05af462623238c23ULL, 0x7ef991b8c6c63fc6ULL, 
0x136fcdfbe8e887e8ULL, 0x4ca113cb87872687ULL, 0xa9626d11b8b8dab8ULL, 0x0805020901010401ULL, 0x426e9e0d4f4f214fULL, 0xadee6c9b3636d836ULL, 0x590451ffa6a6a2a6ULL, 0xdebdb90cd2d26fd2ULL, 0xfb06f70ef5f5f3f5ULL, 0xef80f2967979f979ULL, 0x5fcede306f6fa16fULL, 0xfcef3f6d91917e91ULL, 0xaa07a4f852525552ULL, 0x27fdc04760609d60ULL, 0x89766535bcbccabcULL, 0xaccd2b379b9b569bULL, 0x048c018a8e8e028eULL, 0x71155bd2a3a3b6a3ULL, 0x603c186c0c0c300cULL, 0xff8af6847b7bf17bULL, 0xb5e16a803535d435ULL, 0xe8693af51d1d741dULL, 0x5347ddb3e0e0a7e0ULL, 0xf6acb321d7d77bd7ULL, 0x5eed999cc2c22fc2ULL, 0x6d965c432e2eb82eULL, 0x627a96294b4b314bULL, 0xa321e15dfefedffeULL, 0x8216aed557574157ULL, 0xa8412abd15155415ULL, 0x9fb6eee87777c177ULL, 0xa5eb6e923737dc37ULL, 0x7b56d79ee5e5b3e5ULL, 0x8cd923139f9f469fULL, 0xd317fd23f0f0e7f0ULL, 0x6a7f94204a4a354aULL, 0x9e95a944dada4fdaULL, 0xfa25b0a258587d58ULL, 0x06ca8fcfc9c903c9ULL, 0x558d527c2929a429ULL, 0x5022145a0a0a280aULL, 0xe14f7f50b1b1feb1ULL, 0x691a5dc9a0a0baa0ULL, 0x7fdad6146b6bb16bULL, 0x5cab17d985852e85ULL, 0x8173673cbdbdcebdULL, 0xd234ba8f5d5d695dULL, 0x8050209010104010ULL, 0xf303f507f4f4f7f4ULL, 0x16c08bddcbcb0bcbULL, 0xedc67cd33e3ef83eULL, 0x28110a2d05051405ULL, 0x1fe6ce7867678167ULL, 0x7353d597e4e4b7e4ULL, 0x25bb4e0227279c27ULL, 0x3258827341411941ULL, 0x2c9d0ba78b8b168bULL, 0x510153f6a7a7a6a7ULL, 0xcf94fab27d7de97dULL, 0xdcfb374995956e95ULL, 0x8e9fad56d8d847d8ULL, 0x8b30eb70fbfbcbfbULL, 0x2371c1cdeeee9feeULL, 0xc791f8bb7c7ced7cULL, 0x17e3cc7166668566ULL, 0xa68ea77bdddd53ddULL, 0xb84b2eaf17175c17ULL, 0x02468e4547470147ULL, 0x84dc211a9e9e429eULL, 0x1ec589d4caca0fcaULL, 0x75995a582d2db42dULL, 0x9179632ebfbfc6bfULL, 0x381b0e3f07071c07ULL, 0x012347acadad8eadULL, 0xea2fb4b05a5a755aULL, 0x6cb51bef83833683ULL, 0x85ff66b63333cc33ULL, 0x3ff2c65c63639163ULL, 0x100a041202020802ULL, 0x39384993aaaa92aaULL, 0xafa8e2de7171d971ULL, 0x0ecf8dc6c8c807c8ULL, 0xc87d32d119196419ULL, 0x7270923b49493949ULL, 0x869aaf5fd9d943d9ULL, 0xc31df931f2f2eff2ULL, 
0x4b48dba8e3e3abe3ULL, 0xe22ab6b95b5b715bULL, 0x34920dbc88881a88ULL, 0xa4c8293e9a9a529aULL, 0x2dbe4c0b26269826ULL, 0x8dfa64bf3232c832ULL, 0xe94a7d59b0b0fab0ULL, 0x1b6acff2e9e983e9ULL, 0x78331e770f0f3c0fULL, 0xe6a6b733d5d573d5ULL, 0x74ba1df480803a80ULL, 0x997c6127bebec2beULL, 0x26de87ebcdcd13cdULL, 0xbde468893434d034ULL, 0x7a75903248483d48ULL, 0xab24e354ffffdbffULL, 0xf78ff48d7a7af57aULL, 0xf4ea3d6490907a90ULL, 0xc23ebe9d5f5f615fULL, 0x1da0403d20208020ULL, 0x67d5d00f6868bd68ULL, 0xd07234ca1a1a681aULL, 0x192c41b7aeae82aeULL, 0xc95e757db4b4eab4ULL, 0x9a19a8ce54544d54ULL, 0xece53b7f93937693ULL, 0x0daa442f22228822ULL, 0x07e9c86364648d64ULL, 0xdb12ff2af1f1e3f1ULL, 0xbfa2e6cc7373d173ULL, 0x905a248212124812ULL, 0x3a5d807a40401d40ULL, 0x4028104808082008ULL, 0x56e89b95c3c32bc3ULL, 0x337bc5dfecec97ecULL, 0x9690ab4ddbdb4bdbULL, 0x611f5fc0a1a1bea1ULL, 0x1c8307918d8d0e8dULL, 0xf5c97ac83d3df43dULL, 0xccf1335b97976697ULL, 0x0000000000000000ULL, 0x36d483f9cfcf1bcfULL, 0x4587566e2b2bac2bULL, 0x97b3ece17676c576ULL, 0x64b019e682823282ULL, 0xfea9b128d6d67fd6ULL, 0xd87736c31b1b6c1bULL, 0xc15b7774b5b5eeb5ULL, 0x112943beafaf86afULL, 0x77dfd41d6a6ab56aULL, 0xba0da0ea50505d50ULL, 0x124c8a5745450945ULL, 0xcb18fb38f3f3ebf3ULL, 0x9df060ad3030c030ULL, 0x2b74c3c4efef9befULL, 0xe5c37eda3f3ffc3fULL, 0x921caac755554955ULL, 0x791059dba2a2b2a2ULL, 0x0365c9e9eaea8feaULL, 0x0fecca6a65658965ULL, 0xb9686903babad2baULL, 0x65935e4a2f2fbc2fULL, 0x4ee79d8ec0c027c0ULL, 0xbe81a160dede5fdeULL, 0xe06c38fc1c1c701cULL, 0xbb2ee746fdfdd3fdULL, 0x52649a1f4d4d294dULL, 0xe4e0397692927292ULL, 0x8fbceafa7575c975ULL, 0x301e0c3606061806ULL, 0x249809ae8a8a128aULL, 0xf940794bb2b2f2b2ULL, 0x6359d185e6e6bfe6ULL, 0x70361c7e0e0e380eULL, 0xf8633ee71f1f7c1fULL, 0x37f7c45562629562ULL, 0xeea3b53ad4d477d4ULL, 0x29324d81a8a89aa8ULL, 0xc4f4315296966296ULL, 0x9b3aef62f9f9c3f9ULL, 0x66f697a3c5c533c5ULL, 0x35b14a1025259425ULL, 0xf220b2ab59597959ULL, 0x54ae15d084842a84ULL, 0xb7a7e4c57272d572ULL, 0xd5dd72ec3939e439ULL, 
0x5a6198164c4c2d4cULL, 0xca3bbc945e5e655eULL, 0xe785f09f7878fd78ULL, 0xddd870e53838e038ULL, 0x148605988c8c0a8cULL, 0xc6b2bf17d1d163d1ULL, 0x410b57e4a5a5aea5ULL, 0x434dd9a1e2e2afe2ULL, 0x2ff8c24e61619961ULL, 0xf1457b42b3b3f6b3ULL, 0x15a5423421218421ULL, 0x94d625089c9c4a9cULL, 0xf0663cee1e1e781eULL, 0x2252866143431143ULL, 0x76fc93b1c7c73bc7ULL, 0xb32be54ffcfcd7fcULL, 0x2014082404041004ULL, 0xb208a2e351515951ULL, 0xbcc72f2599995e99ULL, 0x4fc4da226d6da96dULL, 0x68391a650d0d340dULL, 0x8335e979fafacffaULL, 0xb684a369dfdf5bdfULL, 0xd79bfca97e7ee57eULL, 0x3db4481924249024ULL, 0xc5d776fe3b3bec3bULL, 0x313d4b9aabab96abULL, 0x3ed181f0cece1fceULL, 0x8855229911114411ULL, 0x0c8903838f8f068fULL, 0x4a6b9c044e4e254eULL, 0xd1517366b7b7e6b7ULL, 0x0b60cbe0ebeb8bebULL, 0xfdcc78c13c3cf03cULL, 0x7cbf1ffd81813e81ULL, 0xd4fe354094946a94ULL, 0xeb0cf31cf7f7fbf7ULL, 0xa1676f18b9b9deb9ULL, 0x985f268b13134c13ULL, 0x7d9c58512c2cb02cULL, 0xd6b8bb05d3d36bd3ULL, 0x6b5cd38ce7e7bbe7ULL, 0x57cbdc396e6ea56eULL, 0x6ef395aac4c437c4ULL, 0x180f061b03030c03ULL, 0x8a13acdc56564556ULL, 0x1a49885e44440d44ULL, 0xdf9efea07f7fe17fULL, 0x21374f88a9a99ea9ULL, 0x4d8254672a2aa82aULL, 0xb16d6b0abbbbd6bbULL, 0x46e29f87c1c123c1ULL, 0xa202a6f153535153ULL, 0xae8ba572dcdc57dcULL, 0x582716530b0b2c0bULL, 0x9cd327019d9d4e9dULL, 0x47c1d82b6c6cad6cULL, 0x95f562a43131c431ULL, 0x87b9e8f37474cd74ULL, 0xe309f115f6f6fff6ULL, 0x0a438c4c46460546ULL, 0x092645a5acac8aacULL, 0x3c970fb589891e89ULL, 0xa04428b414145014ULL, 0x5b42dfbae1e1a3e1ULL, 0xb04e2ca616165816ULL, 0xcdd274f73a3ae83aULL, 0x6fd0d2066969b969ULL, 0x482d124109092409ULL, 0xa7ade0d77070dd70ULL, 0xd954716fb6b6e2b6ULL, 0xceb7bd1ed0d067d0ULL, 0x3b7ec7d6eded93edULL, 0x2edb85e2cccc17ccULL, 0x2a57846842421542ULL, 0xb4c22d2c98985a98ULL, 0x490e55eda4a4aaa4ULL, 0x5d8850752828a028ULL, 0xda31b8865c5c6d5cULL, 0x933fed6bf8f8c7f8ULL, 0x44a411c286862286ULL, }; static const u64 C5[256] = { 0x18c07830d8181860ULL, 0x2305af462623238cULL, 0xc67ef991b8c6c63fULL, 0xe8136fcdfbe8e887ULL, 
0x874ca113cb878726ULL, 0xb8a9626d11b8b8daULL, 0x0108050209010104ULL, 0x4f426e9e0d4f4f21ULL, 0x36adee6c9b3636d8ULL, 0xa6590451ffa6a6a2ULL, 0xd2debdb90cd2d26fULL, 0xf5fb06f70ef5f5f3ULL, 0x79ef80f2967979f9ULL, 0x6f5fcede306f6fa1ULL, 0x91fcef3f6d91917eULL, 0x52aa07a4f8525255ULL, 0x6027fdc04760609dULL, 0xbc89766535bcbccaULL, 0x9baccd2b379b9b56ULL, 0x8e048c018a8e8e02ULL, 0xa371155bd2a3a3b6ULL, 0x0c603c186c0c0c30ULL, 0x7bff8af6847b7bf1ULL, 0x35b5e16a803535d4ULL, 0x1de8693af51d1d74ULL, 0xe05347ddb3e0e0a7ULL, 0xd7f6acb321d7d77bULL, 0xc25eed999cc2c22fULL, 0x2e6d965c432e2eb8ULL, 0x4b627a96294b4b31ULL, 0xfea321e15dfefedfULL, 0x578216aed5575741ULL, 0x15a8412abd151554ULL, 0x779fb6eee87777c1ULL, 0x37a5eb6e923737dcULL, 0xe57b56d79ee5e5b3ULL, 0x9f8cd923139f9f46ULL, 0xf0d317fd23f0f0e7ULL, 0x4a6a7f94204a4a35ULL, 0xda9e95a944dada4fULL, 0x58fa25b0a258587dULL, 0xc906ca8fcfc9c903ULL, 0x29558d527c2929a4ULL, 0x0a5022145a0a0a28ULL, 0xb1e14f7f50b1b1feULL, 0xa0691a5dc9a0a0baULL, 0x6b7fdad6146b6bb1ULL, 0x855cab17d985852eULL, 0xbd8173673cbdbdceULL, 0x5dd234ba8f5d5d69ULL, 0x1080502090101040ULL, 0xf4f303f507f4f4f7ULL, 0xcb16c08bddcbcb0bULL, 0x3eedc67cd33e3ef8ULL, 0x0528110a2d050514ULL, 0x671fe6ce78676781ULL, 0xe47353d597e4e4b7ULL, 0x2725bb4e0227279cULL, 0x4132588273414119ULL, 0x8b2c9d0ba78b8b16ULL, 0xa7510153f6a7a7a6ULL, 0x7dcf94fab27d7de9ULL, 0x95dcfb374995956eULL, 0xd88e9fad56d8d847ULL, 0xfb8b30eb70fbfbcbULL, 0xee2371c1cdeeee9fULL, 0x7cc791f8bb7c7cedULL, 0x6617e3cc71666685ULL, 0xdda68ea77bdddd53ULL, 0x17b84b2eaf17175cULL, 0x4702468e45474701ULL, 0x9e84dc211a9e9e42ULL, 0xca1ec589d4caca0fULL, 0x2d75995a582d2db4ULL, 0xbf9179632ebfbfc6ULL, 0x07381b0e3f07071cULL, 0xad012347acadad8eULL, 0x5aea2fb4b05a5a75ULL, 0x836cb51bef838336ULL, 0x3385ff66b63333ccULL, 0x633ff2c65c636391ULL, 0x02100a0412020208ULL, 0xaa39384993aaaa92ULL, 0x71afa8e2de7171d9ULL, 0xc80ecf8dc6c8c807ULL, 0x19c87d32d1191964ULL, 0x497270923b494939ULL, 0xd9869aaf5fd9d943ULL, 0xf2c31df931f2f2efULL, 0xe34b48dba8e3e3abULL, 
0x5be22ab6b95b5b71ULL, 0x8834920dbc88881aULL, 0x9aa4c8293e9a9a52ULL, 0x262dbe4c0b262698ULL, 0x328dfa64bf3232c8ULL, 0xb0e94a7d59b0b0faULL, 0xe91b6acff2e9e983ULL, 0x0f78331e770f0f3cULL, 0xd5e6a6b733d5d573ULL, 0x8074ba1df480803aULL, 0xbe997c6127bebec2ULL, 0xcd26de87ebcdcd13ULL, 0x34bde468893434d0ULL, 0x487a75903248483dULL, 0xffab24e354ffffdbULL, 0x7af78ff48d7a7af5ULL, 0x90f4ea3d6490907aULL, 0x5fc23ebe9d5f5f61ULL, 0x201da0403d202080ULL, 0x6867d5d00f6868bdULL, 0x1ad07234ca1a1a68ULL, 0xae192c41b7aeae82ULL, 0xb4c95e757db4b4eaULL, 0x549a19a8ce54544dULL, 0x93ece53b7f939376ULL, 0x220daa442f222288ULL, 0x6407e9c86364648dULL, 0xf1db12ff2af1f1e3ULL, 0x73bfa2e6cc7373d1ULL, 0x12905a2482121248ULL, 0x403a5d807a40401dULL, 0x0840281048080820ULL, 0xc356e89b95c3c32bULL, 0xec337bc5dfecec97ULL, 0xdb9690ab4ddbdb4bULL, 0xa1611f5fc0a1a1beULL, 0x8d1c8307918d8d0eULL, 0x3df5c97ac83d3df4ULL, 0x97ccf1335b979766ULL, 0x0000000000000000ULL, 0xcf36d483f9cfcf1bULL, 0x2b4587566e2b2bacULL, 0x7697b3ece17676c5ULL, 0x8264b019e6828232ULL, 0xd6fea9b128d6d67fULL, 0x1bd87736c31b1b6cULL, 0xb5c15b7774b5b5eeULL, 0xaf112943beafaf86ULL, 0x6a77dfd41d6a6ab5ULL, 0x50ba0da0ea50505dULL, 0x45124c8a57454509ULL, 0xf3cb18fb38f3f3ebULL, 0x309df060ad3030c0ULL, 0xef2b74c3c4efef9bULL, 0x3fe5c37eda3f3ffcULL, 0x55921caac7555549ULL, 0xa2791059dba2a2b2ULL, 0xea0365c9e9eaea8fULL, 0x650fecca6a656589ULL, 0xbab9686903babad2ULL, 0x2f65935e4a2f2fbcULL, 0xc04ee79d8ec0c027ULL, 0xdebe81a160dede5fULL, 0x1ce06c38fc1c1c70ULL, 0xfdbb2ee746fdfdd3ULL, 0x4d52649a1f4d4d29ULL, 0x92e4e03976929272ULL, 0x758fbceafa7575c9ULL, 0x06301e0c36060618ULL, 0x8a249809ae8a8a12ULL, 0xb2f940794bb2b2f2ULL, 0xe66359d185e6e6bfULL, 0x0e70361c7e0e0e38ULL, 0x1ff8633ee71f1f7cULL, 0x6237f7c455626295ULL, 0xd4eea3b53ad4d477ULL, 0xa829324d81a8a89aULL, 0x96c4f43152969662ULL, 0xf99b3aef62f9f9c3ULL, 0xc566f697a3c5c533ULL, 0x2535b14a10252594ULL, 0x59f220b2ab595979ULL, 0x8454ae15d084842aULL, 0x72b7a7e4c57272d5ULL, 0x39d5dd72ec3939e4ULL, 0x4c5a6198164c4c2dULL, 
0x5eca3bbc945e5e65ULL, 0x78e785f09f7878fdULL, 0x38ddd870e53838e0ULL, 0x8c148605988c8c0aULL, 0xd1c6b2bf17d1d163ULL, 0xa5410b57e4a5a5aeULL, 0xe2434dd9a1e2e2afULL, 0x612ff8c24e616199ULL, 0xb3f1457b42b3b3f6ULL, 0x2115a54234212184ULL, 0x9c94d625089c9c4aULL, 0x1ef0663cee1e1e78ULL, 0x4322528661434311ULL, 0xc776fc93b1c7c73bULL, 0xfcb32be54ffcfcd7ULL, 0x0420140824040410ULL, 0x51b208a2e3515159ULL, 0x99bcc72f2599995eULL, 0x6d4fc4da226d6da9ULL, 0x0d68391a650d0d34ULL, 0xfa8335e979fafacfULL, 0xdfb684a369dfdf5bULL, 0x7ed79bfca97e7ee5ULL, 0x243db44819242490ULL, 0x3bc5d776fe3b3becULL, 0xab313d4b9aabab96ULL, 0xce3ed181f0cece1fULL, 0x1188552299111144ULL, 0x8f0c8903838f8f06ULL, 0x4e4a6b9c044e4e25ULL, 0xb7d1517366b7b7e6ULL, 0xeb0b60cbe0ebeb8bULL, 0x3cfdcc78c13c3cf0ULL, 0x817cbf1ffd81813eULL, 0x94d4fe354094946aULL, 0xf7eb0cf31cf7f7fbULL, 0xb9a1676f18b9b9deULL, 0x13985f268b13134cULL, 0x2c7d9c58512c2cb0ULL, 0xd3d6b8bb05d3d36bULL, 0xe76b5cd38ce7e7bbULL, 0x6e57cbdc396e6ea5ULL, 0xc46ef395aac4c437ULL, 0x03180f061b03030cULL, 0x568a13acdc565645ULL, 0x441a49885e44440dULL, 0x7fdf9efea07f7fe1ULL, 0xa921374f88a9a99eULL, 0x2a4d8254672a2aa8ULL, 0xbbb16d6b0abbbbd6ULL, 0xc146e29f87c1c123ULL, 0x53a202a6f1535351ULL, 0xdcae8ba572dcdc57ULL, 0x0b582716530b0b2cULL, 0x9d9cd327019d9d4eULL, 0x6c47c1d82b6c6cadULL, 0x3195f562a43131c4ULL, 0x7487b9e8f37474cdULL, 0xf6e309f115f6f6ffULL, 0x460a438c4c464605ULL, 0xac092645a5acac8aULL, 0x893c970fb589891eULL, 0x14a04428b4141450ULL, 0xe15b42dfbae1e1a3ULL, 0x16b04e2ca6161658ULL, 0x3acdd274f73a3ae8ULL, 0x696fd0d2066969b9ULL, 0x09482d1241090924ULL, 0x70a7ade0d77070ddULL, 0xb6d954716fb6b6e2ULL, 0xd0ceb7bd1ed0d067ULL, 0xed3b7ec7d6eded93ULL, 0xcc2edb85e2cccc17ULL, 0x422a578468424215ULL, 0x98b4c22d2c98985aULL, 0xa4490e55eda4a4aaULL, 0x285d8850752828a0ULL, 0x5cda31b8865c5c6dULL, 0xf8933fed6bf8f8c7ULL, 0x8644a411c2868622ULL, }; static const u64 C6[256] = { 0x6018c07830d81818ULL, 0x8c2305af46262323ULL, 0x3fc67ef991b8c6c6ULL, 0x87e8136fcdfbe8e8ULL, 0x26874ca113cb8787ULL, 
0xdab8a9626d11b8b8ULL, 0x0401080502090101ULL, 0x214f426e9e0d4f4fULL, 0xd836adee6c9b3636ULL, 0xa2a6590451ffa6a6ULL, 0x6fd2debdb90cd2d2ULL, 0xf3f5fb06f70ef5f5ULL, 0xf979ef80f2967979ULL, 0xa16f5fcede306f6fULL, 0x7e91fcef3f6d9191ULL, 0x5552aa07a4f85252ULL, 0x9d6027fdc0476060ULL, 0xcabc89766535bcbcULL, 0x569baccd2b379b9bULL, 0x028e048c018a8e8eULL, 0xb6a371155bd2a3a3ULL, 0x300c603c186c0c0cULL, 0xf17bff8af6847b7bULL, 0xd435b5e16a803535ULL, 0x741de8693af51d1dULL, 0xa7e05347ddb3e0e0ULL, 0x7bd7f6acb321d7d7ULL, 0x2fc25eed999cc2c2ULL, 0xb82e6d965c432e2eULL, 0x314b627a96294b4bULL, 0xdffea321e15dfefeULL, 0x41578216aed55757ULL, 0x5415a8412abd1515ULL, 0xc1779fb6eee87777ULL, 0xdc37a5eb6e923737ULL, 0xb3e57b56d79ee5e5ULL, 0x469f8cd923139f9fULL, 0xe7f0d317fd23f0f0ULL, 0x354a6a7f94204a4aULL, 0x4fda9e95a944dadaULL, 0x7d58fa25b0a25858ULL, 0x03c906ca8fcfc9c9ULL, 0xa429558d527c2929ULL, 0x280a5022145a0a0aULL, 0xfeb1e14f7f50b1b1ULL, 0xbaa0691a5dc9a0a0ULL, 0xb16b7fdad6146b6bULL, 0x2e855cab17d98585ULL, 0xcebd8173673cbdbdULL, 0x695dd234ba8f5d5dULL, 0x4010805020901010ULL, 0xf7f4f303f507f4f4ULL, 0x0bcb16c08bddcbcbULL, 0xf83eedc67cd33e3eULL, 0x140528110a2d0505ULL, 0x81671fe6ce786767ULL, 0xb7e47353d597e4e4ULL, 0x9c2725bb4e022727ULL, 0x1941325882734141ULL, 0x168b2c9d0ba78b8bULL, 0xa6a7510153f6a7a7ULL, 0xe97dcf94fab27d7dULL, 0x6e95dcfb37499595ULL, 0x47d88e9fad56d8d8ULL, 0xcbfb8b30eb70fbfbULL, 0x9fee2371c1cdeeeeULL, 0xed7cc791f8bb7c7cULL, 0x856617e3cc716666ULL, 0x53dda68ea77bddddULL, 0x5c17b84b2eaf1717ULL, 0x014702468e454747ULL, 0x429e84dc211a9e9eULL, 0x0fca1ec589d4cacaULL, 0xb42d75995a582d2dULL, 0xc6bf9179632ebfbfULL, 0x1c07381b0e3f0707ULL, 0x8ead012347acadadULL, 0x755aea2fb4b05a5aULL, 0x36836cb51bef8383ULL, 0xcc3385ff66b63333ULL, 0x91633ff2c65c6363ULL, 0x0802100a04120202ULL, 0x92aa39384993aaaaULL, 0xd971afa8e2de7171ULL, 0x07c80ecf8dc6c8c8ULL, 0x6419c87d32d11919ULL, 0x39497270923b4949ULL, 0x43d9869aaf5fd9d9ULL, 0xeff2c31df931f2f2ULL, 0xabe34b48dba8e3e3ULL, 0x715be22ab6b95b5bULL, 
0x1a8834920dbc8888ULL, 0x529aa4c8293e9a9aULL, 0x98262dbe4c0b2626ULL, 0xc8328dfa64bf3232ULL, 0xfab0e94a7d59b0b0ULL, 0x83e91b6acff2e9e9ULL, 0x3c0f78331e770f0fULL, 0x73d5e6a6b733d5d5ULL, 0x3a8074ba1df48080ULL, 0xc2be997c6127bebeULL, 0x13cd26de87ebcdcdULL, 0xd034bde468893434ULL, 0x3d487a7590324848ULL, 0xdbffab24e354ffffULL, 0xf57af78ff48d7a7aULL, 0x7a90f4ea3d649090ULL, 0x615fc23ebe9d5f5fULL, 0x80201da0403d2020ULL, 0xbd6867d5d00f6868ULL, 0x681ad07234ca1a1aULL, 0x82ae192c41b7aeaeULL, 0xeab4c95e757db4b4ULL, 0x4d549a19a8ce5454ULL, 0x7693ece53b7f9393ULL, 0x88220daa442f2222ULL, 0x8d6407e9c8636464ULL, 0xe3f1db12ff2af1f1ULL, 0xd173bfa2e6cc7373ULL, 0x4812905a24821212ULL, 0x1d403a5d807a4040ULL, 0x2008402810480808ULL, 0x2bc356e89b95c3c3ULL, 0x97ec337bc5dfececULL, 0x4bdb9690ab4ddbdbULL, 0xbea1611f5fc0a1a1ULL, 0x0e8d1c8307918d8dULL, 0xf43df5c97ac83d3dULL, 0x6697ccf1335b9797ULL, 0x0000000000000000ULL, 0x1bcf36d483f9cfcfULL, 0xac2b4587566e2b2bULL, 0xc57697b3ece17676ULL, 0x328264b019e68282ULL, 0x7fd6fea9b128d6d6ULL, 0x6c1bd87736c31b1bULL, 0xeeb5c15b7774b5b5ULL, 0x86af112943beafafULL, 0xb56a77dfd41d6a6aULL, 0x5d50ba0da0ea5050ULL, 0x0945124c8a574545ULL, 0xebf3cb18fb38f3f3ULL, 0xc0309df060ad3030ULL, 0x9bef2b74c3c4efefULL, 0xfc3fe5c37eda3f3fULL, 0x4955921caac75555ULL, 0xb2a2791059dba2a2ULL, 0x8fea0365c9e9eaeaULL, 0x89650fecca6a6565ULL, 0xd2bab9686903babaULL, 0xbc2f65935e4a2f2fULL, 0x27c04ee79d8ec0c0ULL, 0x5fdebe81a160dedeULL, 0x701ce06c38fc1c1cULL, 0xd3fdbb2ee746fdfdULL, 0x294d52649a1f4d4dULL, 0x7292e4e039769292ULL, 0xc9758fbceafa7575ULL, 0x1806301e0c360606ULL, 0x128a249809ae8a8aULL, 0xf2b2f940794bb2b2ULL, 0xbfe66359d185e6e6ULL, 0x380e70361c7e0e0eULL, 0x7c1ff8633ee71f1fULL, 0x956237f7c4556262ULL, 0x77d4eea3b53ad4d4ULL, 0x9aa829324d81a8a8ULL, 0x6296c4f431529696ULL, 0xc3f99b3aef62f9f9ULL, 0x33c566f697a3c5c5ULL, 0x942535b14a102525ULL, 0x7959f220b2ab5959ULL, 0x2a8454ae15d08484ULL, 0xd572b7a7e4c57272ULL, 0xe439d5dd72ec3939ULL, 0x2d4c5a6198164c4cULL, 0x655eca3bbc945e5eULL, 
0xfd78e785f09f7878ULL, 0xe038ddd870e53838ULL, 0x0a8c148605988c8cULL, 0x63d1c6b2bf17d1d1ULL, 0xaea5410b57e4a5a5ULL, 0xafe2434dd9a1e2e2ULL, 0x99612ff8c24e6161ULL, 0xf6b3f1457b42b3b3ULL, 0x842115a542342121ULL, 0x4a9c94d625089c9cULL, 0x781ef0663cee1e1eULL, 0x1143225286614343ULL, 0x3bc776fc93b1c7c7ULL, 0xd7fcb32be54ffcfcULL, 0x1004201408240404ULL, 0x5951b208a2e35151ULL, 0x5e99bcc72f259999ULL, 0xa96d4fc4da226d6dULL, 0x340d68391a650d0dULL, 0xcffa8335e979fafaULL, 0x5bdfb684a369dfdfULL, 0xe57ed79bfca97e7eULL, 0x90243db448192424ULL, 0xec3bc5d776fe3b3bULL, 0x96ab313d4b9aababULL, 0x1fce3ed181f0ceceULL, 0x4411885522991111ULL, 0x068f0c8903838f8fULL, 0x254e4a6b9c044e4eULL, 0xe6b7d1517366b7b7ULL, 0x8beb0b60cbe0ebebULL, 0xf03cfdcc78c13c3cULL, 0x3e817cbf1ffd8181ULL, 0x6a94d4fe35409494ULL, 0xfbf7eb0cf31cf7f7ULL, 0xdeb9a1676f18b9b9ULL, 0x4c13985f268b1313ULL, 0xb02c7d9c58512c2cULL, 0x6bd3d6b8bb05d3d3ULL, 0xbbe76b5cd38ce7e7ULL, 0xa56e57cbdc396e6eULL, 0x37c46ef395aac4c4ULL, 0x0c03180f061b0303ULL, 0x45568a13acdc5656ULL, 0x0d441a49885e4444ULL, 0xe17fdf9efea07f7fULL, 0x9ea921374f88a9a9ULL, 0xa82a4d8254672a2aULL, 0xd6bbb16d6b0abbbbULL, 0x23c146e29f87c1c1ULL, 0x5153a202a6f15353ULL, 0x57dcae8ba572dcdcULL, 0x2c0b582716530b0bULL, 0x4e9d9cd327019d9dULL, 0xad6c47c1d82b6c6cULL, 0xc43195f562a43131ULL, 0xcd7487b9e8f37474ULL, 0xfff6e309f115f6f6ULL, 0x05460a438c4c4646ULL, 0x8aac092645a5acacULL, 0x1e893c970fb58989ULL, 0x5014a04428b41414ULL, 0xa3e15b42dfbae1e1ULL, 0x5816b04e2ca61616ULL, 0xe83acdd274f73a3aULL, 0xb9696fd0d2066969ULL, 0x2409482d12410909ULL, 0xdd70a7ade0d77070ULL, 0xe2b6d954716fb6b6ULL, 0x67d0ceb7bd1ed0d0ULL, 0x93ed3b7ec7d6ededULL, 0x17cc2edb85e2ccccULL, 0x15422a5784684242ULL, 0x5a98b4c22d2c9898ULL, 0xaaa4490e55eda4a4ULL, 0xa0285d8850752828ULL, 0x6d5cda31b8865c5cULL, 0xc7f8933fed6bf8f8ULL, 0x228644a411c28686ULL, }; static const u64 C7[256] = { 0x186018c07830d818ULL, 0x238c2305af462623ULL, 0xc63fc67ef991b8c6ULL, 0xe887e8136fcdfbe8ULL, 0x8726874ca113cb87ULL, 0xb8dab8a9626d11b8ULL, 
0x0104010805020901ULL, 0x4f214f426e9e0d4fULL, 0x36d836adee6c9b36ULL, 0xa6a2a6590451ffa6ULL, 0xd26fd2debdb90cd2ULL, 0xf5f3f5fb06f70ef5ULL, 0x79f979ef80f29679ULL, 0x6fa16f5fcede306fULL, 0x917e91fcef3f6d91ULL, 0x525552aa07a4f852ULL, 0x609d6027fdc04760ULL, 0xbccabc89766535bcULL, 0x9b569baccd2b379bULL, 0x8e028e048c018a8eULL, 0xa3b6a371155bd2a3ULL, 0x0c300c603c186c0cULL, 0x7bf17bff8af6847bULL, 0x35d435b5e16a8035ULL, 0x1d741de8693af51dULL, 0xe0a7e05347ddb3e0ULL, 0xd77bd7f6acb321d7ULL, 0xc22fc25eed999cc2ULL, 0x2eb82e6d965c432eULL, 0x4b314b627a96294bULL, 0xfedffea321e15dfeULL, 0x5741578216aed557ULL, 0x155415a8412abd15ULL, 0x77c1779fb6eee877ULL, 0x37dc37a5eb6e9237ULL, 0xe5b3e57b56d79ee5ULL, 0x9f469f8cd923139fULL, 0xf0e7f0d317fd23f0ULL, 0x4a354a6a7f94204aULL, 0xda4fda9e95a944daULL, 0x587d58fa25b0a258ULL, 0xc903c906ca8fcfc9ULL, 0x29a429558d527c29ULL, 0x0a280a5022145a0aULL, 0xb1feb1e14f7f50b1ULL, 0xa0baa0691a5dc9a0ULL, 0x6bb16b7fdad6146bULL, 0x852e855cab17d985ULL, 0xbdcebd8173673cbdULL, 0x5d695dd234ba8f5dULL, 0x1040108050209010ULL, 0xf4f7f4f303f507f4ULL, 0xcb0bcb16c08bddcbULL, 0x3ef83eedc67cd33eULL, 0x05140528110a2d05ULL, 0x6781671fe6ce7867ULL, 0xe4b7e47353d597e4ULL, 0x279c2725bb4e0227ULL, 0x4119413258827341ULL, 0x8b168b2c9d0ba78bULL, 0xa7a6a7510153f6a7ULL, 0x7de97dcf94fab27dULL, 0x956e95dcfb374995ULL, 0xd847d88e9fad56d8ULL, 0xfbcbfb8b30eb70fbULL, 0xee9fee2371c1cdeeULL, 0x7ced7cc791f8bb7cULL, 0x66856617e3cc7166ULL, 0xdd53dda68ea77bddULL, 0x175c17b84b2eaf17ULL, 0x47014702468e4547ULL, 0x9e429e84dc211a9eULL, 0xca0fca1ec589d4caULL, 0x2db42d75995a582dULL, 0xbfc6bf9179632ebfULL, 0x071c07381b0e3f07ULL, 0xad8ead012347acadULL, 0x5a755aea2fb4b05aULL, 0x8336836cb51bef83ULL, 0x33cc3385ff66b633ULL, 0x6391633ff2c65c63ULL, 0x020802100a041202ULL, 0xaa92aa39384993aaULL, 0x71d971afa8e2de71ULL, 0xc807c80ecf8dc6c8ULL, 0x196419c87d32d119ULL, 0x4939497270923b49ULL, 0xd943d9869aaf5fd9ULL, 0xf2eff2c31df931f2ULL, 0xe3abe34b48dba8e3ULL, 0x5b715be22ab6b95bULL, 0x881a8834920dbc88ULL, 
0x9a529aa4c8293e9aULL, 0x2698262dbe4c0b26ULL, 0x32c8328dfa64bf32ULL, 0xb0fab0e94a7d59b0ULL, 0xe983e91b6acff2e9ULL, 0x0f3c0f78331e770fULL, 0xd573d5e6a6b733d5ULL, 0x803a8074ba1df480ULL, 0xbec2be997c6127beULL, 0xcd13cd26de87ebcdULL, 0x34d034bde4688934ULL, 0x483d487a75903248ULL, 0xffdbffab24e354ffULL, 0x7af57af78ff48d7aULL, 0x907a90f4ea3d6490ULL, 0x5f615fc23ebe9d5fULL, 0x2080201da0403d20ULL, 0x68bd6867d5d00f68ULL, 0x1a681ad07234ca1aULL, 0xae82ae192c41b7aeULL, 0xb4eab4c95e757db4ULL, 0x544d549a19a8ce54ULL, 0x937693ece53b7f93ULL, 0x2288220daa442f22ULL, 0x648d6407e9c86364ULL, 0xf1e3f1db12ff2af1ULL, 0x73d173bfa2e6cc73ULL, 0x124812905a248212ULL, 0x401d403a5d807a40ULL, 0x0820084028104808ULL, 0xc32bc356e89b95c3ULL, 0xec97ec337bc5dfecULL, 0xdb4bdb9690ab4ddbULL, 0xa1bea1611f5fc0a1ULL, 0x8d0e8d1c8307918dULL, 0x3df43df5c97ac83dULL, 0x976697ccf1335b97ULL, 0x0000000000000000ULL, 0xcf1bcf36d483f9cfULL, 0x2bac2b4587566e2bULL, 0x76c57697b3ece176ULL, 0x82328264b019e682ULL, 0xd67fd6fea9b128d6ULL, 0x1b6c1bd87736c31bULL, 0xb5eeb5c15b7774b5ULL, 0xaf86af112943beafULL, 0x6ab56a77dfd41d6aULL, 0x505d50ba0da0ea50ULL, 0x450945124c8a5745ULL, 0xf3ebf3cb18fb38f3ULL, 0x30c0309df060ad30ULL, 0xef9bef2b74c3c4efULL, 0x3ffc3fe5c37eda3fULL, 0x554955921caac755ULL, 0xa2b2a2791059dba2ULL, 0xea8fea0365c9e9eaULL, 0x6589650fecca6a65ULL, 0xbad2bab9686903baULL, 0x2fbc2f65935e4a2fULL, 0xc027c04ee79d8ec0ULL, 0xde5fdebe81a160deULL, 0x1c701ce06c38fc1cULL, 0xfdd3fdbb2ee746fdULL, 0x4d294d52649a1f4dULL, 0x927292e4e0397692ULL, 0x75c9758fbceafa75ULL, 0x061806301e0c3606ULL, 0x8a128a249809ae8aULL, 0xb2f2b2f940794bb2ULL, 0xe6bfe66359d185e6ULL, 0x0e380e70361c7e0eULL, 0x1f7c1ff8633ee71fULL, 0x62956237f7c45562ULL, 0xd477d4eea3b53ad4ULL, 0xa89aa829324d81a8ULL, 0x966296c4f4315296ULL, 0xf9c3f99b3aef62f9ULL, 0xc533c566f697a3c5ULL, 0x25942535b14a1025ULL, 0x597959f220b2ab59ULL, 0x842a8454ae15d084ULL, 0x72d572b7a7e4c572ULL, 0x39e439d5dd72ec39ULL, 0x4c2d4c5a6198164cULL, 0x5e655eca3bbc945eULL, 0x78fd78e785f09f78ULL, 
0x38e038ddd870e538ULL, 0x8c0a8c148605988cULL, 0xd163d1c6b2bf17d1ULL, 0xa5aea5410b57e4a5ULL, 0xe2afe2434dd9a1e2ULL, 0x6199612ff8c24e61ULL, 0xb3f6b3f1457b42b3ULL, 0x21842115a5423421ULL, 0x9c4a9c94d625089cULL, 0x1e781ef0663cee1eULL, 0x4311432252866143ULL, 0xc73bc776fc93b1c7ULL, 0xfcd7fcb32be54ffcULL, 0x0410042014082404ULL, 0x515951b208a2e351ULL, 0x995e99bcc72f2599ULL, 0x6da96d4fc4da226dULL, 0x0d340d68391a650dULL, 0xfacffa8335e979faULL, 0xdf5bdfb684a369dfULL, 0x7ee57ed79bfca97eULL, 0x2490243db4481924ULL, 0x3bec3bc5d776fe3bULL, 0xab96ab313d4b9aabULL, 0xce1fce3ed181f0ceULL, 0x1144118855229911ULL, 0x8f068f0c8903838fULL, 0x4e254e4a6b9c044eULL, 0xb7e6b7d1517366b7ULL, 0xeb8beb0b60cbe0ebULL, 0x3cf03cfdcc78c13cULL, 0x813e817cbf1ffd81ULL, 0x946a94d4fe354094ULL, 0xf7fbf7eb0cf31cf7ULL, 0xb9deb9a1676f18b9ULL, 0x134c13985f268b13ULL, 0x2cb02c7d9c58512cULL, 0xd36bd3d6b8bb05d3ULL, 0xe7bbe76b5cd38ce7ULL, 0x6ea56e57cbdc396eULL, 0xc437c46ef395aac4ULL, 0x030c03180f061b03ULL, 0x5645568a13acdc56ULL, 0x440d441a49885e44ULL, 0x7fe17fdf9efea07fULL, 0xa99ea921374f88a9ULL, 0x2aa82a4d8254672aULL, 0xbbd6bbb16d6b0abbULL, 0xc123c146e29f87c1ULL, 0x535153a202a6f153ULL, 0xdc57dcae8ba572dcULL, 0x0b2c0b582716530bULL, 0x9d4e9d9cd327019dULL, 0x6cad6c47c1d82b6cULL, 0x31c43195f562a431ULL, 0x74cd7487b9e8f374ULL, 0xf6fff6e309f115f6ULL, 0x4605460a438c4c46ULL, 0xac8aac092645a5acULL, 0x891e893c970fb589ULL, 0x145014a04428b414ULL, 0xe1a3e15b42dfbae1ULL, 0x165816b04e2ca616ULL, 0x3ae83acdd274f73aULL, 0x69b9696fd0d20669ULL, 0x092409482d124109ULL, 0x70dd70a7ade0d770ULL, 0xb6e2b6d954716fb6ULL, 0xd067d0ceb7bd1ed0ULL, 0xed93ed3b7ec7d6edULL, 0xcc17cc2edb85e2ccULL, 0x4215422a57846842ULL, 0x985a98b4c22d2c98ULL, 0xa4aaa4490e55eda4ULL, 0x28a0285d88507528ULL, 0x5c6d5cda31b8865cULL, 0xf8c7f8933fed6bf8ULL, 0x86228644a411c286ULL, }; static const u64 rc[WHIRLPOOL_ROUNDS] = { 0x1823c6e887b8014fULL, 0x36a6d2f5796f9152ULL, 0x60bc9b8ea30c7b35ULL, 0x1de0d7c22e4bfe57ULL, 0x157737e59ff04adaULL, 0x58c9290ab1a06b85ULL, 0xbd5d10f4cb3e0567ULL, 
0xe427418ba77d95d8ULL, 0xfbee7c66dd17479eULL, 0xca2dbf07ad5a8333ULL, }; /** * The core Whirlpool transform. */ static void wp512_process_buffer(struct wp512_ctx *wctx) { int i, r; u64 K[8]; /* the round key */ u64 block[8]; /* mu(buffer) */ u64 state[8]; /* the cipher state */ u64 L[8]; const __be64 *buffer = (const __be64 *)wctx->buffer; for (i = 0; i < 8; i++) block[i] = be64_to_cpu(buffer[i]); state[0] = block[0] ^ (K[0] = wctx->hash[0]); state[1] = block[1] ^ (K[1] = wctx->hash[1]); state[2] = block[2] ^ (K[2] = wctx->hash[2]); state[3] = block[3] ^ (K[3] = wctx->hash[3]); state[4] = block[4] ^ (K[4] = wctx->hash[4]); state[5] = block[5] ^ (K[5] = wctx->hash[5]); state[6] = block[6] ^ (K[6] = wctx->hash[6]); state[7] = block[7] ^ (K[7] = wctx->hash[7]); for (r = 0; r < WHIRLPOOL_ROUNDS; r++) { L[0] = C0[(int)(K[0] >> 56) ] ^ C1[(int)(K[7] >> 48) & 0xff] ^ C2[(int)(K[6] >> 40) & 0xff] ^ C3[(int)(K[5] >> 32) & 0xff] ^ C4[(int)(K[4] >> 24) & 0xff] ^ C5[(int)(K[3] >> 16) & 0xff] ^ C6[(int)(K[2] >> 8) & 0xff] ^ C7[(int)(K[1] ) & 0xff] ^ rc[r]; L[1] = C0[(int)(K[1] >> 56) ] ^ C1[(int)(K[0] >> 48) & 0xff] ^ C2[(int)(K[7] >> 40) & 0xff] ^ C3[(int)(K[6] >> 32) & 0xff] ^ C4[(int)(K[5] >> 24) & 0xff] ^ C5[(int)(K[4] >> 16) & 0xff] ^ C6[(int)(K[3] >> 8) & 0xff] ^ C7[(int)(K[2] ) & 0xff]; L[2] = C0[(int)(K[2] >> 56) ] ^ C1[(int)(K[1] >> 48) & 0xff] ^ C2[(int)(K[0] >> 40) & 0xff] ^ C3[(int)(K[7] >> 32) & 0xff] ^ C4[(int)(K[6] >> 24) & 0xff] ^ C5[(int)(K[5] >> 16) & 0xff] ^ C6[(int)(K[4] >> 8) & 0xff] ^ C7[(int)(K[3] ) & 0xff]; L[3] = C0[(int)(K[3] >> 56) ] ^ C1[(int)(K[2] >> 48) & 0xff] ^ C2[(int)(K[1] >> 40) & 0xff] ^ C3[(int)(K[0] >> 32) & 0xff] ^ C4[(int)(K[7] >> 24) & 0xff] ^ C5[(int)(K[6] >> 16) & 0xff] ^ C6[(int)(K[5] >> 8) & 0xff] ^ C7[(int)(K[4] ) & 0xff]; L[4] = C0[(int)(K[4] >> 56) ] ^ C1[(int)(K[3] >> 48) & 0xff] ^ C2[(int)(K[2] >> 40) & 0xff] ^ C3[(int)(K[1] >> 32) & 0xff] ^ C4[(int)(K[0] >> 24) & 0xff] ^ C5[(int)(K[7] >> 16) & 0xff] ^ C6[(int)(K[6] >> 8) & 0xff] 
^ C7[(int)(K[5] ) & 0xff]; L[5] = C0[(int)(K[5] >> 56) ] ^ C1[(int)(K[4] >> 48) & 0xff] ^ C2[(int)(K[3] >> 40) & 0xff] ^ C3[(int)(K[2] >> 32) & 0xff] ^ C4[(int)(K[1] >> 24) & 0xff] ^ C5[(int)(K[0] >> 16) & 0xff] ^ C6[(int)(K[7] >> 8) & 0xff] ^ C7[(int)(K[6] ) & 0xff]; L[6] = C0[(int)(K[6] >> 56) ] ^ C1[(int)(K[5] >> 48) & 0xff] ^ C2[(int)(K[4] >> 40) & 0xff] ^ C3[(int)(K[3] >> 32) & 0xff] ^ C4[(int)(K[2] >> 24) & 0xff] ^ C5[(int)(K[1] >> 16) & 0xff] ^ C6[(int)(K[0] >> 8) & 0xff] ^ C7[(int)(K[7] ) & 0xff]; L[7] = C0[(int)(K[7] >> 56) ] ^ C1[(int)(K[6] >> 48) & 0xff] ^ C2[(int)(K[5] >> 40) & 0xff] ^ C3[(int)(K[4] >> 32) & 0xff] ^ C4[(int)(K[3] >> 24) & 0xff] ^ C5[(int)(K[2] >> 16) & 0xff] ^ C6[(int)(K[1] >> 8) & 0xff] ^ C7[(int)(K[0] ) & 0xff]; K[0] = L[0]; K[1] = L[1]; K[2] = L[2]; K[3] = L[3]; K[4] = L[4]; K[5] = L[5]; K[6] = L[6]; K[7] = L[7]; L[0] = C0[(int)(state[0] >> 56) ] ^ C1[(int)(state[7] >> 48) & 0xff] ^ C2[(int)(state[6] >> 40) & 0xff] ^ C3[(int)(state[5] >> 32) & 0xff] ^ C4[(int)(state[4] >> 24) & 0xff] ^ C5[(int)(state[3] >> 16) & 0xff] ^ C6[(int)(state[2] >> 8) & 0xff] ^ C7[(int)(state[1] ) & 0xff] ^ K[0]; L[1] = C0[(int)(state[1] >> 56) ] ^ C1[(int)(state[0] >> 48) & 0xff] ^ C2[(int)(state[7] >> 40) & 0xff] ^ C3[(int)(state[6] >> 32) & 0xff] ^ C4[(int)(state[5] >> 24) & 0xff] ^ C5[(int)(state[4] >> 16) & 0xff] ^ C6[(int)(state[3] >> 8) & 0xff] ^ C7[(int)(state[2] ) & 0xff] ^ K[1]; L[2] = C0[(int)(state[2] >> 56) ] ^ C1[(int)(state[1] >> 48) & 0xff] ^ C2[(int)(state[0] >> 40) & 0xff] ^ C3[(int)(state[7] >> 32) & 0xff] ^ C4[(int)(state[6] >> 24) & 0xff] ^ C5[(int)(state[5] >> 16) & 0xff] ^ C6[(int)(state[4] >> 8) & 0xff] ^ C7[(int)(state[3] ) & 0xff] ^ K[2]; L[3] = C0[(int)(state[3] >> 56) ] ^ C1[(int)(state[2] >> 48) & 0xff] ^ C2[(int)(state[1] >> 40) & 0xff] ^ C3[(int)(state[0] >> 32) & 0xff] ^ C4[(int)(state[7] >> 24) & 0xff] ^ C5[(int)(state[6] >> 16) & 0xff] ^ C6[(int)(state[5] >> 8) & 0xff] ^ C7[(int)(state[4] ) & 0xff] ^ K[3]; L[4] = 
C0[(int)(state[4] >> 56) ] ^ C1[(int)(state[3] >> 48) & 0xff] ^ C2[(int)(state[2] >> 40) & 0xff] ^ C3[(int)(state[1] >> 32) & 0xff] ^ C4[(int)(state[0] >> 24) & 0xff] ^ C5[(int)(state[7] >> 16) & 0xff] ^ C6[(int)(state[6] >> 8) & 0xff] ^ C7[(int)(state[5] ) & 0xff] ^ K[4]; L[5] = C0[(int)(state[5] >> 56) ] ^ C1[(int)(state[4] >> 48) & 0xff] ^ C2[(int)(state[3] >> 40) & 0xff] ^ C3[(int)(state[2] >> 32) & 0xff] ^ C4[(int)(state[1] >> 24) & 0xff] ^ C5[(int)(state[0] >> 16) & 0xff] ^ C6[(int)(state[7] >> 8) & 0xff] ^ C7[(int)(state[6] ) & 0xff] ^ K[5]; L[6] = C0[(int)(state[6] >> 56) ] ^ C1[(int)(state[5] >> 48) & 0xff] ^ C2[(int)(state[4] >> 40) & 0xff] ^ C3[(int)(state[3] >> 32) & 0xff] ^ C4[(int)(state[2] >> 24) & 0xff] ^ C5[(int)(state[1] >> 16) & 0xff] ^ C6[(int)(state[0] >> 8) & 0xff] ^ C7[(int)(state[7] ) & 0xff] ^ K[6]; L[7] = C0[(int)(state[7] >> 56) ] ^ C1[(int)(state[6] >> 48) & 0xff] ^ C2[(int)(state[5] >> 40) & 0xff] ^ C3[(int)(state[4] >> 32) & 0xff] ^ C4[(int)(state[3] >> 24) & 0xff] ^ C5[(int)(state[2] >> 16) & 0xff] ^ C6[(int)(state[1] >> 8) & 0xff] ^ C7[(int)(state[0] ) & 0xff] ^ K[7]; state[0] = L[0]; state[1] = L[1]; state[2] = L[2]; state[3] = L[3]; state[4] = L[4]; state[5] = L[5]; state[6] = L[6]; state[7] = L[7]; } /* * apply the Miyaguchi-Preneel compression function: */ wctx->hash[0] ^= state[0] ^ block[0]; wctx->hash[1] ^= state[1] ^ block[1]; wctx->hash[2] ^= state[2] ^ block[2]; wctx->hash[3] ^= state[3] ^ block[3]; wctx->hash[4] ^= state[4] ^ block[4]; wctx->hash[5] ^= state[5] ^ block[5]; wctx->hash[6] ^= state[6] ^ block[6]; wctx->hash[7] ^= state[7] ^ block[7]; } static int wp512_init(struct shash_desc *desc) { struct wp512_ctx *wctx = shash_desc_ctx(desc); int i; memset(wctx->bitLength, 0, 32); wctx->bufferBits = wctx->bufferPos = 0; wctx->buffer[0] = 0; for (i = 0; i < 8; i++) { wctx->hash[i] = 0L; } return 0; } static int wp512_update(struct shash_desc *desc, const u8 *source, unsigned int len) { struct wp512_ctx *wctx = 
shash_desc_ctx(desc); int sourcePos = 0; unsigned int bits_len = len * 8; // convert to number of bits int sourceGap = (8 - ((int)bits_len & 7)) & 7; int bufferRem = wctx->bufferBits & 7; int i; u32 b, carry; u8 *buffer = wctx->buffer; u8 *bitLength = wctx->bitLength; int bufferBits = wctx->bufferBits; int bufferPos = wctx->bufferPos; u64 value = bits_len; for (i = 31, carry = 0; i >= 0 && (carry != 0 || value != 0ULL); i--) { carry += bitLength[i] + ((u32)value & 0xff); bitLength[i] = (u8)carry; carry >>= 8; value >>= 8; } while (bits_len > 8) { b = ((source[sourcePos] << sourceGap) & 0xff) | ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap)); buffer[bufferPos++] |= (u8)(b >> bufferRem); bufferBits += 8 - bufferRem; if (bufferBits == WP512_BLOCK_SIZE * 8) { wp512_process_buffer(wctx); bufferBits = bufferPos = 0; } buffer[bufferPos] = b << (8 - bufferRem); bufferBits += bufferRem; bits_len -= 8; sourcePos++; } if (bits_len > 0) { b = (source[sourcePos] << sourceGap) & 0xff; buffer[bufferPos] |= b >> bufferRem; } else { b = 0; } if (bufferRem + bits_len < 8) { bufferBits += bits_len; } else { bufferPos++; bufferBits += 8 - bufferRem; bits_len -= 8 - bufferRem; if (bufferBits == WP512_BLOCK_SIZE * 8) { wp512_process_buffer(wctx); bufferBits = bufferPos = 0; } buffer[bufferPos] = b << (8 - bufferRem); bufferBits += (int)bits_len; } wctx->bufferBits = bufferBits; wctx->bufferPos = bufferPos; return 0; } static int wp512_final(struct shash_desc *desc, u8 *out) { struct wp512_ctx *wctx = shash_desc_ctx(desc); int i; u8 *buffer = wctx->buffer; u8 *bitLength = wctx->bitLength; int bufferBits = wctx->bufferBits; int bufferPos = wctx->bufferPos; __be64 *digest = (__be64 *)out; buffer[bufferPos] |= 0x80U >> (bufferBits & 7); bufferPos++; if (bufferPos > WP512_BLOCK_SIZE - WP512_LENGTHBYTES) { if (bufferPos < WP512_BLOCK_SIZE) { memset(&buffer[bufferPos], 0, WP512_BLOCK_SIZE - bufferPos); } wp512_process_buffer(wctx); bufferPos = 0; } if (bufferPos < WP512_BLOCK_SIZE - 
WP512_LENGTHBYTES) { memset(&buffer[bufferPos], 0, (WP512_BLOCK_SIZE - WP512_LENGTHBYTES) - bufferPos); } bufferPos = WP512_BLOCK_SIZE - WP512_LENGTHBYTES; memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], bitLength, WP512_LENGTHBYTES); wp512_process_buffer(wctx); for (i = 0; i < WP512_DIGEST_SIZE/8; i++) digest[i] = cpu_to_be64(wctx->hash[i]); wctx->bufferBits = bufferBits; wctx->bufferPos = bufferPos; return 0; } static int wp384_final(struct shash_desc *desc, u8 *out) { u8 D[64]; wp512_final(desc, D); memcpy (out, D, WP384_DIGEST_SIZE); memset (D, 0, WP512_DIGEST_SIZE); return 0; } static int wp256_final(struct shash_desc *desc, u8 *out) { u8 D[64]; wp512_final(desc, D); memcpy (out, D, WP256_DIGEST_SIZE); memset (D, 0, WP512_DIGEST_SIZE); return 0; } static struct shash_alg wp512 = { .digestsize = WP512_DIGEST_SIZE, .init = wp512_init, .update = wp512_update, .final = wp512_final, .descsize = sizeof(struct wp512_ctx), .base = { .cra_name = "wp512", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = WP512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static struct shash_alg wp384 = { .digestsize = WP384_DIGEST_SIZE, .init = wp512_init, .update = wp512_update, .final = wp384_final, .descsize = sizeof(struct wp512_ctx), .base = { .cra_name = "wp384", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = WP512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static struct shash_alg wp256 = { .digestsize = WP256_DIGEST_SIZE, .init = wp512_init, .update = wp512_update, .final = wp256_final, .descsize = sizeof(struct wp512_ctx), .base = { .cra_name = "wp256", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = WP512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init wp512_mod_init(void) { int ret = 0; ret = crypto_register_shash(&wp512); if (ret < 0) goto out; ret = crypto_register_shash(&wp384); if (ret < 0) { crypto_unregister_shash(&wp512); goto out; } ret = crypto_register_shash(&wp256); if (ret < 0) { crypto_unregister_shash(&wp512); 
crypto_unregister_shash(&wp384); } out: return ret; } static void __exit wp512_mod_fini(void) { crypto_unregister_shash(&wp512); crypto_unregister_shash(&wp384); crypto_unregister_shash(&wp256); } MODULE_ALIAS("wp384"); MODULE_ALIAS("wp256"); module_init(wp512_mod_init); module_exit(wp512_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Whirlpool Message Digest Algorithm");
gpl-2.0
MechanicalAndroids/android_kernel_motorola_msm8226
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
5181
4990
/* * Copyright (c) 2007 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/kernel.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/mlx4/driver.h> #include "mlx4_en.h" static int mlx4_en_test_registers(struct mlx4_en_priv *priv) { return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) { struct sk_buff *skb; struct ethhdr *ethh; unsigned char *packet; unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD; unsigned int i; int err; /* build the pkt before xmit */ skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN); if (!skb) { en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n"); return -ENOMEM; } skb_reserve(skb, NET_IP_ALIGN); ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); packet = (unsigned char *)skb_put(skb, packet_size); memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); memset(ethh->h_source, 0, ETH_ALEN); ethh->h_proto = htons(ETH_P_ARP); skb_set_mac_header(skb, 0); for (i = 0; i < packet_size; ++i) /* fill our packet */ packet[i] = (unsigned char)(i & 0xff); /* xmit the pkt */ err = mlx4_en_xmit(skb, priv->dev); return err; } static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) { u32 loopback_ok = 0; int i; priv->loopback_ok = 0; priv->validate_loopback = 1; /* xmit */ if (mlx4_en_test_loopback_xmit(priv)) { en_err(priv, "Transmitting loopback packet failed\n"); goto mlx4_en_test_loopback_exit; } /* polling for result */ for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) { msleep(MLX4_EN_LOOPBACK_TIMEOUT); if (priv->loopback_ok) { loopback_ok = 1; break; } } if (!loopback_ok) en_err(priv, "Loopback packet didn't arrive\n"); mlx4_en_test_loopback_exit: priv->validate_loopback = 0; return !loopback_ok; } static int mlx4_en_test_link(struct mlx4_en_priv *priv) { if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) return -ENOMEM; if (priv->port_state.link_state == 1) return 0; else 
return 1; } static int mlx4_en_test_speed(struct mlx4_en_priv *priv) { if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) return -ENOMEM; /* The device currently only supports 10G speed */ if (priv->port_state.link_speed != SPEED_10000) return priv->port_state.link_speed; return 0; } void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_tx_ring *tx_ring; int i, carrier_ok; memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); if (*flags & ETH_TEST_FL_OFFLINE) { /* disable the interface */ carrier_ok = netif_carrier_ok(dev); netif_carrier_off(dev); retry_tx: /* Wait until all tx queues are empty. * there should not be any additional incoming traffic * since we turned the carrier off */ msleep(200); for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) { tx_ring = &priv->tx_ring[i]; if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb)) goto retry_tx; } if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { buf[3] = mlx4_en_test_registers(priv); buf[4] = mlx4_en_test_loopback(priv); } if (carrier_ok) netif_carrier_on(dev); } buf[0] = mlx4_test_interrupts(mdev->dev); buf[1] = mlx4_en_test_link(priv); buf[2] = mlx4_en_test_speed(priv); for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) { if (buf[i]) *flags |= ETH_TEST_FL_FAILED; } }
gpl-2.0
friedrich420/Note-4-TMO-AEL-Kernel-Lollipop-Source
sound/soc/blackfin/bf5xx-sport.c
7485
28031
/* * File: bf5xx_sport.c * Based on: * Author: Roy Huang <roy.huang@analog.com> * * Created: Tue Sep 21 10:52:42 CEST 2004 * Description: * Blackfin SPORT Driver * * Copyright 2004-2007 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/bug.h> #include <linux/module.h> #include <asm/portmux.h> #include <asm/dma.h> #include <asm/blackfin.h> #include <asm/cacheflush.h> #include "bf5xx-sport.h" /* delay between frame sync pulse and first data bit in multichannel mode */ #define FRAME_DELAY (1<<12) /* note: multichannel is in units of 8 channels, * tdm_count is # channels NOT / 8 ! */ int sport_set_multichannel(struct sport_device *sport, int tdm_count, u32 mask, int packed) { pr_debug("%s tdm_count=%d mask:0x%08x packed=%d\n", __func__, tdm_count, mask, packed); if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN)) return -EBUSY; if (tdm_count & 0x7) return -EINVAL; if (tdm_count > 32) return -EINVAL; /* Only support less than 32 channels now */ if (tdm_count) { sport->regs->mcmc1 = ((tdm_count>>3)-1) << 12; sport->regs->mcmc2 = FRAME_DELAY | MCMEN | \ (packed ? 
(MCDTXPE|MCDRXPE) : 0); sport->regs->mtcs0 = mask; sport->regs->mrcs0 = mask; sport->regs->mtcs1 = 0; sport->regs->mrcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mrcs2 = 0; sport->regs->mtcs3 = 0; sport->regs->mrcs3 = 0; } else { sport->regs->mcmc1 = 0; sport->regs->mcmc2 = 0; sport->regs->mtcs0 = 0; sport->regs->mrcs0 = 0; } sport->regs->mtcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mtcs3 = 0; sport->regs->mrcs1 = 0; sport->regs->mrcs2 = 0; sport->regs->mrcs3 = 0; SSYNC(); return 0; } EXPORT_SYMBOL(sport_set_multichannel); int sport_config_rx(struct sport_device *sport, unsigned int rcr1, unsigned int rcr2, unsigned int clkdiv, unsigned int fsdiv) { if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN)) return -EBUSY; sport->regs->rcr1 = rcr1; sport->regs->rcr2 = rcr2; sport->regs->rclkdiv = clkdiv; sport->regs->rfsdiv = fsdiv; SSYNC(); return 0; } EXPORT_SYMBOL(sport_config_rx); int sport_config_tx(struct sport_device *sport, unsigned int tcr1, unsigned int tcr2, unsigned int clkdiv, unsigned int fsdiv) { if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN)) return -EBUSY; sport->regs->tcr1 = tcr1; sport->regs->tcr2 = tcr2; sport->regs->tclkdiv = clkdiv; sport->regs->tfsdiv = fsdiv; SSYNC(); return 0; } EXPORT_SYMBOL(sport_config_tx); static void setup_desc(struct dmasg *desc, void *buf, int fragcount, size_t fragsize, unsigned int cfg, unsigned int x_count, unsigned int ycount, size_t wdsize) { int i; for (i = 0; i < fragcount; ++i) { desc[i].next_desc_addr = &(desc[i + 1]); desc[i].start_addr = (unsigned long)buf + i*fragsize; desc[i].cfg = cfg; desc[i].x_count = x_count; desc[i].x_modify = wdsize; desc[i].y_count = ycount; desc[i].y_modify = wdsize; } /* make circular */ desc[fragcount-1].next_desc_addr = desc; pr_debug("setup desc: desc0=%p, next0=%p, desc1=%p," "next1=%p\nx_count=%x,y_count=%x,addr=0x%lx,cfs=0x%x\n", desc, desc[0].next_desc_addr, desc+1, desc[1].next_desc_addr, desc[0].x_count, desc[0].y_count, desc[0].start_addr, 
desc[0].cfg); } static int sport_start(struct sport_device *sport) { enable_dma(sport->dma_rx_chan); enable_dma(sport->dma_tx_chan); sport->regs->rcr1 |= RSPEN; sport->regs->tcr1 |= TSPEN; SSYNC(); return 0; } static int sport_stop(struct sport_device *sport) { sport->regs->tcr1 &= ~TSPEN; sport->regs->rcr1 &= ~RSPEN; SSYNC(); disable_dma(sport->dma_rx_chan); disable_dma(sport->dma_tx_chan); return 0; } static inline int sport_hook_rx_dummy(struct sport_device *sport) { struct dmasg *desc, temp_desc; unsigned long flags; BUG_ON(sport->dummy_rx_desc == NULL); BUG_ON(sport->curr_rx_desc == sport->dummy_rx_desc); /* Maybe the dummy buffer descriptor ring is damaged */ sport->dummy_rx_desc->next_desc_addr = sport->dummy_rx_desc + 1; local_irq_save(flags); desc = get_dma_next_desc_ptr(sport->dma_rx_chan); /* Copy the descriptor which will be damaged to backup */ temp_desc = *desc; desc->x_count = sport->dummy_count / 2; desc->y_count = 0; desc->next_desc_addr = sport->dummy_rx_desc; local_irq_restore(flags); /* Waiting for dummy buffer descriptor is already hooked*/ while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) - sizeof(struct dmasg)) != sport->dummy_rx_desc) continue; sport->curr_rx_desc = sport->dummy_rx_desc; /* Restore the damaged descriptor */ *desc = temp_desc; return 0; } static inline int sport_rx_dma_start(struct sport_device *sport, int dummy) { if (dummy) { sport->dummy_rx_desc->next_desc_addr = sport->dummy_rx_desc; sport->curr_rx_desc = sport->dummy_rx_desc; } else sport->curr_rx_desc = sport->dma_rx_desc; set_dma_next_desc_addr(sport->dma_rx_chan, sport->curr_rx_desc); set_dma_x_count(sport->dma_rx_chan, 0); set_dma_x_modify(sport->dma_rx_chan, 0); set_dma_config(sport->dma_rx_chan, (DMAFLOW_LARGE | NDSIZE_9 | \ WDSIZE_32 | WNR)); set_dma_curr_addr(sport->dma_rx_chan, sport->curr_rx_desc->start_addr); SSYNC(); return 0; } static inline int sport_tx_dma_start(struct sport_device *sport, int dummy) { if (dummy) { sport->dummy_tx_desc->next_desc_addr = 
sport->dummy_tx_desc; sport->curr_tx_desc = sport->dummy_tx_desc; } else sport->curr_tx_desc = sport->dma_tx_desc; set_dma_next_desc_addr(sport->dma_tx_chan, sport->curr_tx_desc); set_dma_x_count(sport->dma_tx_chan, 0); set_dma_x_modify(sport->dma_tx_chan, 0); set_dma_config(sport->dma_tx_chan, (DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32)); set_dma_curr_addr(sport->dma_tx_chan, sport->curr_tx_desc->start_addr); SSYNC(); return 0; } int sport_rx_start(struct sport_device *sport) { unsigned long flags; pr_debug("%s enter\n", __func__); if (sport->rx_run) return -EBUSY; if (sport->tx_run) { /* tx is running, rx is not running */ BUG_ON(sport->dma_rx_desc == NULL); BUG_ON(sport->curr_rx_desc != sport->dummy_rx_desc); local_irq_save(flags); while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) - sizeof(struct dmasg)) != sport->dummy_rx_desc) continue; sport->dummy_rx_desc->next_desc_addr = sport->dma_rx_desc; local_irq_restore(flags); sport->curr_rx_desc = sport->dma_rx_desc; } else { sport_tx_dma_start(sport, 1); sport_rx_dma_start(sport, 0); sport_start(sport); } sport->rx_run = 1; return 0; } EXPORT_SYMBOL(sport_rx_start); int sport_rx_stop(struct sport_device *sport) { pr_debug("%s enter\n", __func__); if (!sport->rx_run) return 0; if (sport->tx_run) { /* TX dma is still running, hook the dummy buffer */ sport_hook_rx_dummy(sport); } else { /* Both rx and tx dma will be stopped */ sport_stop(sport); sport->curr_rx_desc = NULL; sport->curr_tx_desc = NULL; } sport->rx_run = 0; return 0; } EXPORT_SYMBOL(sport_rx_stop); static inline int sport_hook_tx_dummy(struct sport_device *sport) { struct dmasg *desc, temp_desc; unsigned long flags; BUG_ON(sport->dummy_tx_desc == NULL); BUG_ON(sport->curr_tx_desc == sport->dummy_tx_desc); sport->dummy_tx_desc->next_desc_addr = sport->dummy_tx_desc + 1; /* Shorten the time on last normal descriptor */ local_irq_save(flags); desc = get_dma_next_desc_ptr(sport->dma_tx_chan); /* Store the descriptor which will be damaged */ temp_desc = *desc; 
desc->x_count = sport->dummy_count / 2; desc->y_count = 0; desc->next_desc_addr = sport->dummy_tx_desc; local_irq_restore(flags); /* Waiting for dummy buffer descriptor is already hooked*/ while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) - \ sizeof(struct dmasg)) != sport->dummy_tx_desc) continue; sport->curr_tx_desc = sport->dummy_tx_desc; /* Restore the damaged descriptor */ *desc = temp_desc; return 0; } int sport_tx_start(struct sport_device *sport) { unsigned long flags; pr_debug("%s: tx_run:%d, rx_run:%d\n", __func__, sport->tx_run, sport->rx_run); if (sport->tx_run) return -EBUSY; if (sport->rx_run) { BUG_ON(sport->dma_tx_desc == NULL); BUG_ON(sport->curr_tx_desc != sport->dummy_tx_desc); /* Hook the normal buffer descriptor */ local_irq_save(flags); while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) - sizeof(struct dmasg)) != sport->dummy_tx_desc) continue; sport->dummy_tx_desc->next_desc_addr = sport->dma_tx_desc; local_irq_restore(flags); sport->curr_tx_desc = sport->dma_tx_desc; } else { sport_tx_dma_start(sport, 0); /* Let rx dma run the dummy buffer */ sport_rx_dma_start(sport, 1); sport_start(sport); } sport->tx_run = 1; return 0; } EXPORT_SYMBOL(sport_tx_start); int sport_tx_stop(struct sport_device *sport) { if (!sport->tx_run) return 0; if (sport->rx_run) { /* RX is still running, hook the dummy buffer */ sport_hook_tx_dummy(sport); } else { /* Both rx and tx dma stopped */ sport_stop(sport); sport->curr_rx_desc = NULL; sport->curr_tx_desc = NULL; } sport->tx_run = 0; return 0; } EXPORT_SYMBOL(sport_tx_stop); static inline int compute_wdsize(size_t wdsize) { switch (wdsize) { case 1: return WDSIZE_8; case 2: return WDSIZE_16; case 4: default: return WDSIZE_32; } } int sport_config_rx_dma(struct sport_device *sport, void *buf, int fragcount, size_t fragsize) { unsigned int x_count; unsigned int y_count; unsigned int cfg; dma_addr_t addr; pr_debug("%s buf:%p, frag:%d, fragsize:0x%lx\n", __func__, \ buf, fragcount, fragsize); x_count = fragsize / 
sport->wdsize; y_count = 0; /* for fragments larger than 64k words we use 2d dma, * denote fragecount as two numbers' mutliply and both of them * are less than 64k.*/ if (x_count >= 0x10000) { int i, count = x_count; for (i = 16; i > 0; i--) { x_count = 1 << i; if ((count & (x_count - 1)) == 0) { y_count = count >> i; if (y_count < 0x10000) break; } } if (i == 0) return -EINVAL; } pr_debug("%s(x_count:0x%x, y_count:0x%x)\n", __func__, x_count, y_count); if (sport->dma_rx_desc) dma_free_coherent(NULL, sport->rx_desc_bytes, sport->dma_rx_desc, 0); /* Allocate a new descritor ring as current one. */ sport->dma_rx_desc = dma_alloc_coherent(NULL, \ fragcount * sizeof(struct dmasg), &addr, 0); sport->rx_desc_bytes = fragcount * sizeof(struct dmasg); if (!sport->dma_rx_desc) { pr_err("Failed to allocate memory for rx desc\n"); return -ENOMEM; } sport->rx_buf = buf; sport->rx_fragsize = fragsize; sport->rx_frags = fragcount; cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | WNR | \ (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */ if (y_count != 0) cfg |= DMA2D; setup_desc(sport->dma_rx_desc, buf, fragcount, fragsize, cfg|DMAEN, x_count, y_count, sport->wdsize); return 0; } EXPORT_SYMBOL(sport_config_rx_dma); int sport_config_tx_dma(struct sport_device *sport, void *buf, \ int fragcount, size_t fragsize) { unsigned int x_count; unsigned int y_count; unsigned int cfg; dma_addr_t addr; pr_debug("%s buf:%p, fragcount:%d, fragsize:0x%lx\n", __func__, buf, fragcount, fragsize); x_count = fragsize/sport->wdsize; y_count = 0; /* for fragments larger than 64k words we use 2d dma, * denote fragecount as two numbers' mutliply and both of them * are less than 64k.*/ if (x_count >= 0x10000) { int i, count = x_count; for (i = 16; i > 0; i--) { x_count = 1 << i; if ((count & (x_count - 1)) == 0) { y_count = count >> i; if (y_count < 0x10000) break; } } if (i == 0) return -EINVAL; } pr_debug("%s x_count:0x%x, y_count:0x%x\n", __func__, x_count, y_count); if (sport->dma_tx_desc) 
{ dma_free_coherent(NULL, sport->tx_desc_bytes, \ sport->dma_tx_desc, 0); } sport->dma_tx_desc = dma_alloc_coherent(NULL, \ fragcount * sizeof(struct dmasg), &addr, 0); sport->tx_desc_bytes = fragcount * sizeof(struct dmasg); if (!sport->dma_tx_desc) { pr_err("Failed to allocate memory for tx desc\n"); return -ENOMEM; } sport->tx_buf = buf; sport->tx_fragsize = fragsize; sport->tx_frags = fragcount; cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | \ (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */ if (y_count != 0) cfg |= DMA2D; setup_desc(sport->dma_tx_desc, buf, fragcount, fragsize, cfg|DMAEN, x_count, y_count, sport->wdsize); return 0; } EXPORT_SYMBOL(sport_config_tx_dma); /* setup dummy dma descriptor ring, which don't generate interrupts, * the x_modify is set to 0 */ static int sport_config_rx_dummy(struct sport_device *sport) { struct dmasg *desc; unsigned config; pr_debug("%s entered\n", __func__); if (L1_DATA_A_LENGTH) desc = l1_data_sram_zalloc(2 * sizeof(*desc)); else { dma_addr_t addr; desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); memset(desc, 0, 2 * sizeof(*desc)); } if (desc == NULL) { pr_err("Failed to allocate memory for dummy rx desc\n"); return -ENOMEM; } sport->dummy_rx_desc = desc; desc->start_addr = (unsigned long)sport->dummy_buf; config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize) | WNR | DMAEN; desc->cfg = config; desc->x_count = sport->dummy_count/sport->wdsize; desc->x_modify = sport->wdsize; desc->y_count = 0; desc->y_modify = 0; memcpy(desc+1, desc, sizeof(*desc)); desc->next_desc_addr = desc + 1; desc[1].next_desc_addr = desc; return 0; } static int sport_config_tx_dummy(struct sport_device *sport) { struct dmasg *desc; unsigned int config; pr_debug("%s entered\n", __func__); if (L1_DATA_A_LENGTH) desc = l1_data_sram_zalloc(2 * sizeof(*desc)); else { dma_addr_t addr; desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); memset(desc, 0, 2 * sizeof(*desc)); } if (!desc) { pr_err("Failed to 
allocate memory for dummy tx desc\n"); return -ENOMEM; } sport->dummy_tx_desc = desc; desc->start_addr = (unsigned long)sport->dummy_buf + \ sport->dummy_count; config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize) | DMAEN; desc->cfg = config; desc->x_count = sport->dummy_count/sport->wdsize; desc->x_modify = sport->wdsize; desc->y_count = 0; desc->y_modify = 0; memcpy(desc+1, desc, sizeof(*desc)); desc->next_desc_addr = desc + 1; desc[1].next_desc_addr = desc; return 0; } unsigned long sport_curr_offset_rx(struct sport_device *sport) { unsigned long curr = get_dma_curr_addr(sport->dma_rx_chan); return (unsigned char *)curr - sport->rx_buf; } EXPORT_SYMBOL(sport_curr_offset_rx); unsigned long sport_curr_offset_tx(struct sport_device *sport) { unsigned long curr = get_dma_curr_addr(sport->dma_tx_chan); return (unsigned char *)curr - sport->tx_buf; } EXPORT_SYMBOL(sport_curr_offset_tx); void sport_incfrag(struct sport_device *sport, int *frag, int tx) { ++(*frag); if (tx == 1 && *frag == sport->tx_frags) *frag = 0; if (tx == 0 && *frag == sport->rx_frags) *frag = 0; } EXPORT_SYMBOL(sport_incfrag); void sport_decfrag(struct sport_device *sport, int *frag, int tx) { --(*frag); if (tx == 1 && *frag == 0) *frag = sport->tx_frags; if (tx == 0 && *frag == 0) *frag = sport->rx_frags; } EXPORT_SYMBOL(sport_decfrag); static int sport_check_status(struct sport_device *sport, unsigned int *sport_stat, unsigned int *rx_stat, unsigned int *tx_stat) { int status = 0; if (sport_stat) { SSYNC(); status = sport->regs->stat; if (status & (TOVF|TUVF|ROVF|RUVF)) sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF)); SSYNC(); *sport_stat = status; } if (rx_stat) { SSYNC(); status = get_dma_curr_irqstat(sport->dma_rx_chan); if (status & (DMA_DONE|DMA_ERR)) clear_dma_irqstat(sport->dma_rx_chan); SSYNC(); *rx_stat = status; } if (tx_stat) { SSYNC(); status = get_dma_curr_irqstat(sport->dma_tx_chan); if (status & (DMA_DONE|DMA_ERR)) clear_dma_irqstat(sport->dma_tx_chan); SSYNC(); 
*tx_stat = status; } return 0; } int sport_dump_stat(struct sport_device *sport, char *buf, size_t len) { int ret; ret = snprintf(buf, len, "sts: 0x%04x\n" "rx dma %d sts: 0x%04x tx dma %d sts: 0x%04x\n", sport->regs->stat, sport->dma_rx_chan, get_dma_curr_irqstat(sport->dma_rx_chan), sport->dma_tx_chan, get_dma_curr_irqstat(sport->dma_tx_chan)); buf += ret; len -= ret; ret += snprintf(buf, len, "curr_rx_desc:0x%p, curr_tx_desc:0x%p\n" "dma_rx_desc:0x%p, dma_tx_desc:0x%p\n" "dummy_rx_desc:0x%p, dummy_tx_desc:0x%p\n", sport->curr_rx_desc, sport->curr_tx_desc, sport->dma_rx_desc, sport->dma_tx_desc, sport->dummy_rx_desc, sport->dummy_tx_desc); return ret; } static irqreturn_t rx_handler(int irq, void *dev_id) { unsigned int rx_stat; struct sport_device *sport = dev_id; pr_debug("%s enter\n", __func__); sport_check_status(sport, NULL, &rx_stat, NULL); if (!(rx_stat & DMA_DONE)) pr_err("rx dma is already stopped\n"); if (sport->rx_callback) { sport->rx_callback(sport->rx_data); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t tx_handler(int irq, void *dev_id) { unsigned int tx_stat; struct sport_device *sport = dev_id; pr_debug("%s enter\n", __func__); sport_check_status(sport, NULL, NULL, &tx_stat); if (!(tx_stat & DMA_DONE)) { pr_err("tx dma is already stopped\n"); return IRQ_HANDLED; } if (sport->tx_callback) { sport->tx_callback(sport->tx_data); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t err_handler(int irq, void *dev_id) { unsigned int status = 0; struct sport_device *sport = dev_id; pr_debug("%s\n", __func__); if (sport_check_status(sport, &status, NULL, NULL)) { pr_err("error checking status ??"); return IRQ_NONE; } if (status & (TOVF|TUVF|ROVF|RUVF)) { pr_info("sport status error:%s%s%s%s\n", status & TOVF ? " TOVF" : "", status & TUVF ? " TUVF" : "", status & ROVF ? " ROVF" : "", status & RUVF ? 
" RUVF" : ""); if (status & TOVF || status & TUVF) { disable_dma(sport->dma_tx_chan); if (sport->tx_run) sport_tx_dma_start(sport, 0); else sport_tx_dma_start(sport, 1); enable_dma(sport->dma_tx_chan); } else { disable_dma(sport->dma_rx_chan); if (sport->rx_run) sport_rx_dma_start(sport, 0); else sport_rx_dma_start(sport, 1); enable_dma(sport->dma_rx_chan); } } status = sport->regs->stat; if (status & (TOVF|TUVF|ROVF|RUVF)) sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF)); SSYNC(); if (sport->err_callback) sport->err_callback(sport->err_data); return IRQ_HANDLED; } int sport_set_rx_callback(struct sport_device *sport, void (*rx_callback)(void *), void *rx_data) { BUG_ON(rx_callback == NULL); sport->rx_callback = rx_callback; sport->rx_data = rx_data; return 0; } EXPORT_SYMBOL(sport_set_rx_callback); int sport_set_tx_callback(struct sport_device *sport, void (*tx_callback)(void *), void *tx_data) { BUG_ON(tx_callback == NULL); sport->tx_callback = tx_callback; sport->tx_data = tx_data; return 0; } EXPORT_SYMBOL(sport_set_tx_callback); int sport_set_err_callback(struct sport_device *sport, void (*err_callback)(void *), void *err_data) { BUG_ON(err_callback == NULL); sport->err_callback = err_callback; sport->err_data = err_data; return 0; } EXPORT_SYMBOL(sport_set_err_callback); static int sport_config_pdev(struct platform_device *pdev, struct sport_param *param) { /* Extract settings from platform data */ struct device *dev = &pdev->dev; struct bfin_snd_platform_data *pdata = dev->platform_data; struct resource *res; param->num = pdev->id; if (!pdata) { dev_err(dev, "no platform_data\n"); return -ENODEV; } param->pin_req = pdata->pin_req; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "no MEM resource\n"); return -ENODEV; } param->regs = (struct sport_register *)res->start; /* first RX, then TX */ res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) { dev_err(dev, "no rx DMA resource\n"); return -ENODEV; } 
param->dma_rx_chan = res->start; res = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!res) { dev_err(dev, "no tx DMA resource\n"); return -ENODEV; } param->dma_tx_chan = res->start; res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "no irq resource\n"); return -ENODEV; } param->err_irq = res->start; return 0; } struct sport_device *sport_init(struct platform_device *pdev, unsigned int wdsize, unsigned int dummy_count, size_t priv_size) { struct device *dev = &pdev->dev; struct sport_param param; struct sport_device *sport; int ret; dev_dbg(dev, "%s enter\n", __func__); param.wdsize = wdsize; param.dummy_count = dummy_count; BUG_ON(param.wdsize == 0 || param.dummy_count == 0); ret = sport_config_pdev(pdev, &param); if (ret) return NULL; if (peripheral_request_list(param.pin_req, "soc-audio")) { dev_err(dev, "requesting Peripherals failed\n"); return NULL; } sport = kzalloc(sizeof(*sport), GFP_KERNEL); if (!sport) { dev_err(dev, "failed to allocate for sport device\n"); goto __init_err0; } sport->num = param.num; sport->dma_rx_chan = param.dma_rx_chan; sport->dma_tx_chan = param.dma_tx_chan; sport->err_irq = param.err_irq; sport->regs = param.regs; sport->pin_req = param.pin_req; if (request_dma(sport->dma_rx_chan, "SPORT RX Data") == -EBUSY) { dev_err(dev, "failed to request RX dma %d\n", sport->dma_rx_chan); goto __init_err1; } if (set_dma_callback(sport->dma_rx_chan, rx_handler, sport) != 0) { dev_err(dev, "failed to request RX irq %d\n", sport->dma_rx_chan); goto __init_err2; } if (request_dma(sport->dma_tx_chan, "SPORT TX Data") == -EBUSY) { dev_err(dev, "failed to request TX dma %d\n", sport->dma_tx_chan); goto __init_err2; } if (set_dma_callback(sport->dma_tx_chan, tx_handler, sport) != 0) { dev_err(dev, "failed to request TX irq %d\n", sport->dma_tx_chan); goto __init_err3; } if (request_irq(sport->err_irq, err_handler, IRQF_SHARED, "SPORT err", sport) < 0) { dev_err(dev, "failed to request err irq %d\n", sport->err_irq); 
goto __init_err3; } dev_info(dev, "dma rx:%d tx:%d, err irq:%d, regs:%p\n", sport->dma_rx_chan, sport->dma_tx_chan, sport->err_irq, sport->regs); sport->wdsize = param.wdsize; sport->dummy_count = param.dummy_count; sport->private_data = kzalloc(priv_size, GFP_KERNEL); if (!sport->private_data) { dev_err(dev, "could not alloc priv data %zu bytes\n", priv_size); goto __init_err4; } if (L1_DATA_A_LENGTH) sport->dummy_buf = l1_data_sram_zalloc(param.dummy_count * 2); else sport->dummy_buf = kzalloc(param.dummy_count * 2, GFP_KERNEL); if (sport->dummy_buf == NULL) { dev_err(dev, "failed to allocate dummy buffer\n"); goto __error1; } ret = sport_config_rx_dummy(sport); if (ret) { dev_err(dev, "failed to config rx dummy ring\n"); goto __error2; } ret = sport_config_tx_dummy(sport); if (ret) { dev_err(dev, "failed to config tx dummy ring\n"); goto __error3; } platform_set_drvdata(pdev, sport); return sport; __error3: if (L1_DATA_A_LENGTH) l1_data_sram_free(sport->dummy_rx_desc); else dma_free_coherent(NULL, 2*sizeof(struct dmasg), sport->dummy_rx_desc, 0); __error2: if (L1_DATA_A_LENGTH) l1_data_sram_free(sport->dummy_buf); else kfree(sport->dummy_buf); __error1: kfree(sport->private_data); __init_err4: free_irq(sport->err_irq, sport); __init_err3: free_dma(sport->dma_tx_chan); __init_err2: free_dma(sport->dma_rx_chan); __init_err1: kfree(sport); __init_err0: peripheral_free_list(param.pin_req); return NULL; } EXPORT_SYMBOL(sport_init); void sport_done(struct sport_device *sport) { if (sport == NULL) return; sport_stop(sport); if (sport->dma_rx_desc) dma_free_coherent(NULL, sport->rx_desc_bytes, sport->dma_rx_desc, 0); if (sport->dma_tx_desc) dma_free_coherent(NULL, sport->tx_desc_bytes, sport->dma_tx_desc, 0); #if L1_DATA_A_LENGTH != 0 l1_data_sram_free(sport->dummy_rx_desc); l1_data_sram_free(sport->dummy_tx_desc); l1_data_sram_free(sport->dummy_buf); #else dma_free_coherent(NULL, 2*sizeof(struct dmasg), sport->dummy_rx_desc, 0); dma_free_coherent(NULL, 2*sizeof(struct 
dmasg), sport->dummy_tx_desc, 0); kfree(sport->dummy_buf); #endif free_dma(sport->dma_rx_chan); free_dma(sport->dma_tx_chan); free_irq(sport->err_irq, sport); kfree(sport->private_data); peripheral_free_list(sport->pin_req); kfree(sport); } EXPORT_SYMBOL(sport_done); /* * It is only used to send several bytes when dma is not enabled * sport controller is configured but not enabled. * Multichannel cannot works with pio mode */ /* Used by ac97 to write and read codec register */ int sport_send_and_recv(struct sport_device *sport, u8 *out_data, \ u8 *in_data, int len) { unsigned short dma_config; unsigned short status; unsigned long flags; unsigned long wait = 0; pr_debug("%s enter, out_data:%p, in_data:%p len:%d\n", \ __func__, out_data, in_data, len); pr_debug("tcr1:0x%04x, tcr2:0x%04x, tclkdiv:0x%04x, tfsdiv:0x%04x\n" "mcmc1:0x%04x, mcmc2:0x%04x\n", sport->regs->tcr1, sport->regs->tcr2, sport->regs->tclkdiv, sport->regs->tfsdiv, sport->regs->mcmc1, sport->regs->mcmc2); flush_dcache_range((unsigned)out_data, (unsigned)(out_data + len)); /* Enable tx dma */ dma_config = (RESTART | WDSIZE_16 | DI_EN); set_dma_start_addr(sport->dma_tx_chan, (unsigned long)out_data); set_dma_x_count(sport->dma_tx_chan, len/2); set_dma_x_modify(sport->dma_tx_chan, 2); set_dma_config(sport->dma_tx_chan, dma_config); enable_dma(sport->dma_tx_chan); if (in_data != NULL) { invalidate_dcache_range((unsigned)in_data, \ (unsigned)(in_data + len)); /* Enable rx dma */ dma_config = (RESTART | WDSIZE_16 | WNR | DI_EN); set_dma_start_addr(sport->dma_rx_chan, (unsigned long)in_data); set_dma_x_count(sport->dma_rx_chan, len/2); set_dma_x_modify(sport->dma_rx_chan, 2); set_dma_config(sport->dma_rx_chan, dma_config); enable_dma(sport->dma_rx_chan); } local_irq_save(flags); sport->regs->tcr1 |= TSPEN; sport->regs->rcr1 |= RSPEN; SSYNC(); status = get_dma_curr_irqstat(sport->dma_tx_chan); while (status & DMA_RUN) { udelay(1); status = get_dma_curr_irqstat(sport->dma_tx_chan); pr_debug("DMA 
status:0x%04x\n", status); if (wait++ > 100) goto __over; } status = sport->regs->stat; wait = 0; while (!(status & TXHRE)) { pr_debug("sport status:0x%04x\n", status); udelay(1); status = *(unsigned short *)&sport->regs->stat; if (wait++ > 1000) goto __over; } /* Wait for the last byte sent out */ udelay(20); pr_debug("sport status:0x%04x\n", status); __over: sport->regs->tcr1 &= ~TSPEN; sport->regs->rcr1 &= ~RSPEN; SSYNC(); disable_dma(sport->dma_tx_chan); /* Clear the status */ clear_dma_irqstat(sport->dma_tx_chan); if (in_data != NULL) { disable_dma(sport->dma_rx_chan); clear_dma_irqstat(sport->dma_rx_chan); } SSYNC(); local_irq_restore(flags); return 0; } EXPORT_SYMBOL(sport_send_and_recv); MODULE_AUTHOR("Roy Huang"); MODULE_DESCRIPTION("SPORT driver for ADI Blackfin"); MODULE_LICENSE("GPL");
gpl-2.0
xjljian/android_kernel_huawei_msm8226
drivers/media/video/cpia2/cpia2_core.c
7997
76309
/**************************************************************************** * * Filename: cpia2_core.c * * Copyright 2001, STMicrolectronics, Inc. * Contact: steve.miller@st.com * * Description: * This is a USB driver for CPia2 based video cameras. * The infrastructure of this driver is based on the cpia usb driver by * Jochen Scharrlach and Johannes Erdfeldt. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Stripped of 2.4 stuff ready for main kernel submit by * Alan Cox <alan@lxorguk.ukuu.org.uk> * ****************************************************************************/ #include "cpia2.h" #include <linux/slab.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/firmware.h> /* #define _CPIA2_DEBUG_ */ #ifdef _CPIA2_DEBUG_ static const char *block_name[] = { "System", "VC", "VP", "IDATA" }; #endif static unsigned int debugs_on; /* default 0 - DEBUG_REG */ /****************************************************************************** * * Forward Declarations * *****************************************************************************/ static int apply_vp_patch(struct camera_data *cam); static int set_default_user_mode(struct camera_data *cam); static int set_vw_size(struct camera_data *cam, int size); static int configure_sensor(struct camera_data *cam, int reqwidth, int reqheight); static int config_sensor_410(struct camera_data *cam, int reqwidth, int reqheight); static int config_sensor_500(struct camera_data *cam, int reqwidth, int reqheight); static int set_all_properties(struct camera_data *cam); static void get_color_params(struct camera_data *cam); static void wake_system(struct camera_data *cam); static void set_lowlight_boost(struct camera_data *cam); static void reset_camera_struct(struct camera_data *cam); static int cpia2_set_high_power(struct camera_data *cam); /* Here we want the physical address of the memory. * This is used when initializing the contents of the * area and marking the pages as reserved. 
*/ static inline unsigned long kvirt_to_pa(unsigned long adr) { unsigned long kva, ret; kva = (unsigned long) page_address(vmalloc_to_page((void *)adr)); kva |= adr & (PAGE_SIZE-1); /* restore the offset */ ret = __pa(kva); return ret; } static void *rvmalloc(unsigned long size) { void *mem; unsigned long adr; /* Round it off to PAGE_SIZE */ size = PAGE_ALIGN(size); mem = vmalloc_32(size); if (!mem) return NULL; memset(mem, 0, size); /* Clear the ram out, no junk to the user */ adr = (unsigned long) mem; while ((long)size > 0) { SetPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } return mem; } static void rvfree(void *mem, unsigned long size) { unsigned long adr; if (!mem) return; size = PAGE_ALIGN(size); adr = (unsigned long) mem; while ((long)size > 0) { ClearPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } vfree(mem); } /****************************************************************************** * * cpia2_do_command * * Send an arbitrary command to the camera. For commands that read from * the camera, copy the buffers into the proper param structures. *****************************************************************************/ int cpia2_do_command(struct camera_data *cam, u32 command, u8 direction, u8 param) { int retval = 0; struct cpia2_command cmd; unsigned int device = cam->params.pnp_id.device_type; cmd.command = command; cmd.reg_count = 2; /* default */ cmd.direction = direction; /*** * Set up the command. 
***/ switch (command) { case CPIA2_CMD_GET_VERSION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.start = CPIA2_SYSTEM_DEVICE_HI; break; case CPIA2_CMD_GET_PNP_ID: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 8; cmd.start = CPIA2_SYSTEM_DESCRIP_VID_HI; break; case CPIA2_CMD_GET_ASIC_TYPE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.start = CPIA2_VC_ASIC_ID; break; case CPIA2_CMD_GET_SENSOR: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.start = CPIA2_VP_SENSOR_FLAGS; break; case CPIA2_CMD_GET_VP_DEVICE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.start = CPIA2_VP_DEVICEH; break; case CPIA2_CMD_SET_VP_BRIGHTNESS: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_BRIGHTNESS: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP4_EXPOSURE_TARGET; else cmd.start = CPIA2_VP5_EXPOSURE_TARGET; break; case CPIA2_CMD_SET_CONTRAST: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_CONTRAST: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_YRANGE; break; case CPIA2_CMD_SET_VP_SATURATION: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_SATURATION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP_SATURATION; else cmd.start = CPIA2_VP5_MCUVSATURATION; break; case CPIA2_CMD_SET_VP_GPIO_DATA: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_GPIO_DATA: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_GPIO_DATA; break; case CPIA2_CMD_SET_VP_GPIO_DIRECTION: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_GPIO_DIRECTION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; 
cmd.reg_count = 1; cmd.start = CPIA2_VP_GPIO_DIRECTION; break; case CPIA2_CMD_SET_VC_MP_GPIO_DATA: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VC_MP_GPIO_DATA: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_MP_DATA; break; case CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_MP_DIR; break; case CPIA2_CMD_ENABLE_PACKET_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.start = CPIA2_SYSTEM_INT_PACKET_CTRL; cmd.reg_count = 1; cmd.buffer.block_data[0] = param; break; case CPIA2_CMD_SET_FLICKER_MODES: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_FLICKER_MODES: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_FLICKER_MODES; break; case CPIA2_CMD_RESET_FIFO: /* clear fifo and enable stream block */ cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC; cmd.reg_count = 2; cmd.start = 0; cmd.buffer.registers[0].index = CPIA2_VC_ST_CTRL; cmd.buffer.registers[0].value = CPIA2_VC_ST_CTRL_SRC_VC | CPIA2_VC_ST_CTRL_DST_USB | CPIA2_VC_ST_CTRL_EOF_DETECT; cmd.buffer.registers[1].index = CPIA2_VC_ST_CTRL; cmd.buffer.registers[1].value = CPIA2_VC_ST_CTRL_SRC_VC | CPIA2_VC_ST_CTRL_DST_USB | CPIA2_VC_ST_CTRL_EOF_DETECT | CPIA2_VC_ST_CTRL_FIFO_ENABLE; break; case CPIA2_CMD_SET_HI_POWER: cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_SYSTEM; cmd.reg_count = 2; cmd.buffer.registers[0].index = CPIA2_SYSTEM_SYSTEM_CONTROL; cmd.buffer.registers[1].index = CPIA2_SYSTEM_SYSTEM_CONTROL; cmd.buffer.registers[0].value = CPIA2_SYSTEM_CONTROL_CLEAR_ERR; cmd.buffer.registers[1].value = CPIA2_SYSTEM_CONTROL_HIGH_POWER; break; case CPIA2_CMD_SET_LOW_POWER: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; 
cmd.reg_count = 1; cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL; cmd.buffer.block_data[0] = 0; break; case CPIA2_CMD_CLEAR_V2W_ERR: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 1; cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL; cmd.buffer.block_data[0] = CPIA2_SYSTEM_CONTROL_CLEAR_ERR; break; case CPIA2_CMD_SET_USER_MODE: /* Then fall through */ cmd.buffer.block_data[0] = param; case CPIA2_CMD_GET_USER_MODE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP4_USER_MODE; else cmd.start = CPIA2_VP5_USER_MODE; break; case CPIA2_CMD_FRAMERATE_REQ: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP4_FRAMERATE_REQUEST; else cmd.start = CPIA2_VP5_FRAMERATE_REQUEST; cmd.buffer.block_data[0] = param; break; case CPIA2_CMD_SET_WAKEUP: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_WAKEUP: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_WAKEUP; break; case CPIA2_CMD_SET_PW_CONTROL: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_PW_CONTROL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_PW_CTRL; break; case CPIA2_CMD_GET_VP_SYSTEM_STATE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_SYSTEMSTATE; break; case CPIA2_CMD_SET_SYSTEM_CTRL: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_SYSTEM_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 1; cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL; break; case CPIA2_CMD_SET_VP_SYSTEM_CTRL: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_SYSTEM_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_SYSTEMCTRL; break; case 
CPIA2_CMD_SET_VP_EXP_MODES: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_EXP_MODES: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_EXPOSURE_MODES; break; case CPIA2_CMD_SET_DEVICE_CONFIG: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_DEVICE_CONFIG: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_DEVICE_CONFIG; break; case CPIA2_CMD_SET_SERIAL_ADDR: cmd.buffer.block_data[0] = param; cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 1; cmd.start = CPIA2_SYSTEM_VP_SERIAL_ADDR; break; case CPIA2_CMD_SET_SENSOR_CR1: cmd.buffer.block_data[0] = param; cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_SENSOR_CR1; break; case CPIA2_CMD_SET_VC_CONTROL: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VC_CONTROL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_VC_CTRL; break; case CPIA2_CMD_SET_TARGET_KB: cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.buffer.registers[0].index = CPIA2_VC_VC_TARGET_KB; cmd.buffer.registers[0].value = param; break; case CPIA2_CMD_SET_DEF_JPEG_OPT: cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC; cmd.reg_count = 4; cmd.buffer.registers[0].index = CPIA2_VC_VC_JPEG_OPT; cmd.buffer.registers[0].value = CPIA2_VC_VC_JPEG_OPT_DOUBLE_SQUEEZE; cmd.buffer.registers[1].index = CPIA2_VC_VC_USER_SQUEEZE; cmd.buffer.registers[1].value = 20; cmd.buffer.registers[2].index = CPIA2_VC_VC_CREEP_PERIOD; cmd.buffer.registers[2].value = 2; cmd.buffer.registers[3].index = CPIA2_VC_VC_JPEG_OPT; cmd.buffer.registers[3].value = CPIA2_VC_VC_JPEG_OPT_DEFAULT; break; case CPIA2_CMD_REHASH_VP4: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_REHASH_VALUES; 
cmd.buffer.block_data[0] = param; break; case CPIA2_CMD_SET_USER_EFFECTS: /* Note: Be careful with this as this register can also affect flicker modes */ cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_USER_EFFECTS: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP4_USER_EFFECTS; else cmd.start = CPIA2_VP5_USER_EFFECTS; break; default: LOG("DoCommand received invalid command\n"); return -EINVAL; } retval = cpia2_send_command(cam, &cmd); if (retval) { return retval; } /*** * Now copy any results from a read into the appropriate param struct. ***/ switch (command) { case CPIA2_CMD_GET_VERSION: cam->params.version.firmware_revision_hi = cmd.buffer.block_data[0]; cam->params.version.firmware_revision_lo = cmd.buffer.block_data[1]; break; case CPIA2_CMD_GET_PNP_ID: cam->params.pnp_id.vendor = (cmd.buffer.block_data[0] << 8) | cmd.buffer.block_data[1]; cam->params.pnp_id.product = (cmd.buffer.block_data[2] << 8) | cmd.buffer.block_data[3]; cam->params.pnp_id.device_revision = (cmd.buffer.block_data[4] << 8) | cmd.buffer.block_data[5]; if (cam->params.pnp_id.vendor == 0x553) { if (cam->params.pnp_id.product == 0x100) { cam->params.pnp_id.device_type = DEVICE_STV_672; } else if (cam->params.pnp_id.product == 0x140 || cam->params.pnp_id.product == 0x151) { cam->params.pnp_id.device_type = DEVICE_STV_676; } } break; case CPIA2_CMD_GET_ASIC_TYPE: cam->params.version.asic_id = cmd.buffer.block_data[0]; cam->params.version.asic_rev = cmd.buffer.block_data[1]; break; case CPIA2_CMD_GET_SENSOR: cam->params.version.sensor_flags = cmd.buffer.block_data[0]; cam->params.version.sensor_rev = cmd.buffer.block_data[1]; break; case CPIA2_CMD_GET_VP_DEVICE: cam->params.version.vp_device_hi = cmd.buffer.block_data[0]; cam->params.version.vp_device_lo = cmd.buffer.block_data[1]; break; case CPIA2_CMD_GET_VP_BRIGHTNESS: cam->params.color_params.brightness = cmd.buffer.block_data[0]; 
break; case CPIA2_CMD_GET_CONTRAST: cam->params.color_params.contrast = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_SATURATION: cam->params.color_params.saturation = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_GPIO_DATA: cam->params.vp_params.gpio_data = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_GPIO_DIRECTION: cam->params.vp_params.gpio_direction = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION: cam->params.vc_params.vc_mp_direction =cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VC_MP_GPIO_DATA: cam->params.vc_params.vc_mp_data = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_FLICKER_MODES: cam->params.flicker_control.cam_register = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_WAKEUP: cam->params.vc_params.wakeup = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_PW_CONTROL: cam->params.vc_params.pw_control = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_SYSTEM_CTRL: cam->params.camera_state.system_ctrl = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_SYSTEM_STATE: cam->params.vp_params.system_state = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_SYSTEM_CTRL: cam->params.vp_params.system_ctrl = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_EXP_MODES: cam->params.vp_params.exposure_modes = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_DEVICE_CONFIG: cam->params.vp_params.device_config = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VC_CONTROL: cam->params.vc_params.vc_control = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_USER_MODE: cam->params.vp_params.video_mode = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_USER_EFFECTS: cam->params.vp_params.user_effects = cmd.buffer.block_data[0]; break; default: break; } return retval; } /****************************************************************************** * * cpia2_send_command * *****************************************************************************/ #define DIR(cmd) 
((cmd->direction == TRANSFER_WRITE) ? "Write" : "Read") #define BINDEX(cmd) (cmd->req_mode & 0x03) int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd) { u8 count; u8 start; u8 *buffer; int retval; switch (cmd->req_mode & 0x0c) { case CAMERAACCESS_TYPE_RANDOM: count = cmd->reg_count * sizeof(struct cpia2_register); start = 0; buffer = (u8 *) & cmd->buffer; if (debugs_on & DEBUG_REG) DBG("%s Random: Register block %s\n", DIR(cmd), block_name[BINDEX(cmd)]); break; case CAMERAACCESS_TYPE_BLOCK: count = cmd->reg_count; start = cmd->start; buffer = cmd->buffer.block_data; if (debugs_on & DEBUG_REG) DBG("%s Block: Register block %s\n", DIR(cmd), block_name[BINDEX(cmd)]); break; case CAMERAACCESS_TYPE_MASK: count = cmd->reg_count * sizeof(struct cpia2_reg_mask); start = 0; buffer = (u8 *) & cmd->buffer; if (debugs_on & DEBUG_REG) DBG("%s Mask: Register block %s\n", DIR(cmd), block_name[BINDEX(cmd)]); break; case CAMERAACCESS_TYPE_REPEAT: /* For patch blocks only */ count = cmd->reg_count; start = cmd->start; buffer = cmd->buffer.block_data; if (debugs_on & DEBUG_REG) DBG("%s Repeat: Register block %s\n", DIR(cmd), block_name[BINDEX(cmd)]); break; default: LOG("%s: invalid request mode\n",__func__); return -EINVAL; } retval = cpia2_usb_transfer_cmd(cam, buffer, cmd->req_mode, start, count, cmd->direction); #ifdef _CPIA2_DEBUG_ if (debugs_on & DEBUG_REG) { int i; for (i = 0; i < cmd->reg_count; i++) { if((cmd->req_mode & 0x0c) == CAMERAACCESS_TYPE_BLOCK) KINFO("%s Block: [0x%02X] = 0x%02X\n", DIR(cmd), start + i, buffer[i]); if((cmd->req_mode & 0x0c) == CAMERAACCESS_TYPE_RANDOM) KINFO("%s Random: [0x%02X] = 0x%02X\n", DIR(cmd), cmd->buffer.registers[i].index, cmd->buffer.registers[i].value); } } #endif return retval; }; /************* * Functions to implement camera functionality *************/ /****************************************************************************** * * cpia2_get_version_info * 
 *****************************************************************************/
/* Populate cam->params.version (and pnp_id) by issuing the five identity
 * read commands.  Results land in cam->params via cpia2_do_command(). */
static void cpia2_get_version_info(struct camera_data *cam)
{
	cpia2_do_command(cam, CPIA2_CMD_GET_VERSION, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_PNP_ID, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_ASIC_TYPE, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_SENSOR, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_VP_DEVICE, TRANSFER_READ, 0);
}

/******************************************************************************
 *
 * cpia2_reset_camera
 *
 * Called at least during the open process, sets up initial params.
 *
 * Full bring-up sequence: configure the sensor for the current ROI, route
 * and enable the VC stream FIFO, raise power, apply the STV672 firmware
 * patch / STV676 picture fixes, push all user-visible settings, set the
 * sensor I2C bridge up and back down, program compression, then pulse the
 * VC reset line.  The exact ordering of register writes below mirrors the
 * vendor (Windows) driver and must not be rearranged.
 *
 * Returns 0 on success or a negative errno from sensor configuration /
 * patch application.
 *****************************************************************************/
int cpia2_reset_camera(struct camera_data *cam)
{
	u8 tmp_reg;
	int retval = 0;
	int i;
	struct cpia2_command cmd;

	/***
	 * VC setup
	 ***/
	retval = configure_sensor(cam,
				  cam->params.roi.width,
				  cam->params.roi.height);
	if (retval < 0) {
		ERR("Couldn't configure sensor, error=%d\n", retval);
		return retval;
	}

	/* Clear FIFO and route/enable stream block.  Two writes to the same
	 * register: first without, then with, the FIFO enable bit. */
	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
	cmd.direction = TRANSFER_WRITE;
	cmd.reg_count = 2;
	cmd.buffer.registers[0].index = CPIA2_VC_ST_CTRL;
	cmd.buffer.registers[0].value = CPIA2_VC_ST_CTRL_SRC_VC |
		CPIA2_VC_ST_CTRL_DST_USB | CPIA2_VC_ST_CTRL_EOF_DETECT;
	cmd.buffer.registers[1].index = CPIA2_VC_ST_CTRL;
	cmd.buffer.registers[1].value = CPIA2_VC_ST_CTRL_SRC_VC |
		CPIA2_VC_ST_CTRL_DST_USB |
		CPIA2_VC_ST_CTRL_EOF_DETECT |
		CPIA2_VC_ST_CTRL_FIFO_ENABLE;
	cpia2_send_command(cam, &cmd);

	cpia2_set_high_power(cam);

	if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
		/* Enable button notification */
		cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_SYSTEM;
		cmd.buffer.registers[0].index = CPIA2_SYSTEM_INT_PACKET_CTRL;
		cmd.buffer.registers[0].value =
			CPIA2_SYSTEM_INT_PACKET_CTRL_ENABLE_SW_XX;
		cmd.reg_count = 1;
		cpia2_send_command(cam, &cmd);
	}

	/* settle before patching; 100ms delay comes from the vendor driver */
	schedule_timeout_interruptible(msecs_to_jiffies(100));

	if (cam->params.pnp_id.device_type == DEVICE_STV_672)
		retval = apply_vp_patch(cam);

	/* wait for vp to go to sleep */
	schedule_timeout_interruptible(msecs_to_jiffies(100));

	/***
	 * If this is a 676, apply VP5 fixes before we start streaming
	 ***/
	if (cam->params.pnp_id.device_type == DEVICE_STV_676) {
		cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VP;

		/* The following writes improve the picture */
		cmd.buffer.registers[0].index = CPIA2_VP5_MYBLACK_LEVEL;
		cmd.buffer.registers[0].value = 0;	/* reduce from the default
							 * rec 601 pedestal of 16 */
		cmd.buffer.registers[1].index = CPIA2_VP5_MCYRANGE;
		cmd.buffer.registers[1].value = 0x92;	/* increase from 100% to
							 * (256/256 - 31) to fill
							 * available range */
		cmd.buffer.registers[2].index = CPIA2_VP5_MYCEILING;
		cmd.buffer.registers[2].value = 0xFF;	/* Increase from the
							 * default rec 601 ceiling
							 * of 240 */
		cmd.buffer.registers[3].index = CPIA2_VP5_MCUVSATURATION;
		cmd.buffer.registers[3].value = 0xFF;	/* Increase from the rec
							 * 601 100% level (128)
							 * to 145-192 */
		cmd.buffer.registers[4].index = CPIA2_VP5_ANTIFLKRSETUP;
		cmd.buffer.registers[4].value = 0x80;	/* Inhibit the
							 * anti-flicker */

		/* The following 4 writes are a fix to allow QVGA to work at 30 fps */
		cmd.buffer.registers[5].index = CPIA2_VP_RAM_ADDR_H;
		cmd.buffer.registers[5].value = 0x01;
		cmd.buffer.registers[6].index = CPIA2_VP_RAM_ADDR_L;
		cmd.buffer.registers[6].value = 0xE3;
		cmd.buffer.registers[7].index = CPIA2_VP_RAM_DATA;
		cmd.buffer.registers[7].value = 0x02;
		cmd.buffer.registers[8].index = CPIA2_VP_RAM_DATA;
		cmd.buffer.registers[8].value = 0xFC;

		cmd.direction = TRANSFER_WRITE;
		cmd.reg_count = 9;
		cpia2_send_command(cam, &cmd);
	}

	/* Activate all settings and start the data stream */
	/* Set user mode */
	set_default_user_mode(cam);

	/* Give VP time to wake up */
	schedule_timeout_interruptible(msecs_to_jiffies(100));

	set_all_properties(cam);

	cpia2_do_command(cam, CPIA2_CMD_GET_USER_MODE, TRANSFER_READ, 0);
	DBG("After SetAllProperties(cam), user mode is 0x%0X\n",
	    cam->params.vp_params.video_mode);

	/***
	 * Set audio regulator off.  This and the code to set the compression
	 * state are too complex to form a CPIA2_CMD_, and seem to be somewhat
	 * intertwined.  This stuff came straight from the windows driver.
	 ***/
	/* Turn AutoExposure off in VP and enable the serial bridge to the sensor */
	cpia2_do_command(cam, CPIA2_CMD_GET_VP_SYSTEM_CTRL, TRANSFER_READ, 0);
	tmp_reg = cam->params.vp_params.system_ctrl;
	/* NOTE(review): the outer "tmp_reg &" is redundant (tmp_reg is ANDed
	 * with itself) - the net effect is just clearing the HK_CONTROL bit.
	 * Kept byte-identical here; harmless. */
	cmd.buffer.registers[0].value = tmp_reg &
		(tmp_reg & (CPIA2_VP_SYSTEMCTRL_HK_CONTROL ^ 0xFF));

	cpia2_do_command(cam, CPIA2_CMD_GET_DEVICE_CONFIG, TRANSFER_READ, 0);
	cmd.buffer.registers[1].value = cam->params.vp_params.device_config |
		CPIA2_VP_DEVICE_CONFIG_SERIAL_BRIDGE;
	cmd.buffer.registers[0].index = CPIA2_VP_SYSTEMCTRL;
	cmd.buffer.registers[1].index = CPIA2_VP_DEVICE_CONFIG;
	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VP;
	cmd.reg_count = 2;
	cmd.direction = TRANSFER_WRITE;
	cmd.start = 0;
	cpia2_send_command(cam, &cmd);

	/* Set the correct I2C address in the CPiA-2 system register */
	cpia2_do_command(cam,
			 CPIA2_CMD_SET_SERIAL_ADDR,
			 TRANSFER_WRITE,
			 CPIA2_SYSTEM_VP_SERIAL_ADDR_SENSOR);

	/* Now have sensor access - set bit to turn the audio regulator off */
	cpia2_do_command(cam,
			 CPIA2_CMD_SET_SENSOR_CR1,
			 TRANSFER_WRITE, CPIA2_SENSOR_CR1_DOWN_AUDIO_REGULATOR);

	/* Set the correct I2C address in the CPiA-2 system register */
	if (cam->params.pnp_id.device_type == DEVICE_STV_672)
		cpia2_do_command(cam,
				 CPIA2_CMD_SET_SERIAL_ADDR,
				 TRANSFER_WRITE,
				 CPIA2_SYSTEM_VP_SERIAL_ADDR_VP); // 0x88
	else
		cpia2_do_command(cam,
				 CPIA2_CMD_SET_SERIAL_ADDR,
				 TRANSFER_WRITE,
				 CPIA2_SYSTEM_VP_SERIAL_ADDR_676_VP); // 0x8a

	/* increase signal drive strength */
	if (cam->params.pnp_id.device_type == DEVICE_STV_676)
		cpia2_do_command(cam,
				 CPIA2_CMD_SET_VP_EXP_MODES,
				 TRANSFER_WRITE,
				 CPIA2_VP_EXPOSURE_MODES_COMPILE_EXP);

	/* Start autoexposure */
	cpia2_do_command(cam, CPIA2_CMD_GET_DEVICE_CONFIG, TRANSFER_READ, 0);
	cmd.buffer.registers[0].value = cam->params.vp_params.device_config &
		(CPIA2_VP_DEVICE_CONFIG_SERIAL_BRIDGE ^ 0xFF);

	cpia2_do_command(cam, CPIA2_CMD_GET_VP_SYSTEM_CTRL, TRANSFER_READ, 0);
	cmd.buffer.registers[1].value =
		cam->params.vp_params.system_ctrl | CPIA2_VP_SYSTEMCTRL_HK_CONTROL;

	cmd.buffer.registers[0].index = CPIA2_VP_DEVICE_CONFIG;
	cmd.buffer.registers[1].index = CPIA2_VP_SYSTEMCTRL;
	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VP;
	cmd.reg_count = 2;
	cmd.direction = TRANSFER_WRITE;
	cpia2_send_command(cam, &cmd);

	/* Set compression state */
	cpia2_do_command(cam, CPIA2_CMD_GET_VC_CONTROL, TRANSFER_READ, 0);
	if (cam->params.compression.inhibit_htables) {
		tmp_reg = cam->params.vc_params.vc_control |
			  CPIA2_VC_VC_CTRL_INHIBIT_H_TABLES;
	} else  {
		tmp_reg = cam->params.vc_params.vc_control &
			  ~CPIA2_VC_VC_CTRL_INHIBIT_H_TABLES;
	}
	cpia2_do_command(cam, CPIA2_CMD_SET_VC_CONTROL, TRANSFER_WRITE, tmp_reg);

	/* Set target size (kb) on vc */
	cpia2_do_command(cam, CPIA2_CMD_SET_TARGET_KB,
			 TRANSFER_WRITE, cam->params.vc_params.target_kb);

	/* Wiggle VC Reset */
	/***
	 * First read and wait a bit.  The repeated read presumably gives the
	 * hardware time to settle - TODO confirm against vendor driver.
	 ***/
	for (i = 0; i < 50; i++) {
		cpia2_do_command(cam, CPIA2_CMD_GET_PW_CONTROL,
				 TRANSFER_READ, 0);
	}
	tmp_reg = cam->params.vc_params.pw_control;
	tmp_reg &= ~CPIA2_VC_PW_CTRL_VC_RESET_N;	/* assert reset (active low) */

	cpia2_do_command(cam, CPIA2_CMD_SET_PW_CONTROL, TRANSFER_WRITE, tmp_reg);

	tmp_reg |= CPIA2_VC_PW_CTRL_VC_RESET_N;		/* release reset */
	cpia2_do_command(cam, CPIA2_CMD_SET_PW_CONTROL, TRANSFER_WRITE, tmp_reg);

	cpia2_do_command(cam, CPIA2_CMD_SET_DEF_JPEG_OPT, TRANSFER_WRITE, 0);

	cpia2_do_command(cam, CPIA2_CMD_GET_USER_MODE, TRANSFER_READ, 0);
	DBG("After VC RESET, user mode is 0x%0X\n",
	    cam->params.vp_params.video_mode);

	return retval;
}

/******************************************************************************
 *
 * cpia2_set_high_power
 *
 * Try (up to 51 attempts) to bring the camera into high-power mode: clear
 * any V2W error, write the power bit, then poll the VP housekeeping-alive
 * flag.  Returns 0 once the VP answers, -EIO if it never wakes.
 *****************************************************************************/
static int cpia2_set_high_power(struct camera_data *cam)
{
	int i;
	for (i = 0; i <= 50; i++) {
		/* Read system status */
		cpia2_do_command(cam, CPIA2_CMD_GET_SYSTEM_CTRL, TRANSFER_READ, 0);

		/* If there is an error, clear it */
		if (cam->params.camera_state.system_ctrl &
		    CPIA2_SYSTEM_CONTROL_V2W_ERR)
			cpia2_do_command(cam, CPIA2_CMD_CLEAR_V2W_ERR,
					 TRANSFER_WRITE, 0);

		/* Try to set high power mode */
		cpia2_do_command(cam, CPIA2_CMD_SET_SYSTEM_CTRL,
				 TRANSFER_WRITE, 1);

		/* Try to read something in VP to check if everything is awake */
		cpia2_do_command(cam, CPIA2_CMD_GET_VP_SYSTEM_STATE,
				 TRANSFER_READ, 0);
		if (cam->params.vp_params.system_state &
		    CPIA2_VP_SYSTEMSTATE_HK_ALIVE) {
			break;
		} else if (i == 50) {	/* last attempt failed -> give up */
			cam->params.camera_state.power_mode = LO_POWER_MODE;
			ERR("Camera did not wake up\n");
			return -EIO;
		}
	}

	DBG("System now in high power state\n");
	cam->params.camera_state.power_mode = HI_POWER_MODE;
	return 0;
}

/******************************************************************************
 *
 * cpia2_set_low_power
 *
 * Drop the camera into low-power mode (system ctrl = 0).  Always returns 0;
 * the command's own result is not checked.
 *****************************************************************************/
int cpia2_set_low_power(struct camera_data *cam)
{
	cam->params.camera_state.power_mode = LO_POWER_MODE;
	cpia2_do_command(cam, CPIA2_CMD_SET_SYSTEM_CTRL, TRANSFER_WRITE, 0);
	return 0;
}

/******************************************************************************
 *
 * apply_vp_patch
 *
 *****************************************************************************/
/* Write a single byte 'datum' to register 'start' using the REPEAT-mode
 * command already configured in *cmd.  Helper for apply_vp_patch(). */
static int cpia2_send_onebyte_command(struct camera_data *cam,
				      struct cpia2_command *cmd,
				      u8 start, u8 datum)
{
	cmd->buffer.block_data[0] = datum;
	cmd->start = start;
	cmd->reg_count = 1;
	return cpia2_send_command(cam, cmd);
}

/* Upload the STV672 video-processor firmware patch ("cpia2/stv0672_vp4.bin").
 * Firmware layout (from this code): bytes 0-1 are the big-endian load
 * address, the remainder is the payload, streamed 64 bytes at a time into
 * the VP RAM data port; finally the load address is rewritten and a 'goto'
 * is issued to start execution.  Returns 0 or the request_firmware error.
 * NOTE(review): intermediate cpia2_send_command() results are ignored -
 * presumably intentional best-effort, matching the rest of this driver. */
static int apply_vp_patch(struct camera_data *cam)
{
	const struct firmware *fw;
	const char fw_name[] = "cpia2/stv0672_vp4.bin";
	int i, ret;
	struct cpia2_command cmd;

	ret = request_firmware(&fw, fw_name, &cam->dev->dev);
	if (ret) {
		printk(KERN_ERR "cpia2: failed to load VP patch \"%s\"\n",
		       fw_name);
		return ret;
	}

	cmd.req_mode = CAMERAACCESS_TYPE_REPEAT | CAMERAACCESS_VP;
	cmd.direction = TRANSFER_WRITE;

	/* First send the start address... */
	cpia2_send_onebyte_command(cam, &cmd, 0x0A, fw->data[0]); /* hi */
	cpia2_send_onebyte_command(cam, &cmd, 0x0B, fw->data[1]); /* lo */

	/* ... followed by the data payload, in 64-byte chunks */
	for (i = 2; i < fw->size; i += 64) {
		cmd.start = 0x0C; /* Data */
		cmd.reg_count = min_t(int, 64, fw->size - i);
		memcpy(cmd.buffer.block_data, &fw->data[i], cmd.reg_count);
		cpia2_send_command(cam, &cmd);
	}

	/* Next send the start address... */
	cpia2_send_onebyte_command(cam, &cmd, 0x0A, fw->data[0]); /* hi */
	cpia2_send_onebyte_command(cam, &cmd, 0x0B, fw->data[1]); /* lo */

	/* ... followed by the 'goto' command */
	cpia2_send_onebyte_command(cam, &cmd, 0x0D, 1);

	release_firmware(fw);
	return 0;
}

/******************************************************************************
 *
 * set_default_user_mode
 *
 * Pick the VP user mode (resolution class) and frame rate appropriate for
 * the current ROI and sensor type, then program them.  A user-requested
 * frame rate (cam->params.vp_params.frame_rate) caps the default.
 * Returns 0, or -EINVAL for an unknown sensor flag.
 *****************************************************************************/
static int set_default_user_mode(struct camera_data *cam)
{
	unsigned char user_mode;
	unsigned char frame_rate;
	int width = cam->params.roi.width;
	int height = cam->params.roi.height;

	switch (cam->params.version.sensor_flags) {
	case CPIA2_VP_SENSOR_FLAGS_404:
	case CPIA2_VP_SENSOR_FLAGS_407:
	case CPIA2_VP_SENSOR_FLAGS_409:
	case CPIA2_VP_SENSOR_FLAGS_410:
		/* CIF-class sensors: CIF full-size, else QCIF downsampled */
		if ((width > STV_IMAGE_QCIF_COLS) ||
		    (height > STV_IMAGE_QCIF_ROWS)) {
			user_mode = CPIA2_VP_USER_MODE_CIF;
		} else {
			user_mode = CPIA2_VP_USER_MODE_QCIFDS;
		}
		frame_rate = CPIA2_VP_FRAMERATE_30;
		break;
	case CPIA2_VP_SENSOR_FLAGS_500:
		/* VGA-class sensor: VGA full-size, else QVGA downsampled */
		if ((width > STV_IMAGE_CIF_COLS) ||
		    (height > STV_IMAGE_CIF_ROWS)) {
			user_mode = CPIA2_VP_USER_MODE_VGA;
		} else {
			user_mode = CPIA2_VP_USER_MODE_QVGADS;
		}
		if (cam->params.pnp_id.device_type == DEVICE_STV_672)
			frame_rate = CPIA2_VP_FRAMERATE_15;
		else
			frame_rate = CPIA2_VP_FRAMERATE_30;
		break;
	default:
		LOG("%s: Invalid sensor flag value 0x%0X\n", __func__,
		    cam->params.version.sensor_flags);
		return -EINVAL;
	}

	DBG("Sensor flag = 0x%0x, user mode = 0x%0x, frame rate = 0x%X\n",
	    cam->params.version.sensor_flags, user_mode, frame_rate);
	cpia2_do_command(cam, CPIA2_CMD_SET_USER_MODE, TRANSFER_WRITE,
			 user_mode);
	/* never exceed a frame rate the user has already requested */
	if (cam->params.vp_params.frame_rate > 0 &&
	    frame_rate > cam->params.vp_params.frame_rate)
		frame_rate = cam->params.vp_params.frame_rate;

	cpia2_set_fps(cam, frame_rate);

//	if (cam->params.pnp_id.device_type == DEVICE_STV_676)
//		cpia2_do_command(cam,
//				 CPIA2_CMD_SET_VP_SYSTEM_CTRL,
//				 TRANSFER_WRITE,
//				 CPIA2_VP_SYSTEMCTRL_HK_CONTROL |
//				 CPIA2_VP_SYSTEMCTRL_POWER_CONTROL);

	return 0;
}

/******************************************************************************
 *
 * cpia2_match_video_size
 *
 *
 * return the best match, where 'best' is as always
 * the largest that is not bigger than what is requested.
 *****************************************************************************/
int cpia2_match_video_size(int width, int height)
{
	/* ordered largest-first so the first match is the best fit */
	if (width >= STV_IMAGE_VGA_COLS && height >= STV_IMAGE_VGA_ROWS)
		return VIDEOSIZE_VGA;

	if (width >= STV_IMAGE_CIF_COLS && height >= STV_IMAGE_CIF_ROWS)
		return VIDEOSIZE_CIF;

	if (width >= STV_IMAGE_QVGA_COLS && height >= STV_IMAGE_QVGA_ROWS)
		return VIDEOSIZE_QVGA;

	if (width >= 288 && height >= 216)
		return VIDEOSIZE_288_216;

	if (width >= 256 && height >= 192)
		return VIDEOSIZE_256_192;

	if (width >= 224 && height >= 168)
		return VIDEOSIZE_224_168;

	if (width >= 192 && height >= 144)
		return VIDEOSIZE_192_144;

	if (width >= STV_IMAGE_QCIF_COLS && height >= STV_IMAGE_QCIF_ROWS)
		return VIDEOSIZE_QCIF;

	return -1;	/* smaller than the smallest supported size */
}

/******************************************************************************
 *
 * SetVideoSize
 *
 * Record the chosen VIDEOSIZE_* in cam->params and set both the ROI and
 * the visible width/height to the corresponding pixel dimensions.
 * Returns 0, or -EINVAL for an unknown size code.
 *****************************************************************************/
static int set_vw_size(struct camera_data *cam, int size)
{
	int retval = 0;

	cam->params.vp_params.video_size = size;

	switch (size) {
	case VIDEOSIZE_VGA:
		DBG("Setting size to VGA\n");
		cam->params.roi.width = STV_IMAGE_VGA_COLS;
		cam->params.roi.height = STV_IMAGE_VGA_ROWS;
		cam->width = STV_IMAGE_VGA_COLS;
		cam->height = STV_IMAGE_VGA_ROWS;
		break;
	case VIDEOSIZE_CIF:
		DBG("Setting size to CIF\n");
		cam->params.roi.width = STV_IMAGE_CIF_COLS;
		cam->params.roi.height = STV_IMAGE_CIF_ROWS;
		cam->width = STV_IMAGE_CIF_COLS;
		cam->height = STV_IMAGE_CIF_ROWS;
		break;
	case VIDEOSIZE_QVGA:
		DBG("Setting size to QVGA\n");
		cam->params.roi.width = STV_IMAGE_QVGA_COLS;
		cam->params.roi.height = STV_IMAGE_QVGA_ROWS;
		cam->width = STV_IMAGE_QVGA_COLS;
		cam->height = STV_IMAGE_QVGA_ROWS;
		break;
	case VIDEOSIZE_288_216:
		cam->params.roi.width = 288;
		cam->params.roi.height = 216;
		cam->width = 288;
		cam->height = 216;
		break;
	case VIDEOSIZE_256_192:
		cam->width = 256;
		cam->height = 192;
		cam->params.roi.width = 256;
		cam->params.roi.height = 192;
		break;
	case VIDEOSIZE_224_168:
		cam->width = 224;
		cam->height = 168;
		cam->params.roi.width = 224;
		cam->params.roi.height = 168;
		break;
	case VIDEOSIZE_192_144:
		cam->width = 192;
		cam->height = 144;
		cam->params.roi.width = 192;
		cam->params.roi.height = 144;
		break;
	case VIDEOSIZE_QCIF:
		DBG("Setting size to QCIF\n");
		cam->params.roi.width = STV_IMAGE_QCIF_COLS;
		cam->params.roi.height = STV_IMAGE_QCIF_ROWS;
		cam->width = STV_IMAGE_QCIF_COLS;
		cam->height = STV_IMAGE_QCIF_ROWS;
		break;
	default:
		retval = -EINVAL;
	}
	return retval;
}

/******************************************************************************
 *
 * configure_sensor
 *
 * Dispatch to the sensor-family-specific configuration routine based on
 * the sensor flags read at probe time.  Returns the helper's result, or
 * -EINVAL for an unknown sensor.
 *****************************************************************************/
static int configure_sensor(struct camera_data *cam,
			    int req_width, int req_height)
{
	int retval;

	switch (cam->params.version.sensor_flags) {
	case CPIA2_VP_SENSOR_FLAGS_404:
	case CPIA2_VP_SENSOR_FLAGS_407:
	case CPIA2_VP_SENSOR_FLAGS_409:
	case CPIA2_VP_SENSOR_FLAGS_410:
		retval = config_sensor_410(cam, req_width, req_height);
		break;
	case CPIA2_VP_SENSOR_FLAGS_500:
		retval = config_sensor_500(cam, req_width, req_height);
		break;
	default:
		return -EINVAL;
	}

	return retval;
}

/******************************************************************************
 *
 * config_sensor_410
 *
 * Program the VC pipeline (format, clocks, input size, timing limits,
 * output size, cropping, scaling) for the CIF-class 4xx sensors.  The
 * register values are written as one RANDOM-access batch; 'i' counts the
 * entries and is also the return value.
 *****************************************************************************/
static int config_sensor_410(struct camera_data *cam,
			    int req_width, int req_height)
{
	struct cpia2_command cmd;
	int i = 0;
	int image_size;
	int image_type;
	int width = req_width;
	int height = req_height;

	/***
	 *  Make sure size doesn't exceed CIF.
	 ***/
	if (width > STV_IMAGE_CIF_COLS)
		width = STV_IMAGE_CIF_COLS;
	if (height > STV_IMAGE_CIF_ROWS)
		height = STV_IMAGE_CIF_ROWS;

	image_size = cpia2_match_video_size(width, height);

	DBG("Config 410: width = %d, height = %d\n", width, height);
	DBG("Image size returned is %d\n", image_size);
	if (image_size >= 0) {
		set_vw_size(cam, image_size);
		width = cam->params.roi.width;
		height = cam->params.roi.height;

		DBG("After set_vw_size(), width = %d, height = %d\n",
		    width, height);
		/* classify into the three hardware input geometries */
		if (width <= 176 && height <= 144) {
			DBG("image type = VIDEOSIZE_QCIF\n");
			image_type = VIDEOSIZE_QCIF;
		}
		else if (width <= 320 && height <= 240) {
			DBG("image type = VIDEOSIZE_QVGA\n");
			image_type = VIDEOSIZE_QVGA;
		}
		else {
			DBG("image type = VIDEOSIZE_CIF\n");
			image_type = VIDEOSIZE_CIF;
		}
	} else {
		ERR("ConfigSensor410 failed\n");
		return -EINVAL;
	}

	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
	cmd.direction = TRANSFER_WRITE;

	/* VC Format */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_FORMAT;
	if (image_type == VIDEOSIZE_CIF) {
		cmd.buffer.registers[i++].value =
		    (u8) (CPIA2_VC_VC_FORMAT_UFIRST |
			  CPIA2_VC_VC_FORMAT_SHORTLINE);
	} else {
		cmd.buffer.registers[i++].value =
		    (u8) CPIA2_VC_VC_FORMAT_UFIRST;
	}

	/* VC Clocks - divider/scaling choice depends on both the target
	 * geometry and the bridge chip (672 vs 676) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_CLOCKS;
	if (image_type == VIDEOSIZE_QCIF) {
		if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
			cmd.buffer.registers[i++].value =
			    (u8) (CPIA2_VC_VC_672_CLOCKS_CIF_DIV_BY_3 |
				  CPIA2_VC_VC_672_CLOCKS_SCALING |
				  CPIA2_VC_VC_CLOCKS_LOGDIV2);
			DBG("VC_Clocks (0xc4) should be B\n");
		}
		else {
			cmd.buffer.registers[i++].value =
			    (u8) (CPIA2_VC_VC_676_CLOCKS_CIF_DIV_BY_3 |
				  CPIA2_VC_VC_CLOCKS_LOGDIV2);
		}
	} else {
		if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
			cmd.buffer.registers[i++].value =
			    (u8) (CPIA2_VC_VC_672_CLOCKS_CIF_DIV_BY_3 |
				  CPIA2_VC_VC_CLOCKS_LOGDIV0);
		}
		else {
			cmd.buffer.registers[i++].value =
			    (u8) (CPIA2_VC_VC_676_CLOCKS_CIF_DIV_BY_3 |
				  CPIA2_VC_VC_676_CLOCKS_SCALING |
				  CPIA2_VC_VC_CLOCKS_LOGDIV0);
		}
	}
	DBG("VC_Clocks (0xc4) = 0x%0X\n", cmd.buffer.registers[i-1].value);

	/* Input reqWidth from VC */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_IHSIZE_LO;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value =
		    (u8) (STV_IMAGE_QCIF_COLS / 4);
	else
		cmd.buffer.registers[i++].value =
		    (u8) (STV_IMAGE_CIF_COLS / 4);

	/* Timings */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_HI;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 0;
	else
		cmd.buffer.registers[i++].value = (u8) 1;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_LO;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 208;
	else
		cmd.buffer.registers[i++].value = (u8) 160;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_HI;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 0;
	else
		cmd.buffer.registers[i++].value = (u8) 1;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_LO;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 160;
	else
		cmd.buffer.registers[i++].value = (u8) 64;

	/* Output Image Size (in units of 4 pixels) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_OHSIZE;
	cmd.buffer.registers[i++].value = cam->params.roi.width / 4;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_OVSIZE;
	cmd.buffer.registers[i++].value = cam->params.roi.height / 4;

	/* Cropping - center the ROI within the sensor image */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_HCROP;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QCIF_COLS / 4) - (width / 4)) / 2);
	else
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_CIF_COLS / 4) - (width / 4)) / 2);

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VCROP;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QCIF_ROWS / 4) - (height / 4)) / 2);
	else
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_CIF_ROWS / 4) - (height / 4)) / 2);

	/* Scaling registers (defaults) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_HPHASE;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VPHASE;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HISPAN;
	cmd.buffer.registers[i++].value = (u8) 31;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VISPAN;
	cmd.buffer.registers[i++].value = (u8) 31;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HICROP;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VICROP;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HFRACT;
	cmd.buffer.registers[i++].value = (u8) 0x81;	/* = 8/1 = 8 (HIBYTE/LOBYTE) */

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VFRACT;
	cmd.buffer.registers[i++].value = (u8) 0x81;	/* = 8/1 = 8 (HIBYTE/LOBYTE) */

	cmd.reg_count = i;
	cpia2_send_command(cam, &cmd);

	return i;
}

/******************************************************************************
 *
 * config_sensor_500(cam)
 *
 * Same as config_sensor_410() but for the VGA-class 500 sensor: program
 * the VC pipeline registers as one RANDOM-access batch and return the
 * number of entries written.
 *****************************************************************************/
static int config_sensor_500(struct camera_data *cam,
			     int req_width, int req_height)
{
	struct cpia2_command cmd;
	int i = 0;
	int image_size = VIDEOSIZE_CIF;
	int image_type = VIDEOSIZE_VGA;
	int width = req_width;
	int height = req_height;
	unsigned int device = cam->params.pnp_id.device_type;

	image_size = cpia2_match_video_size(width, height);

	/* classify into the four hardware input geometries */
	if (width > STV_IMAGE_CIF_COLS || height > STV_IMAGE_CIF_ROWS)
		image_type = VIDEOSIZE_VGA;
	else if (width > STV_IMAGE_QVGA_COLS || height > STV_IMAGE_QVGA_ROWS)
		image_type = VIDEOSIZE_CIF;
	else if (width > STV_IMAGE_QCIF_COLS || height > STV_IMAGE_QCIF_ROWS)
		image_type = VIDEOSIZE_QVGA;
	else
		image_type = VIDEOSIZE_QCIF;

	if (image_size >= 0) {
		set_vw_size(cam, image_size);
		width = cam->params.roi.width;
		height = cam->params.roi.height;
	} else {
		ERR("ConfigSensor500 failed\n");
		return -EINVAL;
	}

	DBG("image_size = %d, width = %d, height = %d, type = %d\n",
	    image_size, width, height, image_type);

	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
	cmd.direction = TRANSFER_WRITE;
	i = 0;

	/* VC Format */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_FORMAT;
	cmd.buffer.registers[i].value = (u8) CPIA2_VC_VC_FORMAT_UFIRST;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i].value |=
		    (u8) CPIA2_VC_VC_FORMAT_DECIMATING;
	i++;

	/* VC Clocks */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_CLOCKS;
	if (device == DEVICE_STV_672) {
		if (image_type == VIDEOSIZE_VGA)
			cmd.buffer.registers[i].value =
			    (u8) CPIA2_VC_VC_CLOCKS_LOGDIV1;
		else
			cmd.buffer.registers[i].value =
			    (u8) (CPIA2_VC_VC_672_CLOCKS_SCALING |
				  CPIA2_VC_VC_CLOCKS_LOGDIV3);
	} else {
		if (image_type == VIDEOSIZE_VGA)
			cmd.buffer.registers[i].value =
			    (u8) CPIA2_VC_VC_CLOCKS_LOGDIV0;
		else
			cmd.buffer.registers[i].value =
			    (u8) (CPIA2_VC_VC_676_CLOCKS_SCALING |
				  CPIA2_VC_VC_CLOCKS_LOGDIV2);
	}
	i++;
	DBG("VC_CLOCKS = 0x%X\n", cmd.buffer.registers[i-1].value);

	/* Input width from VP */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_IHSIZE_LO;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i].value =
		    (u8) (STV_IMAGE_VGA_COLS / 4);
	else
		cmd.buffer.registers[i].value =
		    (u8) (STV_IMAGE_QVGA_COLS / 4);
	i++;
	DBG("Input width = %d\n", cmd.buffer.registers[i-1].value);

	/* Timings */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_HI;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value = (u8) 2;
	else
		cmd.buffer.registers[i++].value = (u8) 1;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_LO;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value = (u8) 250;
	else if (image_type == VIDEOSIZE_QVGA)
		cmd.buffer.registers[i++].value = (u8) 125;
	else
		cmd.buffer.registers[i++].value = (u8) 160;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_HI;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value = (u8) 2;
	else
		cmd.buffer.registers[i++].value = (u8) 1;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_LO;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value = (u8) 12;
	else if (image_type == VIDEOSIZE_QVGA)
		cmd.buffer.registers[i++].value = (u8) 64;
	else
		cmd.buffer.registers[i++].value = (u8) 6;

	/* Output Image Size - QCIF is decimated from CIF geometry */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_OHSIZE;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = STV_IMAGE_CIF_COLS / 4;
	else
		cmd.buffer.registers[i++].value = width / 4;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_OVSIZE;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = STV_IMAGE_CIF_ROWS / 4;
	else
		cmd.buffer.registers[i++].value = height / 4;

	/* Cropping - center the ROI within the input geometry */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_HCROP;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_VGA_COLS / 4) - (width / 4)) / 2);
	else if (image_type == VIDEOSIZE_QVGA)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QVGA_COLS / 4) - (width / 4)) / 2);
	else if (image_type == VIDEOSIZE_CIF)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_CIF_COLS / 4) - (width / 4)) / 2);
	else /*if (image_type == VIDEOSIZE_QCIF)*/
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QCIF_COLS / 4) - (width / 4)) / 2);

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VCROP;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_VGA_ROWS / 4) - (height / 4)) / 2);
	else if (image_type == VIDEOSIZE_QVGA)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QVGA_ROWS / 4) - (height / 4)) / 2);
	else if (image_type == VIDEOSIZE_CIF)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_CIF_ROWS / 4) - (height / 4)) / 2);
	else /*if (image_type == VIDEOSIZE_QCIF)*/
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QCIF_ROWS / 4) - (height / 4)) / 2);

	/* Scaling registers (defaults) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_HPHASE;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 36;
	else
		cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VPHASE;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 32;
	else
		cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HISPAN;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 26;
	else
		cmd.buffer.registers[i++].value = (u8) 31;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VISPAN;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 21;
	else
		cmd.buffer.registers[i++].value = (u8) 31;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HICROP;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VICROP;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HFRACT;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 0x2B;	/* 2/11 */
	else
		cmd.buffer.registers[i++].value = (u8) 0x81;	/* 8/1 */

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VFRACT;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 0x13;	/* 1/3 */
	else
		cmd.buffer.registers[i++].value = (u8) 0x81;	/* 8/1 */

	cmd.reg_count = i;
	cpia2_send_command(cam, &cmd);

	return i;
}

/******************************************************************************
 *
 * setallproperties
 *
 * This sets all user changeable properties to the values in cam->params.
 *****************************************************************************/
static int set_all_properties(struct camera_data *cam)
{
	/**
	 * Don't set target_kb here, it will be set later.
	 * framerate and user_mode were already set (set_default_user_mode).
	 **/
	cpia2_set_color_params(cam);
	cpia2_usb_change_streaming_alternate(cam,
					  cam->params.camera_state.stream_mode);
	cpia2_do_command(cam,
			 CPIA2_CMD_SET_USER_EFFECTS,
			 TRANSFER_WRITE, cam->params.vp_params.user_effects);
	cpia2_set_flicker_mode(cam,
			       cam->params.flicker_control.flicker_mode_req);
	cpia2_do_command(cam,
			 CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION,
			 TRANSFER_WRITE, cam->params.vp_params.gpio_direction);
	cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DATA, TRANSFER_WRITE,
			 cam->params.vp_params.gpio_data);

	wake_system(cam);

	set_lowlight_boost(cam);

	return 0;
}

/******************************************************************************
 *
 * cpia2_save_camera_state
 *
 * Read back the user-adjustable settings (colors, effects, GPIO) into
 * cam->params so they survive a power cycle / re-open.
 *****************************************************************************/
void cpia2_save_camera_state(struct camera_data *cam)
{
	get_color_params(cam);
	cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION, TRANSFER_READ,
			 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_VC_MP_GPIO_DATA, TRANSFER_READ, 0);
	/* Don't get framerate or target_kb. Trust the values we already have */
}

/******************************************************************************
 *
 * get_color_params
 *
 * Read brightness, saturation and contrast into cam->params.color_params.
 *****************************************************************************/
static void get_color_params(struct camera_data *cam)
{
	cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST, TRANSFER_READ, 0);
}

/******************************************************************************
 *
 * cpia2_set_color_params
 *
 * Push the stored brightness/contrast/saturation values to the hardware.
 *****************************************************************************/
void cpia2_set_color_params(struct camera_data *cam)
{
	DBG("Setting color params\n");
	cpia2_set_brightness(cam, cam->params.color_params.brightness);
	cpia2_set_contrast(cam, cam->params.color_params.contrast);
	cpia2_set_saturation(cam, cam->params.color_params.saturation);
}

/******************************************************************************
 *
 * cpia2_set_flicker_mode
 *
 * Program anti-flicker filtering: NEVER_FLICKER disables it, FLICKER_50 /
 * FLICKER_60 select the mains frequency.  Updates both the FLICKER_MODES
 * and EXP_MODES registers (preserving unrelated bits), then issues a VP4
 * rehash.  Only supported on the STV672 (returns -EINVAL otherwise).
 * Returns 0 or the first command error.
 *****************************************************************************/
int cpia2_set_flicker_mode(struct camera_data *cam, int mode)
{
	unsigned char cam_reg;
	int err = 0;

	if (cam->params.pnp_id.device_type != DEVICE_STV_672)
		return -EINVAL;

	/* Set the appropriate bits in FLICKER_MODES, preserving the rest */
	if ((err = cpia2_do_command(cam, CPIA2_CMD_GET_FLICKER_MODES,
				    TRANSFER_READ, 0)))
		return err;
	cam_reg = cam->params.flicker_control.cam_register;

	switch (mode) {
	case NEVER_FLICKER:
		cam_reg |= CPIA2_VP_FLICKER_MODES_NEVER_FLICKER;
		cam_reg &= ~CPIA2_VP_FLICKER_MODES_50HZ;
		break;
	case FLICKER_60:
		cam_reg &= ~CPIA2_VP_FLICKER_MODES_NEVER_FLICKER;
		cam_reg &= ~CPIA2_VP_FLICKER_MODES_50HZ;
		break;
	case FLICKER_50:
		cam_reg &= ~CPIA2_VP_FLICKER_MODES_NEVER_FLICKER;
		cam_reg |= CPIA2_VP_FLICKER_MODES_50HZ;
		break;
	default:
		return -EINVAL;
	}

	if ((err = cpia2_do_command(cam, CPIA2_CMD_SET_FLICKER_MODES,
				    TRANSFER_WRITE, cam_reg)))
		return err;

	/* Set the appropriate bits in EXP_MODES, preserving the rest */
	if ((err = cpia2_do_command(cam, CPIA2_CMD_GET_VP_EXP_MODES,
				    TRANSFER_READ, 0)))
		return err;
	cam_reg = cam->params.vp_params.exposure_modes;

	if (mode == NEVER_FLICKER) {
		cam_reg |= CPIA2_VP_EXPOSURE_MODES_INHIBIT_FLICKER;
	} else {
		cam_reg &= ~CPIA2_VP_EXPOSURE_MODES_INHIBIT_FLICKER;
	}

	if ((err = cpia2_do_command(cam, CPIA2_CMD_SET_VP_EXP_MODES,
				    TRANSFER_WRITE, cam_reg)))
		return err;

	if ((err = cpia2_do_command(cam, CPIA2_CMD_REHASH_VP4,
				    TRANSFER_WRITE, 1)))
		return err;

	/* record the new mode (and mains frequency) in the saved params */
	switch (mode) {
	case NEVER_FLICKER:
		cam->params.flicker_control.flicker_mode_req = mode;
		break;
	case FLICKER_60:
		cam->params.flicker_control.flicker_mode_req = mode;
		cam->params.flicker_control.mains_frequency = 60;
		break;
	case FLICKER_50:
		cam->params.flicker_control.flicker_mode_req = mode;
		cam->params.flicker_control.mains_frequency = 50;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}

/******************************************************************************
 *
 * cpia2_set_property_flip
 *
 * Set or clear the vertical-flip bit in the USER_EFFECTS register via a
 * read-modify-write; other effect bits are preserved.
 *****************************************************************************/
void cpia2_set_property_flip(struct camera_data *cam, int prop_val)
{
	unsigned char cam_reg;

	cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
	cam_reg = cam->params.vp_params.user_effects;

	if (prop_val)
	{
		cam_reg |= CPIA2_VP_USER_EFFECTS_FLIP;
	}
	else
	{
		cam_reg &= ~CPIA2_VP_USER_EFFECTS_FLIP;
	}
	cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
			 cam_reg);
}

/******************************************************************************
 *
 * cpia2_set_property_mirror
 *
 * Set or clear the horizontal-mirror bit in USER_EFFECTS (read-modify-write,
 * preserving other bits).  (Continues past this chunk.)
 *****************************************************************************/
void cpia2_set_property_mirror(struct camera_data *cam, int prop_val)
{
	unsigned char cam_reg;

	cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
	cam_reg = cam->params.vp_params.user_effects;

	if (prop_val)
	{
		cam_reg |= CPIA2_VP_USER_EFFECTS_MIRROR;
	}
	else
	{
		cam_reg &=
~CPIA2_VP_USER_EFFECTS_MIRROR; } cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE, cam_reg); } /****************************************************************************** * * set_target_kb * * The new Target KB is set in cam->params.vc_params.target_kb and * activates on reset. *****************************************************************************/ int cpia2_set_target_kb(struct camera_data *cam, unsigned char value) { DBG("Requested target_kb = %d\n", value); if (value != cam->params.vc_params.target_kb) { cpia2_usb_stream_pause(cam); /* reset camera for new target_kb */ cam->params.vc_params.target_kb = value; cpia2_reset_camera(cam); cpia2_usb_stream_resume(cam); } return 0; } /****************************************************************************** * * cpia2_set_gpio * *****************************************************************************/ int cpia2_set_gpio(struct camera_data *cam, unsigned char setting) { int ret; /* Set the microport direction (register 0x90, should be defined * already) to 1 (user output), and set the microport data (0x91) to * the value in the ioctl argument. 
*/ ret = cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION, CPIA2_VC_MP_DIR_OUTPUT, 255); if (ret < 0) return ret; cam->params.vp_params.gpio_direction = 255; ret = cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DATA, CPIA2_VC_MP_DIR_OUTPUT, setting); if (ret < 0) return ret; cam->params.vp_params.gpio_data = setting; return 0; } /****************************************************************************** * * cpia2_set_fps * *****************************************************************************/ int cpia2_set_fps(struct camera_data *cam, int framerate) { int retval; switch(framerate) { case CPIA2_VP_FRAMERATE_30: case CPIA2_VP_FRAMERATE_25: if(cam->params.pnp_id.device_type == DEVICE_STV_672 && cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500) { return -EINVAL; } /* Fall through */ case CPIA2_VP_FRAMERATE_15: case CPIA2_VP_FRAMERATE_12_5: case CPIA2_VP_FRAMERATE_7_5: case CPIA2_VP_FRAMERATE_6_25: break; default: return -EINVAL; } if (cam->params.pnp_id.device_type == DEVICE_STV_672 && framerate == CPIA2_VP_FRAMERATE_15) framerate = 0; /* Work around bug in VP4 */ retval = cpia2_do_command(cam, CPIA2_CMD_FRAMERATE_REQ, TRANSFER_WRITE, framerate); if(retval == 0) cam->params.vp_params.frame_rate = framerate; return retval; } /****************************************************************************** * * cpia2_set_brightness * *****************************************************************************/ void cpia2_set_brightness(struct camera_data *cam, unsigned char value) { /*** * Don't let the register be set to zero - bug in VP4 - flash of full * brightness ***/ if (cam->params.pnp_id.device_type == DEVICE_STV_672 && value == 0) value++; DBG("Setting brightness to %d (0x%0x)\n", value, value); cpia2_do_command(cam,CPIA2_CMD_SET_VP_BRIGHTNESS, TRANSFER_WRITE,value); } /****************************************************************************** * * cpia2_set_contrast * 
*****************************************************************************/ void cpia2_set_contrast(struct camera_data *cam, unsigned char value) { DBG("Setting contrast to %d (0x%0x)\n", value, value); cam->params.color_params.contrast = value; cpia2_do_command(cam, CPIA2_CMD_SET_CONTRAST, TRANSFER_WRITE, value); } /****************************************************************************** * * cpia2_set_saturation * *****************************************************************************/ void cpia2_set_saturation(struct camera_data *cam, unsigned char value) { DBG("Setting saturation to %d (0x%0x)\n", value, value); cam->params.color_params.saturation = value; cpia2_do_command(cam,CPIA2_CMD_SET_VP_SATURATION, TRANSFER_WRITE,value); } /****************************************************************************** * * wake_system * *****************************************************************************/ static void wake_system(struct camera_data *cam) { cpia2_do_command(cam, CPIA2_CMD_SET_WAKEUP, TRANSFER_WRITE, 0); } /****************************************************************************** * * set_lowlight_boost * * Valid for STV500 sensor only *****************************************************************************/ static void set_lowlight_boost(struct camera_data *cam) { struct cpia2_command cmd; if (cam->params.pnp_id.device_type != DEVICE_STV_672 || cam->params.version.sensor_flags != CPIA2_VP_SENSOR_FLAGS_500) return; cmd.direction = TRANSFER_WRITE; cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 3; cmd.start = CPIA2_VP_RAM_ADDR_H; cmd.buffer.block_data[0] = 0; /* High byte of address to write to */ cmd.buffer.block_data[1] = 0x59; /* Low byte of address to write to */ cmd.buffer.block_data[2] = 0; /* High byte of data to write */ cpia2_send_command(cam, &cmd); if (cam->params.vp_params.lowlight_boost) { cmd.buffer.block_data[0] = 0x02; /* Low byte data to write */ } else { cmd.buffer.block_data[0] = 
0x06; } cmd.start = CPIA2_VP_RAM_DATA; cmd.reg_count = 1; cpia2_send_command(cam, &cmd); /* Rehash the VP4 values */ cpia2_do_command(cam, CPIA2_CMD_REHASH_VP4, TRANSFER_WRITE, 1); } /****************************************************************************** * * cpia2_set_format * * Assumes that new size is already set in param struct. *****************************************************************************/ void cpia2_set_format(struct camera_data *cam) { cam->flush = true; cpia2_usb_stream_pause(cam); /* reset camera to new size */ cpia2_set_low_power(cam); cpia2_reset_camera(cam); cam->flush = false; cpia2_dbg_dump_registers(cam); cpia2_usb_stream_resume(cam); } /****************************************************************************** * * cpia2_dbg_dump_registers * *****************************************************************************/ void cpia2_dbg_dump_registers(struct camera_data *cam) { #ifdef _CPIA2_DEBUG_ struct cpia2_command cmd; if (!(debugs_on & DEBUG_DUMP_REGS)) return; cmd.direction = TRANSFER_READ; /* Start with bank 0 (SYSTEM) */ cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 3; cmd.start = 0; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "System Device Hi = 0x%X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "System Device Lo = 0x%X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "System_system control = 0x%X\n", cmd.buffer.block_data[2]); /* Bank 1 (VC) */ cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 4; cmd.start = 0x80; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "ASIC_ID = 0x%X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "ASIC_REV = 0x%X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "PW_CONTRL = 0x%X\n", cmd.buffer.block_data[2]); printk(KERN_DEBUG "WAKEUP = 0x%X\n", cmd.buffer.block_data[3]); cmd.start = 0xA0; /* ST_CTRL */ cmd.reg_count = 1; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "Stream ctrl = 0x%X\n", cmd.buffer.block_data[0]); 
cmd.start = 0xA4; /* Stream status */ cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "Stream status = 0x%X\n", cmd.buffer.block_data[0]); cmd.start = 0xA8; /* USB status */ cmd.reg_count = 3; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "USB_CTRL = 0x%X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "USB_STRM = 0x%X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "USB_STATUS = 0x%X\n", cmd.buffer.block_data[2]); cmd.start = 0xAF; /* USB settings */ cmd.reg_count = 1; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "USB settings = 0x%X\n", cmd.buffer.block_data[0]); cmd.start = 0xC0; /* VC stuff */ cmd.reg_count = 26; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VC Control = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VC Format = 0x%0X\n", cmd.buffer.block_data[3]); printk(KERN_DEBUG "VC Clocks = 0x%0X\n", cmd.buffer.block_data[4]); printk(KERN_DEBUG "VC IHSize = 0x%0X\n", cmd.buffer.block_data[5]); printk(KERN_DEBUG "VC Xlim Hi = 0x%0X\n", cmd.buffer.block_data[6]); printk(KERN_DEBUG "VC XLim Lo = 0x%0X\n", cmd.buffer.block_data[7]); printk(KERN_DEBUG "VC YLim Hi = 0x%0X\n", cmd.buffer.block_data[8]); printk(KERN_DEBUG "VC YLim Lo = 0x%0X\n", cmd.buffer.block_data[9]); printk(KERN_DEBUG "VC OHSize = 0x%0X\n", cmd.buffer.block_data[10]); printk(KERN_DEBUG "VC OVSize = 0x%0X\n", cmd.buffer.block_data[11]); printk(KERN_DEBUG "VC HCrop = 0x%0X\n", cmd.buffer.block_data[12]); printk(KERN_DEBUG "VC VCrop = 0x%0X\n", cmd.buffer.block_data[13]); printk(KERN_DEBUG "VC HPhase = 0x%0X\n", cmd.buffer.block_data[14]); printk(KERN_DEBUG "VC VPhase = 0x%0X\n", cmd.buffer.block_data[15]); printk(KERN_DEBUG "VC HIspan = 0x%0X\n", cmd.buffer.block_data[16]); printk(KERN_DEBUG "VC VIspan = 0x%0X\n", cmd.buffer.block_data[17]); printk(KERN_DEBUG "VC HiCrop = 0x%0X\n", cmd.buffer.block_data[18]); printk(KERN_DEBUG "VC ViCrop = 0x%0X\n", cmd.buffer.block_data[19]); printk(KERN_DEBUG "VC HiFract = 0x%0X\n", cmd.buffer.block_data[20]); printk(KERN_DEBUG "VC 
ViFract = 0x%0X\n", cmd.buffer.block_data[21]); printk(KERN_DEBUG "VC JPeg Opt = 0x%0X\n", cmd.buffer.block_data[22]); printk(KERN_DEBUG "VC Creep Per = 0x%0X\n", cmd.buffer.block_data[23]); printk(KERN_DEBUG "VC User Sq. = 0x%0X\n", cmd.buffer.block_data[24]); printk(KERN_DEBUG "VC Target KB = 0x%0X\n", cmd.buffer.block_data[25]); /*** VP ***/ cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 14; cmd.start = 0; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP Dev Hi = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VP Dev Lo = 0x%0X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "VP Sys State = 0x%0X\n", cmd.buffer.block_data[2]); printk(KERN_DEBUG "VP Sys Ctrl = 0x%0X\n", cmd.buffer.block_data[3]); printk(KERN_DEBUG "VP Sensor flg = 0x%0X\n", cmd.buffer.block_data[5]); printk(KERN_DEBUG "VP Sensor Rev = 0x%0X\n", cmd.buffer.block_data[6]); printk(KERN_DEBUG "VP Dev Config = 0x%0X\n", cmd.buffer.block_data[7]); printk(KERN_DEBUG "VP GPIO_DIR = 0x%0X\n", cmd.buffer.block_data[8]); printk(KERN_DEBUG "VP GPIO_DATA = 0x%0X\n", cmd.buffer.block_data[9]); printk(KERN_DEBUG "VP Ram ADDR H = 0x%0X\n", cmd.buffer.block_data[10]); printk(KERN_DEBUG "VP Ram ADDR L = 0x%0X\n", cmd.buffer.block_data[11]); printk(KERN_DEBUG "VP RAM Data = 0x%0X\n", cmd.buffer.block_data[12]); printk(KERN_DEBUG "Do Call = 0x%0X\n", cmd.buffer.block_data[13]); if (cam->params.pnp_id.device_type == DEVICE_STV_672) { cmd.reg_count = 9; cmd.start = 0x0E; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP Clock Ctrl = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VP Patch Rev = 0x%0X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "VP Vid Mode = 0x%0X\n", cmd.buffer.block_data[2]); printk(KERN_DEBUG "VP Framerate = 0x%0X\n", cmd.buffer.block_data[3]); printk(KERN_DEBUG "VP UserEffect = 0x%0X\n", cmd.buffer.block_data[4]); printk(KERN_DEBUG "VP White Bal = 0x%0X\n", cmd.buffer.block_data[5]); printk(KERN_DEBUG "VP WB thresh = 0x%0X\n", 
cmd.buffer.block_data[6]); printk(KERN_DEBUG "VP Exp Modes = 0x%0X\n", cmd.buffer.block_data[7]); printk(KERN_DEBUG "VP Exp Target = 0x%0X\n", cmd.buffer.block_data[8]); cmd.reg_count = 1; cmd.start = 0x1B; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP FlickerMds = 0x%0X\n", cmd.buffer.block_data[0]); } else { cmd.reg_count = 8 ; cmd.start = 0x0E; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP Clock Ctrl = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VP Patch Rev = 0x%0X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "VP Vid Mode = 0x%0X\n", cmd.buffer.block_data[5]); printk(KERN_DEBUG "VP Framerate = 0x%0X\n", cmd.buffer.block_data[6]); printk(KERN_DEBUG "VP UserEffect = 0x%0X\n", cmd.buffer.block_data[7]); cmd.reg_count = 1; cmd.start = CPIA2_VP5_EXPOSURE_TARGET; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP5 Exp Target= 0x%0X\n", cmd.buffer.block_data[0]); cmd.reg_count = 4; cmd.start = 0x3A; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP5 MY Black = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VP5 MCY Range = 0x%0X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "VP5 MYCEILING = 0x%0X\n", cmd.buffer.block_data[2]); printk(KERN_DEBUG "VP5 MCUV Sat = 0x%0X\n", cmd.buffer.block_data[3]); } #endif } /****************************************************************************** * * reset_camera_struct * * Sets all values to the defaults *****************************************************************************/ static void reset_camera_struct(struct camera_data *cam) { /*** * The following parameter values are the defaults from the register map. 
***/ cam->params.color_params.brightness = DEFAULT_BRIGHTNESS; cam->params.color_params.contrast = DEFAULT_CONTRAST; cam->params.color_params.saturation = DEFAULT_SATURATION; cam->params.vp_params.lowlight_boost = 0; /* FlickerModes */ cam->params.flicker_control.flicker_mode_req = NEVER_FLICKER; cam->params.flicker_control.mains_frequency = 60; /* jpeg params */ cam->params.compression.jpeg_options = CPIA2_VC_VC_JPEG_OPT_DEFAULT; cam->params.compression.creep_period = 2; cam->params.compression.user_squeeze = 20; cam->params.compression.inhibit_htables = false; /* gpio params */ cam->params.vp_params.gpio_direction = 0; /* write, the default safe mode */ cam->params.vp_params.gpio_data = 0; /* Target kb params */ cam->params.vc_params.target_kb = DEFAULT_TARGET_KB; /*** * Set Sensor FPS as fast as possible. ***/ if(cam->params.pnp_id.device_type == DEVICE_STV_672) { if(cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500) cam->params.vp_params.frame_rate = CPIA2_VP_FRAMERATE_15; else cam->params.vp_params.frame_rate = CPIA2_VP_FRAMERATE_30; } else { cam->params.vp_params.frame_rate = CPIA2_VP_FRAMERATE_30; } /*** * Set default video mode as large as possible : * for vga sensor set to vga, for cif sensor set to CIF. ***/ if (cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500) { cam->sensor_type = CPIA2_SENSOR_500; cam->video_size = VIDEOSIZE_VGA; cam->params.roi.width = STV_IMAGE_VGA_COLS; cam->params.roi.height = STV_IMAGE_VGA_ROWS; } else { cam->sensor_type = CPIA2_SENSOR_410; cam->video_size = VIDEOSIZE_CIF; cam->params.roi.width = STV_IMAGE_CIF_COLS; cam->params.roi.height = STV_IMAGE_CIF_ROWS; } cam->width = cam->params.roi.width; cam->height = cam->params.roi.height; } /****************************************************************************** * * cpia2_init_camera_struct * * Initializes camera struct, does not call reset to fill in defaults. 
*****************************************************************************/ struct camera_data *cpia2_init_camera_struct(void) { struct camera_data *cam; cam = kzalloc(sizeof(*cam), GFP_KERNEL); if (!cam) { ERR("couldn't kmalloc cpia2 struct\n"); return NULL; } cam->present = 1; mutex_init(&cam->v4l2_lock); init_waitqueue_head(&cam->wq_stream); return cam; } /****************************************************************************** * * cpia2_init_camera * * Initializes camera. *****************************************************************************/ int cpia2_init_camera(struct camera_data *cam) { DBG("Start\n"); cam->mmapped = false; /* Get sensor and asic types before reset. */ cpia2_set_high_power(cam); cpia2_get_version_info(cam); if (cam->params.version.asic_id != CPIA2_ASIC_672) { ERR("Device IO error (asicID has incorrect value of 0x%X\n", cam->params.version.asic_id); return -ENODEV; } /* Set GPIO direction and data to a safe state. */ cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION, TRANSFER_WRITE, 0); cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DATA, TRANSFER_WRITE, 0); /* resetting struct requires version info for sensor and asic types */ reset_camera_struct(cam); cpia2_set_low_power(cam); DBG("End\n"); return 0; } /****************************************************************************** * * cpia2_allocate_buffers * *****************************************************************************/ int cpia2_allocate_buffers(struct camera_data *cam) { int i; if(!cam->buffers) { u32 size = cam->num_frames*sizeof(struct framebuf); cam->buffers = kmalloc(size, GFP_KERNEL); if(!cam->buffers) { ERR("couldn't kmalloc frame buffer structures\n"); return -ENOMEM; } } if(!cam->frame_buffer) { cam->frame_buffer = rvmalloc(cam->frame_size*cam->num_frames); if (!cam->frame_buffer) { ERR("couldn't vmalloc frame buffer data area\n"); kfree(cam->buffers); cam->buffers = NULL; return -ENOMEM; } } for(i=0; i<cam->num_frames-1; ++i) { 
cam->buffers[i].next = &cam->buffers[i+1]; cam->buffers[i].data = cam->frame_buffer +i*cam->frame_size; cam->buffers[i].status = FRAME_EMPTY; cam->buffers[i].length = 0; cam->buffers[i].max_length = 0; cam->buffers[i].num = i; } cam->buffers[i].next = cam->buffers; cam->buffers[i].data = cam->frame_buffer +i*cam->frame_size; cam->buffers[i].status = FRAME_EMPTY; cam->buffers[i].length = 0; cam->buffers[i].max_length = 0; cam->buffers[i].num = i; cam->curbuff = cam->buffers; cam->workbuff = cam->curbuff->next; DBG("buffers=%p, curbuff=%p, workbuff=%p\n", cam->buffers, cam->curbuff, cam->workbuff); return 0; } /****************************************************************************** * * cpia2_free_buffers * *****************************************************************************/ void cpia2_free_buffers(struct camera_data *cam) { if(cam->buffers) { kfree(cam->buffers); cam->buffers = NULL; } if(cam->frame_buffer) { rvfree(cam->frame_buffer, cam->frame_size*cam->num_frames); cam->frame_buffer = NULL; } } /****************************************************************************** * * cpia2_read * *****************************************************************************/ long cpia2_read(struct camera_data *cam, char __user *buf, unsigned long count, int noblock) { struct framebuf *frame; if (!count) return 0; if (!buf) { ERR("%s: buffer NULL\n",__func__); return -EINVAL; } if (!cam) { ERR("%s: Internal error, camera_data NULL!\n",__func__); return -EINVAL; } if (!cam->present) { LOG("%s: camera removed\n",__func__); return 0; /* EOF */ } if (!cam->streaming) { /* Start streaming */ cpia2_usb_stream_start(cam, cam->params.camera_state.stream_mode); } /* Copy cam->curbuff in case it changes while we're processing */ frame = cam->curbuff; if (noblock && frame->status != FRAME_READY) { return -EAGAIN; } if (frame->status != FRAME_READY) { mutex_unlock(&cam->v4l2_lock); wait_event_interruptible(cam->wq_stream, !cam->present || (frame = 
cam->curbuff)->status == FRAME_READY); mutex_lock(&cam->v4l2_lock); if (signal_pending(current)) return -ERESTARTSYS; if (!cam->present) return 0; } /* copy data to user space */ if (frame->length > count) return -EFAULT; if (copy_to_user(buf, frame->data, frame->length)) return -EFAULT; count = frame->length; frame->status = FRAME_EMPTY; return count; } /****************************************************************************** * * cpia2_poll * *****************************************************************************/ unsigned int cpia2_poll(struct camera_data *cam, struct file *filp, poll_table *wait) { unsigned int status=0; if (!cam) { ERR("%s: Internal error, camera_data not found!\n",__func__); return POLLERR; } if (!cam->present) return POLLHUP; if(!cam->streaming) { /* Start streaming */ cpia2_usb_stream_start(cam, cam->params.camera_state.stream_mode); } poll_wait(filp, &cam->wq_stream, wait); if(!cam->present) status = POLLHUP; else if(cam->curbuff->status == FRAME_READY) status = POLLIN | POLLRDNORM; return status; } /****************************************************************************** * * cpia2_remap_buffer * *****************************************************************************/ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma) { const char *adr = (const char *)vma->vm_start; unsigned long size = vma->vm_end-vma->vm_start; unsigned long start_offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long start = (unsigned long) adr; unsigned long page, pos; if (!cam) return -ENODEV; DBG("mmap offset:%ld size:%ld\n", start_offset, size); if (!cam->present) return -ENODEV; if (size > cam->frame_size*cam->num_frames || (start_offset % cam->frame_size) != 0 || (start_offset+size > cam->frame_size*cam->num_frames)) return -EINVAL; pos = ((unsigned long) (cam->frame_buffer)) + start_offset; while (size > 0) { page = kvirt_to_pa(pos); if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED)) return 
-EAGAIN; start += PAGE_SIZE; pos += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; else size = 0; } cam->mmapped = true; return 0; }
gpl-2.0
ErcOne/kernel-3-4-projek-n7000
net/netfilter/xt_conntrack.c
7997
9832
/* * xt_conntrack - Netfilter module to match connection tracking * information. (Superset of Rusty's minimalistic state match.) * * (C) 2001 Marc Boucher (marc@mbsi.ca). * Copyright © CC Computer Consultants GmbH, 2007 - 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <net/ipv6.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_conntrack.h> #include <net/netfilter/nf_conntrack.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); MODULE_DESCRIPTION("Xtables: connection tracking state match"); MODULE_ALIAS("ipt_conntrack"); MODULE_ALIAS("ip6t_conntrack"); static bool conntrack_addrcmp(const union nf_inet_addr *kaddr, const union nf_inet_addr *uaddr, const union nf_inet_addr *umask, unsigned int l3proto) { if (l3proto == NFPROTO_IPV4) return ((kaddr->ip ^ uaddr->ip) & umask->ip) == 0; else if (l3proto == NFPROTO_IPV6) return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6, &uaddr->in6) == 0; else return false; } static inline bool conntrack_mt_origsrc(const struct nf_conn *ct, const struct xt_conntrack_mtinfo2 *info, u_int8_t family) { return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3, &info->origsrc_addr, &info->origsrc_mask, family); } static inline bool conntrack_mt_origdst(const struct nf_conn *ct, const struct xt_conntrack_mtinfo2 *info, u_int8_t family) { return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3, &info->origdst_addr, &info->origdst_mask, family); } static inline bool conntrack_mt_replsrc(const struct nf_conn *ct, const struct xt_conntrack_mtinfo2 *info, u_int8_t family) { return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3, &info->replsrc_addr, 
&info->replsrc_mask, family); } static inline bool conntrack_mt_repldst(const struct nf_conn *ct, const struct xt_conntrack_mtinfo2 *info, u_int8_t family) { return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3, &info->repldst_addr, &info->repldst_mask, family); } static inline bool ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info, const struct nf_conn *ct) { const struct nf_conntrack_tuple *tuple; tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; if ((info->match_flags & XT_CONNTRACK_PROTO) && (nf_ct_protonum(ct) == info->l4proto) ^ !(info->invert_flags & XT_CONNTRACK_PROTO)) return false; /* Shortcut to match all recognized protocols by using ->src.all. */ if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) && (tuple->src.u.all == info->origsrc_port) ^ !(info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT)) return false; if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) && (tuple->dst.u.all == info->origdst_port) ^ !(info->invert_flags & XT_CONNTRACK_ORIGDST_PORT)) return false; tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) && (tuple->src.u.all == info->replsrc_port) ^ !(info->invert_flags & XT_CONNTRACK_REPLSRC_PORT)) return false; if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) && (tuple->dst.u.all == info->repldst_port) ^ !(info->invert_flags & XT_CONNTRACK_REPLDST_PORT)) return false; return true; } static inline bool port_match(u16 min, u16 max, u16 port, bool invert) { return (port >= min && port <= max) ^ invert; } static inline bool ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info, const struct nf_conn *ct) { const struct nf_conntrack_tuple *tuple; tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; if ((info->match_flags & XT_CONNTRACK_PROTO) && (nf_ct_protonum(ct) == info->l4proto) ^ !(info->invert_flags & XT_CONNTRACK_PROTO)) return false; /* Shortcut to match all recognized protocols by using ->src.all. 
*/ if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) && !port_match(info->origsrc_port, info->origsrc_port_high, ntohs(tuple->src.u.all), info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT)) return false; if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) && !port_match(info->origdst_port, info->origdst_port_high, ntohs(tuple->dst.u.all), info->invert_flags & XT_CONNTRACK_ORIGDST_PORT)) return false; tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) && !port_match(info->replsrc_port, info->replsrc_port_high, ntohs(tuple->src.u.all), info->invert_flags & XT_CONNTRACK_REPLSRC_PORT)) return false; if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) && !port_match(info->repldst_port, info->repldst_port_high, ntohs(tuple->dst.u.all), info->invert_flags & XT_CONNTRACK_REPLDST_PORT)) return false; return true; } static bool conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par, u16 state_mask, u16 status_mask) { const struct xt_conntrack_mtinfo2 *info = par->matchinfo; enum ip_conntrack_info ctinfo; const struct nf_conn *ct; unsigned int statebit; ct = nf_ct_get(skb, &ctinfo); if (ct) { if (nf_ct_is_untracked(ct)) statebit = XT_CONNTRACK_STATE_UNTRACKED; else statebit = XT_CONNTRACK_STATE_BIT(ctinfo); } else statebit = XT_CONNTRACK_STATE_INVALID; if (info->match_flags & XT_CONNTRACK_STATE) { if (ct != NULL) { if (test_bit(IPS_SRC_NAT_BIT, &ct->status)) statebit |= XT_CONNTRACK_STATE_SNAT; if (test_bit(IPS_DST_NAT_BIT, &ct->status)) statebit |= XT_CONNTRACK_STATE_DNAT; } if (!!(state_mask & statebit) ^ !(info->invert_flags & XT_CONNTRACK_STATE)) return false; } if (ct == NULL) return info->match_flags & XT_CONNTRACK_STATE; if ((info->match_flags & XT_CONNTRACK_DIRECTION) && (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^ !(info->invert_flags & XT_CONNTRACK_DIRECTION)) return false; if (info->match_flags & XT_CONNTRACK_ORIGSRC) if (conntrack_mt_origsrc(ct, info, par->family) ^ !(info->invert_flags & 
XT_CONNTRACK_ORIGSRC)) return false; if (info->match_flags & XT_CONNTRACK_ORIGDST) if (conntrack_mt_origdst(ct, info, par->family) ^ !(info->invert_flags & XT_CONNTRACK_ORIGDST)) return false; if (info->match_flags & XT_CONNTRACK_REPLSRC) if (conntrack_mt_replsrc(ct, info, par->family) ^ !(info->invert_flags & XT_CONNTRACK_REPLSRC)) return false; if (info->match_flags & XT_CONNTRACK_REPLDST) if (conntrack_mt_repldst(ct, info, par->family) ^ !(info->invert_flags & XT_CONNTRACK_REPLDST)) return false; if (par->match->revision != 3) { if (!ct_proto_port_check(info, ct)) return false; } else { if (!ct_proto_port_check_v3(par->matchinfo, ct)) return false; } if ((info->match_flags & XT_CONNTRACK_STATUS) && (!!(status_mask & ct->status) ^ !(info->invert_flags & XT_CONNTRACK_STATUS))) return false; if (info->match_flags & XT_CONNTRACK_EXPIRES) { unsigned long expires = 0; if (timer_pending(&ct->timeout)) expires = (ct->timeout.expires - jiffies) / HZ; if ((expires >= info->expires_min && expires <= info->expires_max) ^ !(info->invert_flags & XT_CONNTRACK_EXPIRES)) return false; } return true; } static bool conntrack_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_conntrack_mtinfo1 *info = par->matchinfo; return conntrack_mt(skb, par, info->state_mask, info->status_mask); } static bool conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_conntrack_mtinfo2 *info = par->matchinfo; return conntrack_mt(skb, par, info->state_mask, info->status_mask); } static bool conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_conntrack_mtinfo3 *info = par->matchinfo; return conntrack_mt(skb, par, info->state_mask, info->status_mask); } static int conntrack_mt_check(const struct xt_mtchk_param *par) { int ret; ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void 
conntrack_mt_destroy(const struct xt_mtdtor_param *par) { nf_ct_l3proto_module_put(par->family); } static struct xt_match conntrack_mt_reg[] __read_mostly = { { .name = "conntrack", .revision = 1, .family = NFPROTO_UNSPEC, .matchsize = sizeof(struct xt_conntrack_mtinfo1), .match = conntrack_mt_v1, .checkentry = conntrack_mt_check, .destroy = conntrack_mt_destroy, .me = THIS_MODULE, }, { .name = "conntrack", .revision = 2, .family = NFPROTO_UNSPEC, .matchsize = sizeof(struct xt_conntrack_mtinfo2), .match = conntrack_mt_v2, .checkentry = conntrack_mt_check, .destroy = conntrack_mt_destroy, .me = THIS_MODULE, }, { .name = "conntrack", .revision = 3, .family = NFPROTO_UNSPEC, .matchsize = sizeof(struct xt_conntrack_mtinfo3), .match = conntrack_mt_v3, .checkentry = conntrack_mt_check, .destroy = conntrack_mt_destroy, .me = THIS_MODULE, }, }; static int __init conntrack_mt_init(void) { return xt_register_matches(conntrack_mt_reg, ARRAY_SIZE(conntrack_mt_reg)); } static void __exit conntrack_mt_exit(void) { xt_unregister_matches(conntrack_mt_reg, ARRAY_SIZE(conntrack_mt_reg)); } module_init(conntrack_mt_init); module_exit(conntrack_mt_exit);
gpl-2.0
garwedgess/android_kernel_lge_g4
drivers/leds/dell-led.c
7997
4349
/* * dell_led.c - Dell LED Driver * * Copyright (C) 2010 Dell Inc. * Louis Davis <louis_davis@dell.com> * Jim Dailey <jim_dailey@dell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as * published by the Free Software Foundation. * */ #include <linux/acpi.h> #include <linux/leds.h> #include <linux/slab.h> #include <linux/module.h> MODULE_AUTHOR("Louis Davis/Jim Dailey"); MODULE_DESCRIPTION("Dell LED Control Driver"); MODULE_LICENSE("GPL"); #define DELL_LED_BIOS_GUID "F6E4FE6E-909D-47cb-8BAB-C9F6F2F8D396" MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID); /* Error Result Codes: */ #define INVALID_DEVICE_ID 250 #define INVALID_PARAMETER 251 #define INVALID_BUFFER 252 #define INTERFACE_ERROR 253 #define UNSUPPORTED_COMMAND 254 #define UNSPECIFIED_ERROR 255 /* Device ID */ #define DEVICE_ID_PANEL_BACK 1 /* LED Commands */ #define CMD_LED_ON 16 #define CMD_LED_OFF 17 #define CMD_LED_BLINK 18 struct bios_args { unsigned char length; unsigned char result_code; unsigned char device_id; unsigned char command; unsigned char on_time; unsigned char off_time; }; static int dell_led_perform_fn(u8 length, u8 result_code, u8 device_id, u8 command, u8 on_time, u8 off_time) { struct bios_args *bios_return; u8 return_code; union acpi_object *obj; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer input; acpi_status status; struct bios_args args; args.length = length; args.result_code = result_code; args.device_id = device_id; args.command = command; args.on_time = on_time; args.off_time = off_time; input.length = sizeof(struct bios_args); input.pointer = &args; status = wmi_evaluate_method(DELL_LED_BIOS_GUID, 1, 1, &input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return -EINVAL; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return -EINVAL; } bios_return = ((struct bios_args *)obj->buffer.pointer); return_code = 
bios_return->result_code; kfree(obj); return return_code; } static int led_on(void) { return dell_led_perform_fn(3, /* Length of command */ INTERFACE_ERROR, /* Init to INTERFACE_ERROR */ DEVICE_ID_PANEL_BACK, /* Device ID */ CMD_LED_ON, /* Command */ 0, /* not used */ 0); /* not used */ } static int led_off(void) { return dell_led_perform_fn(3, /* Length of command */ INTERFACE_ERROR, /* Init to INTERFACE_ERROR */ DEVICE_ID_PANEL_BACK, /* Device ID */ CMD_LED_OFF, /* Command */ 0, /* not used */ 0); /* not used */ } static int led_blink(unsigned char on_eighths, unsigned char off_eighths) { return dell_led_perform_fn(5, /* Length of command */ INTERFACE_ERROR, /* Init to INTERFACE_ERROR */ DEVICE_ID_PANEL_BACK, /* Device ID */ CMD_LED_BLINK, /* Command */ on_eighths, /* blink on in eigths of a second */ off_eighths); /* blink off in eights of a second */ } static void dell_led_set(struct led_classdev *led_cdev, enum led_brightness value) { if (value == LED_OFF) led_off(); else led_on(); } static int dell_led_blink(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { unsigned long on_eighths; unsigned long off_eighths; /* The Dell LED delay is based on 125ms intervals. Need to round up to next interval. 
*/ on_eighths = (*delay_on + 124) / 125; if (0 == on_eighths) on_eighths = 1; if (on_eighths > 255) on_eighths = 255; *delay_on = on_eighths * 125; off_eighths = (*delay_off + 124) / 125; if (0 == off_eighths) off_eighths = 1; if (off_eighths > 255) off_eighths = 255; *delay_off = off_eighths * 125; led_blink(on_eighths, off_eighths); return 0; } static struct led_classdev dell_led = { .name = "dell::lid", .brightness = LED_OFF, .max_brightness = 1, .brightness_set = dell_led_set, .blink_set = dell_led_blink, .flags = LED_CORE_SUSPENDRESUME, }; static int __init dell_led_init(void) { int error = 0; if (!wmi_has_guid(DELL_LED_BIOS_GUID)) return -ENODEV; error = led_off(); if (error != 0) return -ENODEV; return led_classdev_register(NULL, &dell_led); } static void __exit dell_led_exit(void) { led_classdev_unregister(&dell_led); led_off(); } module_init(dell_led_init); module_exit(dell_led_exit);
gpl-2.0
ballock/kernel-xenial-bbr
drivers/pcmcia/pxa2xx_trizeps4.c
9789
4676
/* * linux/drivers/pcmcia/pxa2xx_trizeps4.c * * TRIZEPS PCMCIA specific routines. * * Author: Jürgen Schindele * Created: 20 02, 2006 * Copyright: Jürgen Schindele * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <mach/pxa2xx-regs.h> #include <mach/trizeps4.h> #include "soc_common.h" extern void board_pcmcia_power(int power); static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { /* we dont have voltage/card/ready detection * so we dont need interrupts for it */ switch (skt->nr) { case 0: skt->stat[SOC_STAT_CD].gpio = GPIO_PCD; skt->stat[SOC_STAT_CD].name = "cs0_cd"; skt->stat[SOC_STAT_RDY].gpio = GPIO_PRDY; skt->stat[SOC_STAT_RDY].name = "cs0_rdy"; break; default: break; } /* release the reset of this card */ pr_debug("%s: sock %d irq %d\n", __func__, skt->nr, skt->socket.pci_irq); return 0; } static unsigned long trizeps_pcmcia_status[2]; static void trizeps_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { unsigned short status = 0, change; status = CFSR_readw(); change = (status ^ trizeps_pcmcia_status[skt->nr]) & ConXS_CFSR_BVD_MASK; if (change) { trizeps_pcmcia_status[skt->nr] = status; if (status & ConXS_CFSR_BVD1) { /* enable_irq empty */ } else { /* disable_irq empty */ } } switch (skt->nr) { case 0: /* just fill in fix states */ state->bvd1 = (status & ConXS_CFSR_BVD1) ? 1 : 0; state->bvd2 = (status & ConXS_CFSR_BVD2) ? 1 : 0; state->vs_3v = (status & ConXS_CFSR_VS1) ? 0 : 1; state->vs_Xv = (status & ConXS_CFSR_VS2) ? 0 : 1; break; #ifndef CONFIG_MACH_TRIZEPS_CONXS /* on ConXS we only have one slot. 
Second is inactive */ case 1: state->detect = 0; state->ready = 0; state->bvd1 = 0; state->bvd2 = 0; state->vs_3v = 0; state->vs_Xv = 0; break; #endif } } static int trizeps_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { int ret = 0; unsigned short power = 0; /* we do nothing here just check a bit */ switch (state->Vcc) { case 0: power &= 0xfc; break; case 33: power |= ConXS_BCR_S0_VCC_3V3; break; case 50: pr_err("%s(): Vcc 5V not supported in socket\n", __func__); break; default: pr_err("%s(): bad Vcc %u\n", __func__, state->Vcc); ret = -1; } switch (state->Vpp) { case 0: power &= 0xf3; break; case 33: power |= ConXS_BCR_S0_VPP_3V3; break; case 120: pr_err("%s(): Vpp 12V not supported in socket\n", __func__); break; default: if (state->Vpp != state->Vcc) { pr_err("%s(): bad Vpp %u\n", __func__, state->Vpp); ret = -1; } } switch (skt->nr) { case 0: /* we only have 3.3V */ board_pcmcia_power(power); break; #ifndef CONFIG_MACH_TRIZEPS_CONXS /* on ConXS we only have one slot. 
Second is inactive */ case 1: #endif default: break; } return ret; } static void trizeps_pcmcia_socket_init(struct soc_pcmcia_socket *skt) { /* default is on */ board_pcmcia_power(0x9); } static void trizeps_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) { board_pcmcia_power(0x0); } static struct pcmcia_low_level trizeps_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = trizeps_pcmcia_hw_init, .socket_state = trizeps_pcmcia_socket_state, .configure_socket = trizeps_pcmcia_configure_socket, .socket_init = trizeps_pcmcia_socket_init, .socket_suspend = trizeps_pcmcia_socket_suspend, #ifdef CONFIG_MACH_TRIZEPS_CONXS .nr = 1, #else .nr = 2, #endif .first = 0, }; static struct platform_device *trizeps_pcmcia_device; static int __init trizeps_pcmcia_init(void) { int ret; if (!machine_is_trizeps4() && !machine_is_trizeps4wl()) return -ENODEV; trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!trizeps_pcmcia_device) return -ENOMEM; ret = platform_device_add_data(trizeps_pcmcia_device, &trizeps_pcmcia_ops, sizeof(trizeps_pcmcia_ops)); if (ret == 0) ret = platform_device_add(trizeps_pcmcia_device); if (ret) platform_device_put(trizeps_pcmcia_device); return ret; } static void __exit trizeps_pcmcia_exit(void) { platform_device_unregister(trizeps_pcmcia_device); } fs_initcall(trizeps_pcmcia_init); module_exit(trizeps_pcmcia_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Juergen Schindele"); MODULE_ALIAS("platform:pxa2xx-pcmcia");
gpl-2.0
adrientetar/semc-7x30-kernel-ics
arch/x86/kernel/doublefault_32.c
9789
1695
#include <linux/mm.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/init_task.h> #include <linux/fs.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/desc.h> #define DOUBLEFAULT_STACKSIZE (1024) static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; #define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) static void doublefault_fn(void) { struct desc_ptr gdt_desc = {0, 0}; unsigned long gdt, tss; store_gdt(&gdt_desc); gdt = gdt_desc.address; printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); if (ptr_ok(gdt)) { gdt += GDT_ENTRY_TSS << 3; tss = get_desc_base((struct desc_struct *)gdt); printk(KERN_EMERG "double fault, tss at %08lx\n", tss); if (ptr_ok(tss)) { struct x86_hw_tss *t = (struct x86_hw_tss *)tss; printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", t->ip, t->sp); printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n", t->ax, t->bx, t->cx, t->dx); printk(KERN_EMERG "esi = %08lx, edi = %08lx\n", t->si, t->di); } } for (;;) cpu_relax(); } struct tss_struct doublefault_tss __cacheline_aligned = { .x86_tss = { .sp0 = STACK_START, .ss0 = __KERNEL_DS, .ldt = 0, .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, .ip = (unsigned long) doublefault_fn, /* 0x2 bit is always set */ .flags = X86_EFLAGS_SF | 0x2, .sp = STACK_START, .es = __USER_DS, .cs = __KERNEL_CS, .ss = __KERNEL_DS, .ds = __USER_DS, .fs = __KERNEL_PERCPU, .__cr3 = __pa_nodebug(swapper_pg_dir), } };
gpl-2.0
Isopod/linux
drivers/pcmcia/pxa2xx_trizeps4.c
9789
4676
/* * linux/drivers/pcmcia/pxa2xx_trizeps4.c * * TRIZEPS PCMCIA specific routines. * * Author: Jürgen Schindele * Created: 20 02, 2006 * Copyright: Jürgen Schindele * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <mach/pxa2xx-regs.h> #include <mach/trizeps4.h> #include "soc_common.h" extern void board_pcmcia_power(int power); static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { /* we dont have voltage/card/ready detection * so we dont need interrupts for it */ switch (skt->nr) { case 0: skt->stat[SOC_STAT_CD].gpio = GPIO_PCD; skt->stat[SOC_STAT_CD].name = "cs0_cd"; skt->stat[SOC_STAT_RDY].gpio = GPIO_PRDY; skt->stat[SOC_STAT_RDY].name = "cs0_rdy"; break; default: break; } /* release the reset of this card */ pr_debug("%s: sock %d irq %d\n", __func__, skt->nr, skt->socket.pci_irq); return 0; } static unsigned long trizeps_pcmcia_status[2]; static void trizeps_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { unsigned short status = 0, change; status = CFSR_readw(); change = (status ^ trizeps_pcmcia_status[skt->nr]) & ConXS_CFSR_BVD_MASK; if (change) { trizeps_pcmcia_status[skt->nr] = status; if (status & ConXS_CFSR_BVD1) { /* enable_irq empty */ } else { /* disable_irq empty */ } } switch (skt->nr) { case 0: /* just fill in fix states */ state->bvd1 = (status & ConXS_CFSR_BVD1) ? 1 : 0; state->bvd2 = (status & ConXS_CFSR_BVD2) ? 1 : 0; state->vs_3v = (status & ConXS_CFSR_VS1) ? 0 : 1; state->vs_Xv = (status & ConXS_CFSR_VS2) ? 0 : 1; break; #ifndef CONFIG_MACH_TRIZEPS_CONXS /* on ConXS we only have one slot. 
Second is inactive */ case 1: state->detect = 0; state->ready = 0; state->bvd1 = 0; state->bvd2 = 0; state->vs_3v = 0; state->vs_Xv = 0; break; #endif } } static int trizeps_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { int ret = 0; unsigned short power = 0; /* we do nothing here just check a bit */ switch (state->Vcc) { case 0: power &= 0xfc; break; case 33: power |= ConXS_BCR_S0_VCC_3V3; break; case 50: pr_err("%s(): Vcc 5V not supported in socket\n", __func__); break; default: pr_err("%s(): bad Vcc %u\n", __func__, state->Vcc); ret = -1; } switch (state->Vpp) { case 0: power &= 0xf3; break; case 33: power |= ConXS_BCR_S0_VPP_3V3; break; case 120: pr_err("%s(): Vpp 12V not supported in socket\n", __func__); break; default: if (state->Vpp != state->Vcc) { pr_err("%s(): bad Vpp %u\n", __func__, state->Vpp); ret = -1; } } switch (skt->nr) { case 0: /* we only have 3.3V */ board_pcmcia_power(power); break; #ifndef CONFIG_MACH_TRIZEPS_CONXS /* on ConXS we only have one slot. 
Second is inactive */ case 1: #endif default: break; } return ret; } static void trizeps_pcmcia_socket_init(struct soc_pcmcia_socket *skt) { /* default is on */ board_pcmcia_power(0x9); } static void trizeps_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) { board_pcmcia_power(0x0); } static struct pcmcia_low_level trizeps_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = trizeps_pcmcia_hw_init, .socket_state = trizeps_pcmcia_socket_state, .configure_socket = trizeps_pcmcia_configure_socket, .socket_init = trizeps_pcmcia_socket_init, .socket_suspend = trizeps_pcmcia_socket_suspend, #ifdef CONFIG_MACH_TRIZEPS_CONXS .nr = 1, #else .nr = 2, #endif .first = 0, }; static struct platform_device *trizeps_pcmcia_device; static int __init trizeps_pcmcia_init(void) { int ret; if (!machine_is_trizeps4() && !machine_is_trizeps4wl()) return -ENODEV; trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!trizeps_pcmcia_device) return -ENOMEM; ret = platform_device_add_data(trizeps_pcmcia_device, &trizeps_pcmcia_ops, sizeof(trizeps_pcmcia_ops)); if (ret == 0) ret = platform_device_add(trizeps_pcmcia_device); if (ret) platform_device_put(trizeps_pcmcia_device); return ret; } static void __exit trizeps_pcmcia_exit(void) { platform_device_unregister(trizeps_pcmcia_device); } fs_initcall(trizeps_pcmcia_init); module_exit(trizeps_pcmcia_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Juergen Schindele"); MODULE_ALIAS("platform:pxa2xx-pcmcia");
gpl-2.0
atmchrispark/linux-at91
arch/x86/mm/kmemcheck/kmemcheck.c
10813
14463
/** * kmemcheck - a heavyweight memory checker for the linux kernel * Copyright (C) 2007, 2008 Vegard Nossum <vegardno@ifi.uio.no> * (With a lot of help from Ingo Molnar and Pekka Enberg.) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2) as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/kmemcheck.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/page-flags.h> #include <linux/percpu.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/types.h> #include <asm/cacheflush.h> #include <asm/kmemcheck.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "error.h" #include "opcode.h" #include "pte.h" #include "selftest.h" #include "shadow.h" #ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT # define KMEMCHECK_ENABLED 0 #endif #ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT # define KMEMCHECK_ENABLED 1 #endif #ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT # define KMEMCHECK_ENABLED 2 #endif int kmemcheck_enabled = KMEMCHECK_ENABLED; int __init kmemcheck_init(void) { #ifdef CONFIG_SMP /* * Limit SMP to use a single CPU. We rely on the fact that this code * runs before SMP is set up. */ if (setup_max_cpus > 1) { printk(KERN_INFO "kmemcheck: Limiting number of CPUs to 1.\n"); setup_max_cpus = 1; } #endif if (!kmemcheck_selftest()) { printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n"); kmemcheck_enabled = 0; return -EINVAL; } printk(KERN_INFO "kmemcheck: Initialized\n"); return 0; } early_initcall(kmemcheck_init); /* * We need to parse the kmemcheck= option before any memory is allocated. 
*/ static int __init param_kmemcheck(char *str) { if (!str) return -EINVAL; sscanf(str, "%d", &kmemcheck_enabled); return 0; } early_param("kmemcheck", param_kmemcheck); int kmemcheck_show_addr(unsigned long address) { pte_t *pte; pte = kmemcheck_pte_lookup(address); if (!pte) return 0; set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); __flush_tlb_one(address); return 1; } int kmemcheck_hide_addr(unsigned long address) { pte_t *pte; pte = kmemcheck_pte_lookup(address); if (!pte) return 0; set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); __flush_tlb_one(address); return 1; } struct kmemcheck_context { bool busy; int balance; /* * There can be at most two memory operands to an instruction, but * each address can cross a page boundary -- so we may need up to * four addresses that must be hidden/revealed for each fault. */ unsigned long addr[4]; unsigned long n_addrs; unsigned long flags; /* Data size of the instruction that caused a fault. */ unsigned int size; }; static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); bool kmemcheck_active(struct pt_regs *regs) { struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); return data->balance > 0; } /* Save an address that needs to be shown/hidden */ static void kmemcheck_save_addr(unsigned long addr) { struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); data->addr[data->n_addrs++] = addr; } static unsigned int kmemcheck_show_all(void) { struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); unsigned int i; unsigned int n; n = 0; for (i = 0; i < data->n_addrs; ++i) n += kmemcheck_show_addr(data->addr[i]); return n; } static unsigned int kmemcheck_hide_all(void) { struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); unsigned int i; unsigned int n; n = 0; for (i = 0; i < data->n_addrs; ++i) n += kmemcheck_hide_addr(data->addr[i]); return n; } /* * Called from the #PF handler. 
*/ void kmemcheck_show(struct pt_regs *regs) { struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); BUG_ON(!irqs_disabled()); if (unlikely(data->balance != 0)) { kmemcheck_show_all(); kmemcheck_error_save_bug(regs); data->balance = 0; return; } /* * None of the addresses actually belonged to kmemcheck. Note that * this is not an error. */ if (kmemcheck_show_all() == 0) return; ++data->balance; /* * The IF needs to be cleared as well, so that the faulting * instruction can run "uninterrupted". Otherwise, we might take * an interrupt and start executing that before we've had a chance * to hide the page again. * * NOTE: In the rare case of multiple faults, we must not override * the original flags: */ if (!(regs->flags & X86_EFLAGS_TF)) data->flags = regs->flags; regs->flags |= X86_EFLAGS_TF; regs->flags &= ~X86_EFLAGS_IF; } /* * Called from the #DB handler. */ void kmemcheck_hide(struct pt_regs *regs) { struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); int n; BUG_ON(!irqs_disabled()); if (unlikely(data->balance != 1)) { kmemcheck_show_all(); kmemcheck_error_save_bug(regs); data->n_addrs = 0; data->balance = 0; if (!(data->flags & X86_EFLAGS_TF)) regs->flags &= ~X86_EFLAGS_TF; if (data->flags & X86_EFLAGS_IF) regs->flags |= X86_EFLAGS_IF; return; } if (kmemcheck_enabled) n = kmemcheck_hide_all(); else n = kmemcheck_show_all(); if (n == 0) return; --data->balance; data->n_addrs = 0; if (!(data->flags & X86_EFLAGS_TF)) regs->flags &= ~X86_EFLAGS_TF; if (data->flags & X86_EFLAGS_IF) regs->flags |= X86_EFLAGS_IF; } void kmemcheck_show_pages(struct page *p, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) { unsigned long address; pte_t *pte; unsigned int level; address = (unsigned long) page_address(&p[i]); pte = lookup_address(address, &level); BUG_ON(!pte); BUG_ON(level != PG_LEVEL_4K); set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN)); __flush_tlb_one(address); } } bool 
kmemcheck_page_is_tracked(struct page *p) { /* This will also check the "hidden" flag of the PTE. */ return kmemcheck_pte_lookup((unsigned long) page_address(p)); } void kmemcheck_hide_pages(struct page *p, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) { unsigned long address; pte_t *pte; unsigned int level; address = (unsigned long) page_address(&p[i]); pte = lookup_address(address, &level); BUG_ON(!pte); BUG_ON(level != PG_LEVEL_4K); set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN)); __flush_tlb_one(address); } } /* Access may NOT cross page boundary */ static void kmemcheck_read_strict(struct pt_regs *regs, unsigned long addr, unsigned int size) { void *shadow; enum kmemcheck_shadow status; shadow = kmemcheck_shadow_lookup(addr); if (!shadow) return; kmemcheck_save_addr(addr); status = kmemcheck_shadow_test(shadow, size); if (status == KMEMCHECK_SHADOW_INITIALIZED) return; if (kmemcheck_enabled) kmemcheck_error_save(status, addr, size, regs); if (kmemcheck_enabled == 2) kmemcheck_enabled = 0; /* Don't warn about it again. */ kmemcheck_shadow_set(shadow, size); } bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) { enum kmemcheck_shadow status; void *shadow; shadow = kmemcheck_shadow_lookup(addr); if (!shadow) return true; status = kmemcheck_shadow_test_all(shadow, size); return status == KMEMCHECK_SHADOW_INITIALIZED; } /* Access may cross page boundary */ static void kmemcheck_read(struct pt_regs *regs, unsigned long addr, unsigned int size) { unsigned long page = addr & PAGE_MASK; unsigned long next_addr = addr + size - 1; unsigned long next_page = next_addr & PAGE_MASK; if (likely(page == next_page)) { kmemcheck_read_strict(regs, addr, size); return; } /* * What we do is basically to split the access across the * two pages and handle each part separately. 
Yes, this means * that we may now see reads that are 3 + 5 bytes, for * example (and if both are uninitialized, there will be two * reports), but it makes the code a lot simpler. */ kmemcheck_read_strict(regs, addr, next_page - addr); kmemcheck_read_strict(regs, next_page, next_addr - next_page); } static void kmemcheck_write_strict(struct pt_regs *regs, unsigned long addr, unsigned int size) { void *shadow; shadow = kmemcheck_shadow_lookup(addr); if (!shadow) return; kmemcheck_save_addr(addr); kmemcheck_shadow_set(shadow, size); } static void kmemcheck_write(struct pt_regs *regs, unsigned long addr, unsigned int size) { unsigned long page = addr & PAGE_MASK; unsigned long next_addr = addr + size - 1; unsigned long next_page = next_addr & PAGE_MASK; if (likely(page == next_page)) { kmemcheck_write_strict(regs, addr, size); return; } /* See comment in kmemcheck_read(). */ kmemcheck_write_strict(regs, addr, next_page - addr); kmemcheck_write_strict(regs, next_page, next_addr - next_page); } /* * Copying is hard. We have two addresses, each of which may be split across * a page (and each page will have different shadow addresses). 
*/ static void kmemcheck_copy(struct pt_regs *regs, unsigned long src_addr, unsigned long dst_addr, unsigned int size) { uint8_t shadow[8]; enum kmemcheck_shadow status; unsigned long page; unsigned long next_addr; unsigned long next_page; uint8_t *x; unsigned int i; unsigned int n; BUG_ON(size > sizeof(shadow)); page = src_addr & PAGE_MASK; next_addr = src_addr + size - 1; next_page = next_addr & PAGE_MASK; if (likely(page == next_page)) { /* Same page */ x = kmemcheck_shadow_lookup(src_addr); if (x) { kmemcheck_save_addr(src_addr); for (i = 0; i < size; ++i) shadow[i] = x[i]; } else { for (i = 0; i < size; ++i) shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; } } else { n = next_page - src_addr; BUG_ON(n > sizeof(shadow)); /* First page */ x = kmemcheck_shadow_lookup(src_addr); if (x) { kmemcheck_save_addr(src_addr); for (i = 0; i < n; ++i) shadow[i] = x[i]; } else { /* Not tracked */ for (i = 0; i < n; ++i) shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; } /* Second page */ x = kmemcheck_shadow_lookup(next_page); if (x) { kmemcheck_save_addr(next_page); for (i = n; i < size; ++i) shadow[i] = x[i - n]; } else { /* Not tracked */ for (i = n; i < size; ++i) shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; } } page = dst_addr & PAGE_MASK; next_addr = dst_addr + size - 1; next_page = next_addr & PAGE_MASK; if (likely(page == next_page)) { /* Same page */ x = kmemcheck_shadow_lookup(dst_addr); if (x) { kmemcheck_save_addr(dst_addr); for (i = 0; i < size; ++i) { x[i] = shadow[i]; shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; } } } else { n = next_page - dst_addr; BUG_ON(n > sizeof(shadow)); /* First page */ x = kmemcheck_shadow_lookup(dst_addr); if (x) { kmemcheck_save_addr(dst_addr); for (i = 0; i < n; ++i) { x[i] = shadow[i]; shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; } } /* Second page */ x = kmemcheck_shadow_lookup(next_page); if (x) { kmemcheck_save_addr(next_page); for (i = n; i < size; ++i) { x[i - n] = shadow[i]; shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; } } } status = 
kmemcheck_shadow_test(shadow, size); if (status == KMEMCHECK_SHADOW_INITIALIZED) return; if (kmemcheck_enabled) kmemcheck_error_save(status, src_addr, size, regs); if (kmemcheck_enabled == 2) kmemcheck_enabled = 0; } enum kmemcheck_method { KMEMCHECK_READ, KMEMCHECK_WRITE, }; static void kmemcheck_access(struct pt_regs *regs, unsigned long fallback_address, enum kmemcheck_method fallback_method) { const uint8_t *insn; const uint8_t *insn_primary; unsigned int size; struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); /* Recursive fault -- ouch. */ if (data->busy) { kmemcheck_show_addr(fallback_address); kmemcheck_error_save_bug(regs); return; } data->busy = true; insn = (const uint8_t *) regs->ip; insn_primary = kmemcheck_opcode_get_primary(insn); kmemcheck_opcode_decode(insn, &size); switch (insn_primary[0]) { #ifdef CONFIG_KMEMCHECK_BITOPS_OK /* AND, OR, XOR */ /* * Unfortunately, these instructions have to be excluded from * our regular checking since they access only some (and not * all) bits. This clears out "bogus" bitfield-access warnings. */ case 0x80: case 0x81: case 0x82: case 0x83: switch ((insn_primary[1] >> 3) & 7) { /* OR */ case 1: /* AND */ case 4: /* XOR */ case 6: kmemcheck_write(regs, fallback_address, size); goto out; /* ADD */ case 0: /* ADC */ case 2: /* SBB */ case 3: /* SUB */ case 5: /* CMP */ case 7: break; } break; #endif /* MOVS, MOVSB, MOVSW, MOVSD */ case 0xa4: case 0xa5: /* * These instructions are special because they take two * addresses, but we only get one page fault. */ kmemcheck_copy(regs, regs->si, regs->di, size); goto out; /* CMPS, CMPSB, CMPSW, CMPSD */ case 0xa6: case 0xa7: kmemcheck_read(regs, regs->si, size); kmemcheck_read(regs, regs->di, size); goto out; } /* * If the opcode isn't special in any way, we use the data from the * page fault handler to determine the address and type of memory * access. 
*/ switch (fallback_method) { case KMEMCHECK_READ: kmemcheck_read(regs, fallback_address, size); goto out; case KMEMCHECK_WRITE: kmemcheck_write(regs, fallback_address, size); goto out; } out: data->busy = false; } bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code) { pte_t *pte; /* * XXX: Is it safe to assume that memory accesses from virtual 86 * mode or non-kernel code segments will _never_ access kernel * memory (e.g. tracked pages)? For now, we need this to avoid * invoking kmemcheck for PnP BIOS calls. */ if (regs->flags & X86_VM_MASK) return false; if (regs->cs != __KERNEL_CS) return false; pte = kmemcheck_pte_lookup(address); if (!pte) return false; WARN_ON_ONCE(in_nmi()); if (error_code & 2) kmemcheck_access(regs, address, KMEMCHECK_WRITE); else kmemcheck_access(regs, address, KMEMCHECK_READ); kmemcheck_show(regs); return true; } bool kmemcheck_trap(struct pt_regs *regs) { if (!kmemcheck_active(regs)) return false; /* We're done. */ kmemcheck_hide(regs); return true; }
gpl-2.0
CyanogenMod/lge-kernel-p880
arch/mips/emma/markeins/led.c
13885
1510
/* * Copyright (C) NEC Electronics Corporation 2004-2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <asm/emma/emma2rh.h> const unsigned long clear = 0x20202020; #define LED_BASE 0xb1400038 void markeins_led_clear(void) { emma2rh_out32(LED_BASE, clear); emma2rh_out32(LED_BASE + 4, clear); } void markeins_led(const char *str) { int i; int len = strlen(str); markeins_led_clear(); if (len > 8) len = 8; if (emma2rh_in32(0xb0000800) & (0x1 << 18)) for (i = 0; i < len; i++) emma2rh_out8(LED_BASE + i, str[i]); else for (i = 0; i < len; i++) emma2rh_out8(LED_BASE + (i & 4) + (3 - (i & 3)), str[i]); } void markeins_led_hex(u32 val) { char str[10]; sprintf(str, "%08x", val); markeins_led(str); }
gpl-2.0
francegabb/mxu1130driver
drivers/thermal/int340x_thermal/processor_thermal_device.c
62
7959
/* * processor_thermal_device.c * Copyright (c) 2014, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/acpi.h> /* Broadwell-U/HSB thermal reporting device */ #define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603 #define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03 /* Braswell thermal reporting device */ #define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC struct power_config { u32 index; u32 min_uw; u32 max_uw; u32 tmin_us; u32 tmax_us; u32 step_uw; }; struct proc_thermal_device { struct device *dev; struct acpi_device *adev; struct power_config power_limits[2]; }; enum proc_thermal_emum_mode_type { PROC_THERMAL_NONE, PROC_THERMAL_PCI, PROC_THERMAL_PLATFORM_DEV }; /* * We can have only one type of enumeration, PCI or Platform, * not both. So we don't need instance specific data. 
*/ static enum proc_thermal_emum_mode_type proc_thermal_emum_mode = PROC_THERMAL_NONE; #define POWER_LIMIT_SHOW(index, suffix) \ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct pci_dev *pci_dev; \ struct platform_device *pdev; \ struct proc_thermal_device *proc_dev; \ \ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ pdev = to_platform_device(dev); \ proc_dev = platform_get_drvdata(pdev); \ } else { \ pci_dev = to_pci_dev(dev); \ proc_dev = pci_get_drvdata(pci_dev); \ } \ return sprintf(buf, "%lu\n",\ (unsigned long)proc_dev->power_limits[index].suffix * 1000); \ } POWER_LIMIT_SHOW(0, min_uw) POWER_LIMIT_SHOW(0, max_uw) POWER_LIMIT_SHOW(0, step_uw) POWER_LIMIT_SHOW(0, tmin_us) POWER_LIMIT_SHOW(0, tmax_us) POWER_LIMIT_SHOW(1, min_uw) POWER_LIMIT_SHOW(1, max_uw) POWER_LIMIT_SHOW(1, step_uw) POWER_LIMIT_SHOW(1, tmin_us) POWER_LIMIT_SHOW(1, tmax_us) static DEVICE_ATTR_RO(power_limit_0_min_uw); static DEVICE_ATTR_RO(power_limit_0_max_uw); static DEVICE_ATTR_RO(power_limit_0_step_uw); static DEVICE_ATTR_RO(power_limit_0_tmin_us); static DEVICE_ATTR_RO(power_limit_0_tmax_us); static DEVICE_ATTR_RO(power_limit_1_min_uw); static DEVICE_ATTR_RO(power_limit_1_max_uw); static DEVICE_ATTR_RO(power_limit_1_step_uw); static DEVICE_ATTR_RO(power_limit_1_tmin_us); static DEVICE_ATTR_RO(power_limit_1_tmax_us); static struct attribute *power_limit_attrs[] = { &dev_attr_power_limit_0_min_uw.attr, &dev_attr_power_limit_1_min_uw.attr, &dev_attr_power_limit_0_max_uw.attr, &dev_attr_power_limit_1_max_uw.attr, &dev_attr_power_limit_0_step_uw.attr, &dev_attr_power_limit_1_step_uw.attr, &dev_attr_power_limit_0_tmin_us.attr, &dev_attr_power_limit_1_tmin_us.attr, &dev_attr_power_limit_0_tmax_us.attr, &dev_attr_power_limit_1_tmax_us.attr, NULL }; static struct attribute_group power_limit_attribute_group = { .attrs = power_limit_attrs, .name = "power_limits" }; static int proc_thermal_add(struct 
device *dev, struct proc_thermal_device **priv) { struct proc_thermal_device *proc_priv; struct acpi_device *adev; acpi_status status; struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *elements, *ppcc; union acpi_object *p; int i; int ret; adev = ACPI_COMPANION(dev); if (!adev) return -ENODEV; status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf); if (ACPI_FAILURE(status)) return -ENODEV; p = buf.pointer; if (!p || (p->type != ACPI_TYPE_PACKAGE)) { dev_err(dev, "Invalid PPCC data\n"); ret = -EFAULT; goto free_buffer; } if (!p->package.count) { dev_err(dev, "Invalid PPCC package size\n"); ret = -EFAULT; goto free_buffer; } proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL); if (!proc_priv) { ret = -ENOMEM; goto free_buffer; } proc_priv->dev = dev; proc_priv->adev = adev; for (i = 0; i < min((int)p->package.count - 1, 2); ++i) { elements = &(p->package.elements[i+1]); if (elements->type != ACPI_TYPE_PACKAGE || elements->package.count != 6) { ret = -EFAULT; goto free_buffer; } ppcc = elements->package.elements; proc_priv->power_limits[i].index = ppcc[0].integer.value; proc_priv->power_limits[i].min_uw = ppcc[1].integer.value; proc_priv->power_limits[i].max_uw = ppcc[2].integer.value; proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value; proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value; proc_priv->power_limits[i].step_uw = ppcc[5].integer.value; } *priv = proc_priv; ret = sysfs_create_group(&dev->kobj, &power_limit_attribute_group); free_buffer: kfree(buf.pointer); return ret; } void proc_thermal_remove(struct proc_thermal_device *proc_priv) { sysfs_remove_group(&proc_priv->dev->kobj, &power_limit_attribute_group); } static int int3401_add(struct platform_device *pdev) { struct proc_thermal_device *proc_priv; int ret; if (proc_thermal_emum_mode == PROC_THERMAL_PCI) { dev_err(&pdev->dev, "error: enumerated as PCI dev\n"); return -ENODEV; } ret = proc_thermal_add(&pdev->dev, &proc_priv); if (ret) return ret; 
platform_set_drvdata(pdev, proc_priv); proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; return 0; } static int int3401_remove(struct platform_device *pdev) { proc_thermal_remove(platform_get_drvdata(pdev)); return 0; } static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *unused) { struct proc_thermal_device *proc_priv; int ret; if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { dev_err(&pdev->dev, "error: enumerated as platform dev\n"); return -ENODEV; } ret = pci_enable_device(pdev); if (ret < 0) { dev_err(&pdev->dev, "error: could not enable device\n"); return ret; } ret = proc_thermal_add(&pdev->dev, &proc_priv); if (ret) { pci_disable_device(pdev); return ret; } pci_set_drvdata(pdev, proc_priv); proc_thermal_emum_mode = PROC_THERMAL_PCI; return 0; } static void proc_thermal_pci_remove(struct pci_dev *pdev) { proc_thermal_remove(pci_get_drvdata(pdev)); pci_disable_device(pdev); } static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)}, { 0, }, }; MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids); static struct pci_driver proc_thermal_pci_driver = { .name = "proc_thermal", .probe = proc_thermal_pci_probe, .remove = proc_thermal_pci_remove, .id_table = proc_thermal_pci_ids, }; static const struct acpi_device_id int3401_device_ids[] = { {"INT3401", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, int3401_device_ids); static struct platform_driver int3401_driver = { .probe = int3401_add, .remove = int3401_remove, .driver = { .name = "int3401 thermal", .acpi_match_table = int3401_device_ids, }, }; static int __init proc_thermal_init(void) { int ret; ret = platform_driver_register(&int3401_driver); if (ret) return ret; ret = pci_register_driver(&proc_thermal_pci_driver); return ret; } static void __exit proc_thermal_exit(void) { 
platform_driver_unregister(&int3401_driver); pci_unregister_driver(&proc_thermal_pci_driver); } module_init(proc_thermal_init); module_exit(proc_thermal_exit); MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
sadven/meisiekernel
drivers/media/platform/exynos/fimg2d/fimg2d4x_blt.c
62
16243
/* linux/drivers/media/video/exynos/fimg2d/fimg2d4x_blt.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *	http://www.samsung.com/
 *
 * Samsung Graphics 2D driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/rmap.h>
#include <linux/fs.h>
#include <linux/exynos_iovmm.h>
#include <linux/clk-private.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_PM_RUNTIME
#include <plat/devs.h>
#include <linux/pm_runtime.h>
#endif
#include "fimg2d.h"
#include "fimg2d_clk.h"
#include "fimg2d4x.h"
#include "fimg2d_ctx.h"
#include "fimg2d_cache.h"
#include "fimg2d_helper.h"

#define BLIT_TIMEOUT msecs_to_jiffies(8000)
#define MAX_PREFBUFS 6

/* Sysmmu prefetch-buffer regions built by configure(), consumed by bitblt(). */
static int nbufs;
static struct sysmmu_prefbuf prefbuf[MAX_PREFBUFS];

#ifndef CONFIG_EXYNOS7_IOMMU
/* Upper bound on distinct mappings (anon or file) locked per blit. */
#define G2D_MAX_VMA_MAPPING 12

/*
 * Return 1 if @mapping is not already represented in the first @cnt
 * entries of @mappings and may therefore be locked; 0 otherwise.
 * Anonymous mappings are compared by anon_vma root so that two
 * anon_vmas sharing a root (whose lock is shared) are not locked twice.
 */
static int mapping_can_locked(unsigned long mapping,
				unsigned long mappings[], int cnt)
{
	int i;

	if (!mapping)
		return 0;

	for (i = 0; i < cnt; i++) {
		if ((mappings[i] & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON) {
			if ((mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON) {
				struct anon_vma *anon = (struct anon_vma *)
					(mapping & ~PAGE_MAPPING_FLAGS);
				struct anon_vma *locked = (struct anon_vma *)
					(mappings[i] & ~PAGE_MAPPING_FLAGS);
				if (anon->root == locked->root)
					return 0;
			}
		} else if (mappings[i] != 0) {
			if (mappings[i] == mapping)
				return 0;
		}
	}

	return 1;
}

/*
 * Walk the VMAs covering [@addr, @addr + @len) in @mm, and for each
 * resolvable page take either the file mapping's i_mmap_mutex or the
 * anon_vma write lock (whichever applies), recording what was locked
 * in @mappings starting at index @cnt.  Returns the updated count.
 * NOTE(review): pages that fail follow_page() or have no mapping are
 * silently skipped — presumably they cannot migrate under the blit;
 * confirm against the sysmmu fault handling.
 */
static int vma_lock_mapping_one(struct mm_struct *mm,
			unsigned long addr, size_t len,
			unsigned long mappings[], int cnt)
{
	unsigned long end = addr + len;
	struct vm_area_struct *vma;
	struct page *page;

	for (vma = find_vma(mm, addr);
		vma && (vma->vm_start <= addr) && (addr < end);
		addr += vma->vm_end - vma->vm_start, vma = vma->vm_next) {
		struct anon_vma *anon;

		page = follow_page(vma, addr, 0);
		if (IS_ERR_OR_NULL(page) || !page->mapping)
			continue;

		anon = page_get_anon_vma(page);
		if (!anon) {
			/* File-backed page: serialize against unmap/truncate. */
			struct address_space *mapping;

			get_page(page);
			mapping = page_mapping(page);
			if (mapping_can_locked(
					(unsigned long)mapping,
					mappings, cnt)) {
				mutex_lock(&mapping->i_mmap_mutex);
				mappings[cnt++] = (unsigned long)mapping;
			}
			put_page(page);
		} else {
			/* Anonymous page: take the anon_vma write lock. */
			if (mapping_can_locked(
					(unsigned long)anon | PAGE_MAPPING_ANON,
					mappings, cnt)) {
				anon_vma_lock_write(anon);
				mappings[cnt++] = (unsigned long)page->mapping;
			}
			put_anon_vma(anon);
		}

		if (cnt == G2D_MAX_VMA_MAPPING)
			break;
	}

	return cnt;
}

/*
 * Lock every mapping backing the @num_area user buffers in @area so
 * their pages cannot be migrated/reclaimed during the blit.  Returns
 * an opaque cookie for vma_unlock_mapping(), or NULL if nothing was
 * locked (or on allocation failure).
 */
static void *vma_lock_mapping(struct mm_struct *mm,
				struct sysmmu_prefbuf area[], int num_area)
{
	unsigned long *mappings = NULL;
	/* array of G2D_MAX_VMA_MAPPINGS entries */
	int cnt = 0;
	int i;

	mappings = (unsigned long *)kzalloc(
			sizeof(unsigned long) * G2D_MAX_VMA_MAPPING,
			GFP_KERNEL);
	if (!mappings)
		return NULL;

	down_read(&mm->mmap_sem);

	for (i = 0; i < num_area; i++) {
		cnt = vma_lock_mapping_one(mm,
				area[i].base, area[i].size, mappings, cnt);
		if (cnt == G2D_MAX_VMA_MAPPING) {
			pr_err("%s: area crosses to many vmas\n", __func__);
			break;
		}
	}

	if (cnt == 0) {
		kfree(mappings);
		mappings = NULL;
	}

	up_read(&mm->mmap_sem);

	return (void *)mappings;
}

/*
 * Release every lock recorded by vma_lock_mapping() and free the
 * cookie.  Accepts NULL (nothing was locked).
 */
static void vma_unlock_mapping(void *__mappings)
{
	int i;
	unsigned long *mappings = __mappings;

	if (!mappings)
		return;

	for (i = 0; i < G2D_MAX_VMA_MAPPING; i++) {
		if (mappings[i]) {
			if (mappings[i] & PAGE_MAPPING_ANON) {
				anon_vma_unlock_write(
					(struct anon_vma *)(mappings[i] &
						~PAGE_MAPPING_FLAGS));
			} else {
				struct address_space *mapping =
						(void *)mappings[i];
				mutex_unlock(&mapping->i_mmap_mutex);
			}
		}
	}

	kfree(mappings);
}
#else
/* With the Exynos7 IOMMU path, user pages are pinned by the IOMMU
 * mapping itself, so VMA locking is a no-op. */
static void *vma_lock_mapping(struct mm_struct *mm,
				struct sysmmu_prefbuf area[], int num_area)
{
	return NULL;
}

#define vma_unlock_mapping(mapping) do { } while (0)
#endif

#ifdef CONFIG_PM_RUNTIME
/* Returns nonzero when the G2D clock is currently enabled. */
static int fimg2d4x_get_clk_cnt(struct clk *clk)
{
	return __clk_is_enabled(clk);
}
#endif
#ifdef CONFIG_EXYNOS7_IOMMU static void fimg2d4x_cleanup_pgtable(struct fimg2d_control *ctrl, struct fimg2d_bltcmd *cmd, enum image_object idx, bool plane2) { if (cmd->dma[idx].base.size > 0) { exynos_sysmmu_unmap_user_pages(ctrl->dev, cmd->ctx->mm, cmd->dma[idx].base.addr, cmd->dma[idx].base.size); } if (plane2 && cmd->dma[idx].plane2.size > 0) { exynos_sysmmu_unmap_user_pages(ctrl->dev, cmd->ctx->mm, cmd->dma[idx].plane2.addr, cmd->dma[idx].plane2.size); } } #else #define fimg2d4x_cleanup_pgtable(ctrl, cmd, idx, plane2) do { } while (0) #endif static int fimg2d4x_blit_wait(struct fimg2d_control *ctrl, struct fimg2d_bltcmd *cmd) { int ret; ret = wait_event_timeout(ctrl->wait_q, !atomic_read(&ctrl->busy), BLIT_TIMEOUT); if (!ret) { fimg2d_err("blit wait timeout\n"); fimg2d4x_disable_irq(ctrl); if (!fimg2d4x_blit_done_status(ctrl)) fimg2d_err("blit not finished\n"); fimg2d_debug_command(cmd); fimg2d4x_reset(ctrl); return -1; } return 0; } static void fimg2d4x_pre_bitblt(struct fimg2d_control *ctrl, struct fimg2d_bltcmd *cmd) { switch (ctrl->pdata->ip_ver) { case IP_VER_G2D_5AR2: /* disable cci path */ g2d_cci_snoop_control(ctrl->pdata->ip_ver, NON_SHAREABLE_PATH, SHARED_G2D_SEL); break; case IP_VER_G2D_5H: case IP_VER_G2D_5HP: #ifndef CCI_SNOOP /* disable cci path */ g2d_cci_snoop_control(ctrl->pdata->ip_ver, NON_SHAREABLE_PATH, SHARED_FROM_SYSMMU); fimg2d_debug("disable cci\n"); #endif #ifdef CCI_SNOOP /* enable cci path */ g2d_cci_snoop_control(ctrl->pdata->ip_ver, SHAREABLE_PATH, SHARED_G2D_SEL); fimg2d_debug("enable cci\n"); #endif break; default: fimg2d_err("g2d_cci_snoop_control is not called\n"); break; } } int fimg2d4x_bitblt(struct fimg2d_control *ctrl) { int ret = 0; enum addr_space addr_type; struct fimg2d_context *ctx; struct fimg2d_bltcmd *cmd; unsigned long *pgd; fimg2d_debug("%s : enter blitter\n", __func__); while (1) { cmd = fimg2d_get_command(ctrl); if (!cmd) break; ctx = cmd->ctx; ctx->state = CTX_READY; #ifdef CONFIG_PM_RUNTIME if 
(fimg2d4x_get_clk_cnt(ctrl->clock) == false) fimg2d_err("2D clock is not set\n"); #endif addr_type = cmd->image[IDST].addr.type; atomic_set(&ctrl->busy, 1); perf_start(cmd, PERF_SFR); ret = ctrl->configure(ctrl, cmd); perf_end(cmd, PERF_SFR); if (IS_ERR_VALUE(ret)) { fimg2d_err("failed to configure\n"); ctx->state = CTX_ERROR; goto fail_n_del; } ctx->vma_lock = vma_lock_mapping(ctx->mm, prefbuf, MAX_IMAGES - 1); if (fimg2d_check_pgd(ctx->mm, cmd)) { ret = -EFAULT; goto fail_n_unmap; } if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) { if (!ctx->mm || !ctx->mm->pgd) { atomic_set(&ctrl->busy, 0); fimg2d_err("ctx->mm:0x%p or ctx->mm->pgd:0x%p\n", ctx->mm, (ctx->mm) ? ctx->mm->pgd : NULL); ret = -EPERM; goto fail_n_unmap; } pgd = (unsigned long *)ctx->mm->pgd; #ifdef CONFIG_EXYNOS7_IOMMU if (iovmm_activate(ctrl->dev)) { fimg2d_err("failed to iovmm activate\n"); ret = -EPERM; goto fail_n_unmap; } #else if (exynos_sysmmu_enable(ctrl->dev, (unsigned long)virt_to_phys(pgd))) { fimg2d_err("failed to sysmme enable\n"); ret = -EPERM; goto fail_n_unmap; } #endif fimg2d_debug("%s : sysmmu enable: pgd %p ctx %p seq_no(%u)\n", __func__, pgd, ctx, cmd->blt.seq_no); //exynos_sysmmu_set_pbuf(ctrl->dev, nbufs, prefbuf); fimg2d_debug("%s : set smmu prefbuf\n", __func__); } fimg2d4x_pre_bitblt(ctrl, cmd); perf_start(cmd, PERF_BLIT); /* start blit */ fimg2d_debug("%s : start blit\n", __func__); ctrl->run(ctrl); ret = fimg2d4x_blit_wait(ctrl, cmd); perf_end(cmd, PERF_BLIT); #ifdef CONFIG_EXYNOS7_IOMMU if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) iovmm_deactivate(ctrl->dev); #else if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) exynos_sysmmu_disable(ctrl->dev); #endif fail_n_unmap: perf_start(cmd, PERF_UNMAP); if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) { fimg2d4x_cleanup_pgtable(ctrl, cmd, ISRC, true); fimg2d4x_cleanup_pgtable(ctrl, cmd, IMSK, false); fimg2d4x_cleanup_pgtable(ctrl, cmd, IDST, true); fimg2d_debug("sysmmu 
disable\n"); } perf_end(cmd, PERF_UNMAP); fail_n_del: vma_unlock_mapping(ctx->vma_lock); fimg2d_del_command(ctrl, cmd); } fimg2d_debug("%s : exit blitter\n", __func__); return ret; } static inline bool is_opaque(enum color_format fmt) { switch (fmt) { case CF_ARGB_8888: case CF_ARGB_1555: case CF_ARGB_4444: return false; default: return true; } } static int fast_op(struct fimg2d_bltcmd *cmd) { int fop; int sa, da, ga; struct fimg2d_param *p; struct fimg2d_image *src, *msk, *dst; p = &cmd->blt.param; src = &cmd->image[ISRC]; msk = &cmd->image[IMSK]; dst = &cmd->image[IDST]; fop = cmd->blt.op; if (msk->addr.type) return fop; ga = p->g_alpha; da = is_opaque(dst->fmt) ? 0xff : 0; if (!src->addr.type) sa = (p->solid_color >> 24) & 0xff; else sa = is_opaque(src->fmt) ? 0xff : 0; switch (cmd->blt.op) { case BLIT_OP_SRC_OVER: /* Sc + (1-Sa)*Dc = Sc */ if (sa == 0xff && ga == 0xff) fop = BLIT_OP_SRC; break; case BLIT_OP_DST_OVER: /* (1-Da)*Sc + Dc = Dc */ if (da == 0xff) fop = BLIT_OP_DST; /* nop */ break; case BLIT_OP_SRC_IN: /* Da*Sc = Sc */ if (da == 0xff) fop = BLIT_OP_SRC; break; case BLIT_OP_DST_IN: /* Sa*Dc = Dc */ if (sa == 0xff && ga == 0xff) fop = BLIT_OP_DST; /* nop */ break; case BLIT_OP_SRC_OUT: /* (1-Da)*Sc = 0 */ if (da == 0xff) fop = BLIT_OP_CLR; break; case BLIT_OP_DST_OUT: /* (1-Sa)*Dc = 0 */ if (sa == 0xff && ga == 0xff) fop = BLIT_OP_CLR; break; case BLIT_OP_SRC_ATOP: /* Da*Sc + (1-Sa)*Dc = Sc */ if (sa == 0xff && da == 0xff && ga == 0xff) fop = BLIT_OP_SRC; break; case BLIT_OP_DST_ATOP: /* (1-Da)*Sc + Sa*Dc = Dc */ if (sa == 0xff && da == 0xff && ga == 0xff) fop = BLIT_OP_DST; /* nop */ break; default: break; } if (fop == BLIT_OP_SRC && !src->addr.type && ga == 0xff) fop = BLIT_OP_SOLID_FILL; return fop; } static int fimg2d4x_configure(struct fimg2d_control *ctrl, struct fimg2d_bltcmd *cmd) { int op; enum image_sel srcsel, dstsel; struct fimg2d_param *p; struct fimg2d_image *src, *msk, *dst; struct sysmmu_prefbuf *pbuf; #ifdef CONFIG_EXYNOS7_IOMMU int 
ret; #endif fimg2d_debug("ctx %p seq_no(%u)\n", cmd->ctx, cmd->blt.seq_no); p = &cmd->blt.param; src = &cmd->image[ISRC]; msk = &cmd->image[IMSK]; dst = &cmd->image[IDST]; fimg2d4x_init(ctrl); /* src and dst select */ srcsel = dstsel = IMG_MEMORY; op = fast_op(cmd); switch (op) { case BLIT_OP_SOLID_FILL: srcsel = dstsel = IMG_FGCOLOR; fimg2d4x_set_fgcolor(ctrl, p->solid_color); break; case BLIT_OP_CLR: srcsel = dstsel = IMG_FGCOLOR; fimg2d4x_set_color_fill(ctrl, 0); break; case BLIT_OP_DST: srcsel = dstsel = IMG_FGCOLOR; break; default: if (!src->addr.type) { srcsel = IMG_FGCOLOR; fimg2d4x_set_fgcolor(ctrl, p->solid_color); } if (op == BLIT_OP_SRC) dstsel = IMG_FGCOLOR; fimg2d4x_enable_alpha(ctrl, p->g_alpha); fimg2d4x_set_alpha_composite(ctrl, op, p->g_alpha); if (p->premult == NON_PREMULTIPLIED) fimg2d4x_set_premultiplied(ctrl); break; } fimg2d4x_set_src_type(ctrl, srcsel); fimg2d4x_set_dst_type(ctrl, dstsel); nbufs = 0; pbuf = &prefbuf[nbufs]; /* src */ if (src->addr.type) { fimg2d4x_set_src_image(ctrl, src); fimg2d4x_set_src_rect(ctrl, &src->rect); fimg2d4x_set_src_repeat(ctrl, &p->repeat); if (p->scaling.mode) fimg2d4x_set_src_scaling(ctrl, &p->scaling, &p->repeat); /* prefbuf */ pbuf->base = cmd->dma[ISRC].base.addr; pbuf->size = cmd->dma[ISRC].base.size; pbuf->config = SYSMMU_PBUFCFG_DEFAULT_INPUT; nbufs++; pbuf++; if (src->order == P2_CRCB || src->order == P2_CBCR) { pbuf->base = cmd->dma[ISRC].plane2.addr; pbuf->size = cmd->dma[ISRC].plane2.size; pbuf->config = SYSMMU_PBUFCFG_DEFAULT_INPUT; nbufs++; pbuf++; } #ifdef CONFIG_EXYNOS7_IOMMU ret = exynos_sysmmu_map_user_pages( ctrl->dev, cmd->ctx->mm, cmd->dma[ISRC].base.addr, cmd->dma[ISRC].base.size, 0); if (IS_ERR_VALUE(ret)) { fimg2d_err("s/w fallback (%d-0:%d)\n", ISRC, ret); return ret; } if (src->order == P2_CRCB || src->order == P2_CBCR) { ret = exynos_sysmmu_map_user_pages( ctrl->dev, cmd->ctx->mm, cmd->dma[ISRC].plane2.addr, cmd->dma[ISRC].plane2.size, 0); if (IS_ERR_VALUE(ret)) { fimg2d_err("s/w 
fallback (%d-1:%d)\n", ISRC, ret); fimg2d4x_cleanup_pgtable(ctrl, cmd, ISRC, false); return ret; } } #endif } /* msk */ if (msk->addr.type) { fimg2d4x_enable_msk(ctrl); fimg2d4x_set_msk_image(ctrl, msk); fimg2d4x_set_msk_rect(ctrl, &msk->rect); fimg2d4x_set_msk_repeat(ctrl, &p->repeat); if (p->scaling.mode) fimg2d4x_set_msk_scaling(ctrl, &p->scaling, &p->repeat); /* prefbuf */ pbuf->base = cmd->dma[IMSK].base.addr; pbuf->size = cmd->dma[IMSK].base.size; pbuf->config = SYSMMU_PBUFCFG_DEFAULT_INPUT; nbufs++; pbuf++; #ifdef CONFIG_EXYNOS7_IOMMU ret = exynos_sysmmu_map_user_pages( ctrl->dev, cmd->ctx->mm, cmd->dma[IMSK].base.addr, cmd->dma[IMSK].base.size, 0); if (IS_ERR_VALUE(ret)) { fimg2d_err("s/w fallback (%d:%d)\n", IMSK, ret); fimg2d4x_cleanup_pgtable(ctrl, cmd, ISRC, true); return ret; } #endif } /* dst */ if (dst->addr.type) { fimg2d4x_set_dst_image(ctrl, dst); fimg2d4x_set_dst_rect(ctrl, &dst->rect); if (p->clipping.enable) fimg2d4x_enable_clipping(ctrl, &p->clipping); /* prefbuf */ pbuf->base = cmd->dma[IDST].base.addr; pbuf->size = cmd->dma[IDST].base.size; pbuf->config = SYSMMU_PBUFCFG_DEFAULT_OUTPUT; nbufs++; pbuf++; if (dst->order == P2_CRCB || dst->order == P2_CBCR) { pbuf->base = cmd->dma[IDST].plane2.addr; pbuf->size = cmd->dma[IDST].plane2.size; pbuf->config = SYSMMU_PBUFCFG_DEFAULT_OUTPUT; nbufs++; pbuf++; } #ifdef CONFIG_EXYNOS7_IOMMU ret = exynos_sysmmu_map_user_pages( ctrl->dev, cmd->ctx->mm, cmd->dma[IDST].base.addr, cmd->dma[IDST].base.size, 1); if (IS_ERR_VALUE(ret)) { fimg2d_err("s/w fallback (%d-0:%d)\n", IDST, ret); fimg2d4x_cleanup_pgtable(ctrl, cmd, ISRC, true); fimg2d4x_cleanup_pgtable(ctrl, cmd, IMSK, false); return ret; } if (dst->order == P2_CRCB || dst->order == P2_CBCR) { ret = exynos_sysmmu_map_user_pages( ctrl->dev, cmd->ctx->mm, cmd->dma[IDST].plane2.addr, cmd->dma[IDST].plane2.size, 1); if (IS_ERR_VALUE(ret)) { fimg2d_err("s/w fallback (%d-1:%d)\n", IDST, ret); fimg2d4x_cleanup_pgtable(ctrl, cmd, ISRC, true); 
fimg2d4x_cleanup_pgtable(ctrl, cmd, IMSK, false); fimg2d4x_cleanup_pgtable(ctrl, cmd, IDST, false); return ret; } } #endif } sysmmu_set_prefetch_buffer_by_region(ctrl->dev, prefbuf, nbufs); /* bluescreen */ if (p->bluscr.mode) fimg2d4x_set_bluescreen(ctrl, &p->bluscr); /* rotation */ if (p->rotate) fimg2d4x_set_rotation(ctrl, p->rotate); /* dithering */ if (p->dither) fimg2d4x_enable_dithering(ctrl); return 0; } static void fimg2d4x_run(struct fimg2d_control *ctrl) { fimg2d_debug("start blit\n"); fimg2d4x_enable_irq(ctrl); fimg2d4x_clear_irq(ctrl); fimg2d4x_start_blit(ctrl); } static void fimg2d4x_stop(struct fimg2d_control *ctrl) { if (fimg2d4x_is_blit_done(ctrl)) { fimg2d_debug("blit done\n"); fimg2d4x_disable_irq(ctrl); fimg2d4x_clear_irq(ctrl); atomic_set(&ctrl->busy, 0); wake_up(&ctrl->wait_q); } } static void fimg2d4x_dump(struct fimg2d_control *ctrl) { fimg2d4x_dump_regs(ctrl); } int fimg2d_register_ops(struct fimg2d_control *ctrl) { ctrl->blit = fimg2d4x_bitblt; ctrl->configure = fimg2d4x_configure; ctrl->run = fimg2d4x_run; ctrl->dump = fimg2d4x_dump; ctrl->stop = fimg2d4x_stop; return 0; }
gpl-2.0
Naoya-Horiguchi/linux
sound/soc/qcom/qdsp6/q6adm.c
62
15327
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. // Copyright (c) 2018, Linaro Limited #include <linux/device.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/kref.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/soc/qcom/apr.h> #include <linux/wait.h> #include <sound/asound.h> #include "q6adm.h" #include "q6afe.h" #include "q6core.h" #include "q6dsp-common.h" #include "q6dsp-errno.h" #define ADM_CMD_DEVICE_OPEN_V5 0x00010326 #define ADM_CMDRSP_DEVICE_OPEN_V5 0x00010329 #define ADM_CMD_DEVICE_CLOSE_V5 0x00010327 #define ADM_CMD_MATRIX_MAP_ROUTINGS_V5 0x00010325 #define TIMEOUT_MS 1000 #define RESET_COPP_ID 99 #define INVALID_COPP_ID 0xFF /* Definition for a legacy device session. */ #define ADM_LEGACY_DEVICE_SESSION 0 #define ADM_MATRIX_ID_AUDIO_RX 0 #define ADM_MATRIX_ID_AUDIO_TX 1 struct q6copp { int afe_port; int copp_idx; int id; int topology; int mode; int rate; int bit_width; int channels; int app_type; int acdb_id; struct aprv2_ibasic_rsp_result_t result; struct kref refcount; wait_queue_head_t wait; struct list_head node; struct q6adm *adm; }; struct q6adm { struct apr_device *apr; struct device *dev; struct q6core_svc_api_info ainfo; unsigned long copp_bitmap[AFE_MAX_PORTS]; struct list_head copps_list; spinlock_t copps_list_lock; struct aprv2_ibasic_rsp_result_t result; struct mutex lock; wait_queue_head_t matrix_map_wait; }; struct q6adm_cmd_device_open_v5 { u16 flags; u16 mode_of_operation; u16 endpoint_id_1; u16 endpoint_id_2; u32 topology_id; u16 dev_num_channel; u16 bit_width; u32 sample_rate; u8 dev_channel_mapping[8]; } __packed; struct q6adm_cmd_matrix_map_routings_v5 { u32 matrix_id; u32 num_sessions; } __packed; struct q6adm_session_map_node_v5 { u16 session_id; u16 num_copps; } __packed; static struct q6copp *q6adm_find_copp(struct 
q6adm *adm, int port_idx, int copp_idx) { struct q6copp *c = NULL; struct q6copp *ret = NULL; unsigned long flags; spin_lock_irqsave(&adm->copps_list_lock, flags); list_for_each_entry(c, &adm->copps_list, node) { if ((port_idx == c->afe_port) && (copp_idx == c->copp_idx)) { ret = c; kref_get(&c->refcount); break; } } spin_unlock_irqrestore(&adm->copps_list_lock, flags); return ret; } static void q6adm_free_copp(struct kref *ref) { struct q6copp *c = container_of(ref, struct q6copp, refcount); struct q6adm *adm = c->adm; unsigned long flags; spin_lock_irqsave(&adm->copps_list_lock, flags); clear_bit(c->copp_idx, &adm->copp_bitmap[c->afe_port]); list_del(&c->node); spin_unlock_irqrestore(&adm->copps_list_lock, flags); kfree(c); } static int q6adm_callback(struct apr_device *adev, struct apr_resp_pkt *data) { struct aprv2_ibasic_rsp_result_t *result = data->payload; int port_idx, copp_idx; struct apr_hdr *hdr = &data->hdr; struct q6copp *copp; struct q6adm *adm = dev_get_drvdata(&adev->dev); if (!data->payload_size) return 0; copp_idx = (hdr->token) & 0XFF; port_idx = ((hdr->token) >> 16) & 0xFF; if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) { dev_err(&adev->dev, "Invalid port idx %d token %d\n", port_idx, hdr->token); return 0; } if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { dev_err(&adev->dev, "Invalid copp idx %d token %d\n", copp_idx, hdr->token); return 0; } switch (hdr->opcode) { case APR_BASIC_RSP_RESULT: { if (result->status != 0) { dev_err(&adev->dev, "cmd = 0x%x return error = 0x%x\n", result->opcode, result->status); } switch (result->opcode) { case ADM_CMD_DEVICE_OPEN_V5: case ADM_CMD_DEVICE_CLOSE_V5: copp = q6adm_find_copp(adm, port_idx, copp_idx); if (!copp) return 0; copp->result = *result; wake_up(&copp->wait); kref_put(&copp->refcount, q6adm_free_copp); break; case ADM_CMD_MATRIX_MAP_ROUTINGS_V5: adm->result = *result; wake_up(&adm->matrix_map_wait); break; default: dev_err(&adev->dev, "Unknown Cmd: 0x%x\n", result->opcode); break; } return 
0; } case ADM_CMDRSP_DEVICE_OPEN_V5: { struct adm_cmd_rsp_device_open_v5 { u32 status; u16 copp_id; u16 reserved; } __packed * open = data->payload; copp = q6adm_find_copp(adm, port_idx, copp_idx); if (!copp) return 0; if (open->copp_id == INVALID_COPP_ID) { dev_err(&adev->dev, "Invalid coppid rxed %d\n", open->copp_id); copp->result.status = ADSP_EBADPARAM; wake_up(&copp->wait); kref_put(&copp->refcount, q6adm_free_copp); break; } copp->result.opcode = hdr->opcode; copp->id = open->copp_id; wake_up(&copp->wait); kref_put(&copp->refcount, q6adm_free_copp); } break; default: dev_err(&adev->dev, "Unknown cmd:0x%x\n", hdr->opcode); break; } return 0; } static struct q6copp *q6adm_alloc_copp(struct q6adm *adm, int port_idx) { struct q6copp *c; int idx; idx = find_first_zero_bit(&adm->copp_bitmap[port_idx], MAX_COPPS_PER_PORT); if (idx > MAX_COPPS_PER_PORT) return ERR_PTR(-EBUSY); c = kzalloc(sizeof(*c), GFP_ATOMIC); if (!c) return ERR_PTR(-ENOMEM); set_bit(idx, &adm->copp_bitmap[port_idx]); c->copp_idx = idx; c->afe_port = port_idx; c->adm = adm; init_waitqueue_head(&c->wait); return c; } static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct q6copp *copp, struct apr_pkt *pkt, uint32_t rsp_opcode) { struct device *dev = adm->dev; uint32_t opcode = pkt->hdr.opcode; int ret; mutex_lock(&adm->lock); copp->result.opcode = 0; copp->result.status = 0; ret = apr_send_pkt(adm->apr, pkt); if (ret < 0) { dev_err(dev, "Failed to send APR packet\n"); ret = -EINVAL; goto err; } /* Wait for the callback with copp id */ if (rsp_opcode) ret = wait_event_timeout(copp->wait, (copp->result.opcode == opcode) || (copp->result.opcode == rsp_opcode), msecs_to_jiffies(TIMEOUT_MS)); else ret = wait_event_timeout(copp->wait, (copp->result.opcode == opcode), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { dev_err(dev, "ADM copp cmd timedout\n"); ret = -ETIMEDOUT; } else if (copp->result.status > 0) { dev_err(dev, "DSP returned error[%d]\n", copp->result.status); ret = -EINVAL; } err: 
mutex_unlock(&adm->lock); return ret; } static int q6adm_device_close(struct q6adm *adm, struct q6copp *copp, int port_id, int copp_idx) { struct apr_pkt close; close.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); close.hdr.pkt_size = sizeof(close); close.hdr.src_port = port_id; close.hdr.dest_port = copp->id; close.hdr.token = port_id << 16 | copp_idx; close.hdr.opcode = ADM_CMD_DEVICE_CLOSE_V5; return q6adm_apr_send_copp_pkt(adm, copp, &close, 0); } static struct q6copp *q6adm_find_matching_copp(struct q6adm *adm, int port_id, int topology, int mode, int rate, int channel_mode, int bit_width, int app_type) { struct q6copp *c = NULL; struct q6copp *ret = NULL; unsigned long flags; spin_lock_irqsave(&adm->copps_list_lock, flags); list_for_each_entry(c, &adm->copps_list, node) { if ((port_id == c->afe_port) && (topology == c->topology) && (mode == c->mode) && (rate == c->rate) && (bit_width == c->bit_width) && (app_type == c->app_type)) { ret = c; kref_get(&c->refcount); } } spin_unlock_irqrestore(&adm->copps_list_lock, flags); return ret; } static int q6adm_device_open(struct q6adm *adm, struct q6copp *copp, int port_id, int path, int topology, int channel_mode, int bit_width, int rate) { struct q6adm_cmd_device_open_v5 *open; int afe_port = q6afe_get_port_id(port_id); struct apr_pkt *pkt; void *p; int ret, pkt_size; pkt_size = APR_HDR_SIZE + sizeof(*open); p = kzalloc(pkt_size, GFP_KERNEL); if (!p) return -ENOMEM; pkt = p; open = p + APR_HDR_SIZE; pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); pkt->hdr.pkt_size = pkt_size; pkt->hdr.src_port = afe_port; pkt->hdr.dest_port = afe_port; pkt->hdr.token = port_id << 16 | copp->copp_idx; pkt->hdr.opcode = ADM_CMD_DEVICE_OPEN_V5; open->flags = ADM_LEGACY_DEVICE_SESSION; open->mode_of_operation = path; open->endpoint_id_1 = afe_port; open->topology_id = topology; open->dev_num_channel = channel_mode & 0x00FF; open->bit_width = 
bit_width; open->sample_rate = rate; ret = q6dsp_map_channels(&open->dev_channel_mapping[0], channel_mode); if (ret) goto err; ret = q6adm_apr_send_copp_pkt(adm, copp, pkt, ADM_CMDRSP_DEVICE_OPEN_V5); err: kfree(pkt); return ret; } /** * q6adm_open() - open adm and grab a free copp * * @dev: Pointer to adm child device. * @port_id: port id * @path: playback or capture path. * @rate: rate at which copp is required. * @channel_mode: channel mode * @topology: adm topology id * @perf_mode: performace mode. * @bit_width: audio sample bit width * @app_type: Application type. * @acdb_id: ACDB id * * Return: Will be an negative on error or a valid copp pointer on success. */ struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate, int channel_mode, int topology, int perf_mode, uint16_t bit_width, int app_type, int acdb_id) { struct q6adm *adm = dev_get_drvdata(dev->parent); struct q6copp *copp; unsigned long flags; int ret = 0; if (port_id < 0) { dev_err(dev, "Invalid port_id 0x%x\n", port_id); return ERR_PTR(-EINVAL); } copp = q6adm_find_matching_copp(adm, port_id, topology, perf_mode, rate, channel_mode, bit_width, app_type); if (copp) { dev_err(dev, "Found Matching Copp 0x%x\n", copp->copp_idx); return copp; } spin_lock_irqsave(&adm->copps_list_lock, flags); copp = q6adm_alloc_copp(adm, port_id); if (IS_ERR(copp)) { spin_unlock_irqrestore(&adm->copps_list_lock, flags); return ERR_CAST(copp); } list_add_tail(&copp->node, &adm->copps_list); spin_unlock_irqrestore(&adm->copps_list_lock, flags); kref_init(&copp->refcount); copp->topology = topology; copp->mode = perf_mode; copp->rate = rate; copp->channels = channel_mode; copp->bit_width = bit_width; copp->app_type = app_type; ret = q6adm_device_open(adm, copp, port_id, path, topology, channel_mode, bit_width, rate); if (ret < 0) { kref_put(&copp->refcount, q6adm_free_copp); return ERR_PTR(ret); } return copp; } EXPORT_SYMBOL_GPL(q6adm_open); /** * q6adm_get_copp_id() - get copp index * * @copp: 
Pointer to valid copp * * Return: Will be an negative on error or a valid copp index on success. **/ int q6adm_get_copp_id(struct q6copp *copp) { if (!copp) return -EINVAL; return copp->copp_idx; } EXPORT_SYMBOL_GPL(q6adm_get_copp_id); /** * q6adm_matrix_map() - Map asm streams and afe ports using payload * * @dev: Pointer to adm child device. * @path: playback or capture path. * @payload_map: map between session id and afe ports. * @perf_mode: Performace mode. * * Return: Will be an negative on error or a zero on success. */ int q6adm_matrix_map(struct device *dev, int path, struct route_payload payload_map, int perf_mode) { struct q6adm *adm = dev_get_drvdata(dev->parent); struct q6adm_cmd_matrix_map_routings_v5 *route; struct q6adm_session_map_node_v5 *node; struct apr_pkt *pkt; uint16_t *copps_list; int pkt_size, ret, i, copp_idx; void *matrix_map = NULL; struct q6copp *copp; /* Assumes port_ids have already been validated during adm_open */ pkt_size = (APR_HDR_SIZE + sizeof(*route) + sizeof(*node) + (sizeof(uint32_t) * payload_map.num_copps)); matrix_map = kzalloc(pkt_size, GFP_KERNEL); if (!matrix_map) return -ENOMEM; pkt = matrix_map; route = matrix_map + APR_HDR_SIZE; node = matrix_map + APR_HDR_SIZE + sizeof(*route); copps_list = matrix_map + APR_HDR_SIZE + sizeof(*route) + sizeof(*node); pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); pkt->hdr.pkt_size = pkt_size; pkt->hdr.token = 0; pkt->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5; route->num_sessions = 1; switch (path) { case ADM_PATH_PLAYBACK: route->matrix_id = ADM_MATRIX_ID_AUDIO_RX; break; case ADM_PATH_LIVE_REC: route->matrix_id = ADM_MATRIX_ID_AUDIO_TX; break; default: dev_err(dev, "Wrong path set[%d]\n", path); break; } node->session_id = payload_map.session_id; node->num_copps = payload_map.num_copps; for (i = 0; i < payload_map.num_copps; i++) { int port_idx = payload_map.port_id[i]; if (port_idx < 0) { dev_err(dev, "Invalid port_id 0x%x\n", 
payload_map.port_id[i]); kfree(pkt); return -EINVAL; } copp_idx = payload_map.copp_idx[i]; copp = q6adm_find_copp(adm, port_idx, copp_idx); if (!copp) { kfree(pkt); return -EINVAL; } copps_list[i] = copp->id; kref_put(&copp->refcount, q6adm_free_copp); } mutex_lock(&adm->lock); adm->result.status = 0; adm->result.opcode = 0; ret = apr_send_pkt(adm->apr, pkt); if (ret < 0) { dev_err(dev, "routing for stream %d failed ret %d\n", payload_map.session_id, ret); goto fail_cmd; } ret = wait_event_timeout(adm->matrix_map_wait, adm->result.opcode == pkt->hdr.opcode, msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { dev_err(dev, "routing for stream %d failed\n", payload_map.session_id); ret = -ETIMEDOUT; goto fail_cmd; } else if (adm->result.status > 0) { dev_err(dev, "DSP returned error[%d]\n", adm->result.status); ret = -EINVAL; goto fail_cmd; } fail_cmd: mutex_unlock(&adm->lock); kfree(pkt); return ret; } EXPORT_SYMBOL_GPL(q6adm_matrix_map); /** * q6adm_close() - Close adm copp * * @dev: Pointer to adm child device. * @copp: pointer to previously opened copp * * Return: Will be an negative on error or a zero on success. 
*/ int q6adm_close(struct device *dev, struct q6copp *copp) { struct q6adm *adm = dev_get_drvdata(dev->parent); int ret = 0; ret = q6adm_device_close(adm, copp, copp->afe_port, copp->copp_idx); if (ret < 0) { dev_err(adm->dev, "Failed to close copp %d\n", ret); return ret; } kref_put(&copp->refcount, q6adm_free_copp); return 0; } EXPORT_SYMBOL_GPL(q6adm_close); static int q6adm_probe(struct apr_device *adev) { struct device *dev = &adev->dev; struct q6adm *adm; adm = devm_kzalloc(dev, sizeof(*adm), GFP_KERNEL); if (!adm) return -ENOMEM; adm->apr = adev; dev_set_drvdata(dev, adm); adm->dev = dev; q6core_get_svc_api_info(adev->svc_id, &adm->ainfo); mutex_init(&adm->lock); init_waitqueue_head(&adm->matrix_map_wait); INIT_LIST_HEAD(&adm->copps_list); spin_lock_init(&adm->copps_list_lock); return devm_of_platform_populate(dev); } #ifdef CONFIG_OF static const struct of_device_id q6adm_device_id[] = { { .compatible = "qcom,q6adm" }, {}, }; MODULE_DEVICE_TABLE(of, q6adm_device_id); #endif static struct apr_driver qcom_q6adm_driver = { .probe = q6adm_probe, .callback = q6adm_callback, .driver = { .name = "qcom-q6adm", .of_match_table = of_match_ptr(q6adm_device_id), }, }; module_apr_driver(qcom_q6adm_driver); MODULE_DESCRIPTION("Q6 Audio Device Manager"); MODULE_LICENSE("GPL v2");
gpl-2.0
svenkatr/linux
sound/soc/fsl/p1022_rdk.c
318
11282
/** * Freescale P1022RDK ALSA SoC Machine driver * * Author: Timur Tabi <timur@freescale.com> * * Copyright 2012 Freescale Semiconductor, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * * Note: in order for audio to work correctly, the output controls need * to be enabled, because they control the clock. So for playback, for * example: * * amixer sset 'Left Output Mixer PCM' on * amixer sset 'Right Output Mixer PCM' on */ #include <linux/module.h> #include <linux/fsl/guts.h> #include <linux/interrupt.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/slab.h> #include <sound/soc.h> #include "fsl_dma.h" #include "fsl_ssi.h" #include "fsl_utils.h" /* P1022-specific PMUXCR and DMUXCR bit definitions */ #define CCSR_GUTS_PMUXCR_UART0_I2C1_MASK 0x0001c000 #define CCSR_GUTS_PMUXCR_UART0_I2C1_UART0_SSI 0x00010000 #define CCSR_GUTS_PMUXCR_UART0_I2C1_SSI 0x00018000 #define CCSR_GUTS_PMUXCR_SSI_DMA_TDM_MASK 0x00000c00 #define CCSR_GUTS_PMUXCR_SSI_DMA_TDM_SSI 0x00000000 #define CCSR_GUTS_DMUXCR_PAD 1 /* DMA controller/channel set to pad */ #define CCSR_GUTS_DMUXCR_SSI 2 /* DMA controller/channel set to SSI */ /* * Set the DMACR register in the GUTS * * The DMACR register determines the source of initiated transfers for each * channel on each DMA controller. Rather than have a bunch of repetitive * macros for the bit patterns, we just have a function that calculates * them. 
* * guts: Pointer to GUTS structure * co: The DMA controller (0 or 1) * ch: The channel on the DMA controller (0, 1, 2, or 3) * device: The device to set as the target (CCSR_GUTS_DMUXCR_xxx) */ static inline void guts_set_dmuxcr(struct ccsr_guts __iomem *guts, unsigned int co, unsigned int ch, unsigned int device) { unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); clrsetbits_be32(&guts->dmuxcr, 3 << shift, device << shift); } /* There's only one global utilities register */ static phys_addr_t guts_phys; /** * machine_data: machine-specific ASoC device data * * This structure contains data for a single sound platform device on an * P1022 RDK. Some of the data is taken from the device tree. */ struct machine_data { struct snd_soc_dai_link dai[2]; struct snd_soc_card card; unsigned int dai_format; unsigned int codec_clk_direction; unsigned int cpu_clk_direction; unsigned int clk_frequency; unsigned int dma_id[2]; /* 0 = DMA1, 1 = DMA2, etc */ unsigned int dma_channel_id[2]; /* 0 = ch 0, 1 = ch 1, etc*/ char platform_name[2][DAI_NAME_SIZE]; /* One for each DMA channel */ }; /** * p1022_rdk_machine_probe: initialize the board * * This function is used to initialize the board-specific hardware. * * Here we program the DMACR and PMUXCR registers. 
*/ static int p1022_rdk_machine_probe(struct snd_soc_card *card) { struct machine_data *mdata = container_of(card, struct machine_data, card); struct ccsr_guts __iomem *guts; guts = ioremap(guts_phys, sizeof(struct ccsr_guts)); if (!guts) { dev_err(card->dev, "could not map global utilities\n"); return -ENOMEM; } /* Enable SSI Tx signal */ clrsetbits_be32(&guts->pmuxcr, CCSR_GUTS_PMUXCR_UART0_I2C1_MASK, CCSR_GUTS_PMUXCR_UART0_I2C1_UART0_SSI); /* Enable SSI Rx signal */ clrsetbits_be32(&guts->pmuxcr, CCSR_GUTS_PMUXCR_SSI_DMA_TDM_MASK, CCSR_GUTS_PMUXCR_SSI_DMA_TDM_SSI); /* Enable DMA Channel for SSI */ guts_set_dmuxcr(guts, mdata->dma_id[0], mdata->dma_channel_id[0], CCSR_GUTS_DMUXCR_SSI); guts_set_dmuxcr(guts, mdata->dma_id[1], mdata->dma_channel_id[1], CCSR_GUTS_DMUXCR_SSI); iounmap(guts); return 0; } /** * p1022_rdk_startup: program the board with various hardware parameters * * This function takes board-specific information, like clock frequencies * and serial data formats, and passes that information to the codec and * transport drivers. */ static int p1022_rdk_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct machine_data *mdata = container_of(rtd->card, struct machine_data, card); struct device *dev = rtd->card->dev; int ret = 0; /* Tell the codec driver what the serial protocol is. */ ret = snd_soc_dai_set_fmt(rtd->codec_dai, mdata->dai_format); if (ret < 0) { dev_err(dev, "could not set codec driver audio format (ret=%i)\n", ret); return ret; } ret = snd_soc_dai_set_pll(rtd->codec_dai, 0, 0, mdata->clk_frequency, mdata->clk_frequency); if (ret < 0) { dev_err(dev, "could not set codec PLL frequency (ret=%i)\n", ret); return ret; } return 0; } /** * p1022_rdk_machine_remove: Remove the sound device * * This function is called to remove the sound device for one SSI. We * de-program the DMACR and PMUXCR register. 
*/ static int p1022_rdk_machine_remove(struct snd_soc_card *card) { struct machine_data *mdata = container_of(card, struct machine_data, card); struct ccsr_guts __iomem *guts; guts = ioremap(guts_phys, sizeof(struct ccsr_guts)); if (!guts) { dev_err(card->dev, "could not map global utilities\n"); return -ENOMEM; } /* Restore the signal routing */ clrbits32(&guts->pmuxcr, CCSR_GUTS_PMUXCR_UART0_I2C1_MASK); clrbits32(&guts->pmuxcr, CCSR_GUTS_PMUXCR_SSI_DMA_TDM_MASK); guts_set_dmuxcr(guts, mdata->dma_id[0], mdata->dma_channel_id[0], 0); guts_set_dmuxcr(guts, mdata->dma_id[1], mdata->dma_channel_id[1], 0); iounmap(guts); return 0; } /** * p1022_rdk_ops: ASoC machine driver operations */ static struct snd_soc_ops p1022_rdk_ops = { .startup = p1022_rdk_startup, }; /** * p1022_rdk_probe: platform probe function for the machine driver * * Although this is a machine driver, the SSI node is the "master" node with * respect to audio hardware connections. Therefore, we create a new ASoC * device for each new SSI node that has a codec attached. */ static int p1022_rdk_probe(struct platform_device *pdev) { struct device *dev = pdev->dev.parent; /* ssi_pdev is the platform device for the SSI node that probed us */ struct platform_device *ssi_pdev = to_platform_device(dev); struct device_node *np = ssi_pdev->dev.of_node; struct device_node *codec_np = NULL; struct machine_data *mdata; const u32 *iprop; int ret; /* Find the codec node for this SSI. */ codec_np = of_parse_phandle(np, "codec-handle", 0); if (!codec_np) { dev_err(dev, "could not find codec node\n"); return -EINVAL; } mdata = kzalloc(sizeof(struct machine_data), GFP_KERNEL); if (!mdata) { ret = -ENOMEM; goto error_put; } mdata->dai[0].cpu_dai_name = dev_name(&ssi_pdev->dev); mdata->dai[0].ops = &p1022_rdk_ops; /* ASoC core can match codec with device node */ mdata->dai[0].codec_of_node = codec_np; /* * We register two DAIs per SSI, one for playback and the other for * capture. 
We support codecs that have separate DAIs for both playback * and capture. */ memcpy(&mdata->dai[1], &mdata->dai[0], sizeof(struct snd_soc_dai_link)); /* The DAI names from the codec (snd_soc_dai_driver.name) */ mdata->dai[0].codec_dai_name = "wm8960-hifi"; mdata->dai[1].codec_dai_name = mdata->dai[0].codec_dai_name; /* * Configure the SSI for I2S slave mode. Older device trees have * an fsl,mode property, but we ignore that since there's really * only one way to configure the SSI. */ mdata->dai_format = SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM; mdata->codec_clk_direction = SND_SOC_CLOCK_OUT; mdata->cpu_clk_direction = SND_SOC_CLOCK_IN; /* * In i2s-slave mode, the codec has its own clock source, so we * need to get the frequency from the device tree and pass it to * the codec driver. */ iprop = of_get_property(codec_np, "clock-frequency", NULL); if (!iprop || !*iprop) { dev_err(&pdev->dev, "codec bus-frequency property is missing or invalid\n"); ret = -EINVAL; goto error; } mdata->clk_frequency = be32_to_cpup(iprop); if (!mdata->clk_frequency) { dev_err(&pdev->dev, "unknown clock frequency\n"); ret = -EINVAL; goto error; } /* Find the playback DMA channel to use. */ mdata->dai[0].platform_name = mdata->platform_name[0]; ret = fsl_asoc_get_dma_channel(np, "fsl,playback-dma", &mdata->dai[0], &mdata->dma_channel_id[0], &mdata->dma_id[0]); if (ret) { dev_err(&pdev->dev, "missing/invalid playback DMA phandle (ret=%i)\n", ret); goto error; } /* Find the capture DMA channel to use. */ mdata->dai[1].platform_name = mdata->platform_name[1]; ret = fsl_asoc_get_dma_channel(np, "fsl,capture-dma", &mdata->dai[1], &mdata->dma_channel_id[1], &mdata->dma_id[1]); if (ret) { dev_err(&pdev->dev, "missing/invalid capture DMA phandle (ret=%i)\n", ret); goto error; } /* Initialize our DAI data structure. 
*/ mdata->dai[0].stream_name = "playback"; mdata->dai[1].stream_name = "capture"; mdata->dai[0].name = mdata->dai[0].stream_name; mdata->dai[1].name = mdata->dai[1].stream_name; mdata->card.probe = p1022_rdk_machine_probe; mdata->card.remove = p1022_rdk_machine_remove; mdata->card.name = pdev->name; /* The platform driver name */ mdata->card.owner = THIS_MODULE; mdata->card.dev = &pdev->dev; mdata->card.num_links = 2; mdata->card.dai_link = mdata->dai; /* Register with ASoC */ ret = snd_soc_register_card(&mdata->card); if (ret) { dev_err(&pdev->dev, "could not register card (ret=%i)\n", ret); goto error; } return 0; error: kfree(mdata); error_put: of_node_put(codec_np); return ret; } /** * p1022_rdk_remove: remove the platform device * * This function is called when the platform device is removed. */ static int p1022_rdk_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); struct machine_data *mdata = container_of(card, struct machine_data, card); snd_soc_unregister_card(card); kfree(mdata); return 0; } static struct platform_driver p1022_rdk_driver = { .probe = p1022_rdk_probe, .remove = p1022_rdk_remove, .driver = { /* * The name must match 'compatible' property in the device tree, * in lowercase letters. */ .name = "snd-soc-p1022rdk", }, }; /** * p1022_rdk_init: machine driver initialization. * * This function is called when this module is loaded. */ static int __init p1022_rdk_init(void) { struct device_node *guts_np; struct resource res; /* Get the physical address of the global utilities registers */ guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts"); if (of_address_to_resource(guts_np, 0, &res)) { pr_err("snd-soc-p1022rdk: missing/invalid global utils node\n"); of_node_put(guts_np); return -EINVAL; } guts_phys = res.start; of_node_put(guts_np); return platform_driver_register(&p1022_rdk_driver); } /** * p1022_rdk_exit: machine driver exit * * This function is called when this driver is unloaded. 
*/ static void __exit p1022_rdk_exit(void) { platform_driver_unregister(&p1022_rdk_driver); } late_initcall(p1022_rdk_init); module_exit(p1022_rdk_exit); MODULE_AUTHOR("Timur Tabi <timur@freescale.com>"); MODULE_DESCRIPTION("Freescale / iVeia P1022 RDK ALSA SoC machine driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
aceofall/linux-kernel
drivers/video/backlight/lm3533_bl.c
318
8993
/* * lm3533-bl.c -- LM3533 Backlight driver * * Copyright (C) 2011-2012 Texas Instruments * * Author: Johan Hovold <jhovold@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/backlight.h> #include <linux/fb.h> #include <linux/slab.h> #include <linux/mfd/lm3533.h> #define LM3533_HVCTRLBANK_COUNT 2 #define LM3533_BL_MAX_BRIGHTNESS 255 #define LM3533_REG_CTRLBANK_AB_BCONF 0x1a struct lm3533_bl { struct lm3533 *lm3533; struct lm3533_ctrlbank cb; struct backlight_device *bd; int id; }; static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl) { return bl->id; } static int lm3533_bl_update_status(struct backlight_device *bd) { struct lm3533_bl *bl = bl_get_data(bd); int brightness = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) brightness = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) brightness = 0; return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness); } static int lm3533_bl_get_brightness(struct backlight_device *bd) { struct lm3533_bl *bl = bl_get_data(bd); u8 val; int ret; ret = lm3533_ctrlbank_get_brightness(&bl->cb, &val); if (ret) return ret; return val; } static const struct backlight_ops lm3533_bl_ops = { .get_brightness = lm3533_bl_get_brightness, .update_status = lm3533_bl_update_status, }; static ssize_t show_id(struct device *dev, struct device_attribute *attr, char *buf) { struct lm3533_bl *bl = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", bl->id); } static ssize_t show_als_channel(struct device *dev, struct device_attribute *attr, char *buf) { struct lm3533_bl *bl = dev_get_drvdata(dev); unsigned channel = lm3533_bl_get_ctrlbank_id(bl); return scnprintf(buf, PAGE_SIZE, "%u\n", channel); } 
static ssize_t show_als_en(struct device *dev, struct device_attribute *attr, char *buf) { struct lm3533_bl *bl = dev_get_drvdata(dev); int ctrlbank = lm3533_bl_get_ctrlbank_id(bl); u8 val; u8 mask; bool enable; int ret; ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val); if (ret) return ret; mask = 1 << (2 * ctrlbank); enable = val & mask; return scnprintf(buf, PAGE_SIZE, "%d\n", enable); } static ssize_t store_als_en(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct lm3533_bl *bl = dev_get_drvdata(dev); int ctrlbank = lm3533_bl_get_ctrlbank_id(bl); int enable; u8 val; u8 mask; int ret; if (kstrtoint(buf, 0, &enable)) return -EINVAL; mask = 1 << (2 * ctrlbank); if (enable) val = mask; else val = 0; ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val, mask); if (ret) return ret; return len; } static ssize_t show_linear(struct device *dev, struct device_attribute *attr, char *buf) { struct lm3533_bl *bl = dev_get_drvdata(dev); u8 val; u8 mask; int linear; int ret; ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val); if (ret) return ret; mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1); if (val & mask) linear = 1; else linear = 0; return scnprintf(buf, PAGE_SIZE, "%x\n", linear); } static ssize_t store_linear(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct lm3533_bl *bl = dev_get_drvdata(dev); unsigned long linear; u8 mask; u8 val; int ret; if (kstrtoul(buf, 0, &linear)) return -EINVAL; mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1); if (linear) val = mask; else val = 0; ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val, mask); if (ret) return ret; return len; } static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct lm3533_bl *bl = dev_get_drvdata(dev); u8 val; int ret; ret = lm3533_ctrlbank_get_pwm(&bl->cb, &val); if (ret) return ret; return scnprintf(buf, PAGE_SIZE, "%u\n", val); } 
static ssize_t store_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct lm3533_bl *bl = dev_get_drvdata(dev); u8 val; int ret; if (kstrtou8(buf, 0, &val)) return -EINVAL; ret = lm3533_ctrlbank_set_pwm(&bl->cb, val); if (ret) return ret; return len; } static LM3533_ATTR_RO(als_channel); static LM3533_ATTR_RW(als_en); static LM3533_ATTR_RO(id); static LM3533_ATTR_RW(linear); static LM3533_ATTR_RW(pwm); static struct attribute *lm3533_bl_attributes[] = { &dev_attr_als_channel.attr, &dev_attr_als_en.attr, &dev_attr_id.attr, &dev_attr_linear.attr, &dev_attr_pwm.attr, NULL, }; static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct lm3533_bl *bl = dev_get_drvdata(dev); umode_t mode = attr->mode; if (attr == &dev_attr_als_channel.attr || attr == &dev_attr_als_en.attr) { if (!bl->lm3533->have_als) mode = 0; } return mode; }; static struct attribute_group lm3533_bl_attribute_group = { .is_visible = lm3533_bl_attr_is_visible, .attrs = lm3533_bl_attributes }; static int lm3533_bl_setup(struct lm3533_bl *bl, struct lm3533_bl_platform_data *pdata) { int ret; ret = lm3533_ctrlbank_set_max_current(&bl->cb, pdata->max_current); if (ret) return ret; return lm3533_ctrlbank_set_pwm(&bl->cb, pdata->pwm); } static int lm3533_bl_probe(struct platform_device *pdev) { struct lm3533 *lm3533; struct lm3533_bl_platform_data *pdata; struct lm3533_bl *bl; struct backlight_device *bd; struct backlight_properties props; int ret; dev_dbg(&pdev->dev, "%s\n", __func__); lm3533 = dev_get_drvdata(pdev->dev.parent); if (!lm3533) return -EINVAL; pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); return -EINVAL; } if (pdev->id < 0 || pdev->id >= LM3533_HVCTRLBANK_COUNT) { dev_err(&pdev->dev, "illegal backlight id %d\n", pdev->id); return -EINVAL; } bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL); if (!bl) 
{ dev_err(&pdev->dev, "failed to allocate memory for backlight\n"); return -ENOMEM; } bl->lm3533 = lm3533; bl->id = pdev->id; bl->cb.lm3533 = lm3533; bl->cb.id = lm3533_bl_get_ctrlbank_id(bl); bl->cb.dev = NULL; /* until registered */ memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; props.max_brightness = LM3533_BL_MAX_BRIGHTNESS; props.brightness = pdata->default_brightness; bd = devm_backlight_device_register(&pdev->dev, pdata->name, pdev->dev.parent, bl, &lm3533_bl_ops, &props); if (IS_ERR(bd)) { dev_err(&pdev->dev, "failed to register backlight device\n"); return PTR_ERR(bd); } bl->bd = bd; bl->cb.dev = &bl->bd->dev; platform_set_drvdata(pdev, bl); ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group); if (ret < 0) { dev_err(&pdev->dev, "failed to create sysfs attributes\n"); return ret; } backlight_update_status(bd); ret = lm3533_bl_setup(bl, pdata); if (ret) goto err_sysfs_remove; ret = lm3533_ctrlbank_enable(&bl->cb); if (ret) goto err_sysfs_remove; return 0; err_sysfs_remove: sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group); return ret; } static int lm3533_bl_remove(struct platform_device *pdev) { struct lm3533_bl *bl = platform_get_drvdata(pdev); struct backlight_device *bd = bl->bd; dev_dbg(&bd->dev, "%s\n", __func__); bd->props.power = FB_BLANK_POWERDOWN; bd->props.brightness = 0; lm3533_ctrlbank_disable(&bl->cb); sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group); return 0; } #ifdef CONFIG_PM_SLEEP static int lm3533_bl_suspend(struct device *dev) { struct lm3533_bl *bl = dev_get_drvdata(dev); dev_dbg(dev, "%s\n", __func__); return lm3533_ctrlbank_disable(&bl->cb); } static int lm3533_bl_resume(struct device *dev) { struct lm3533_bl *bl = dev_get_drvdata(dev); dev_dbg(dev, "%s\n", __func__); return lm3533_ctrlbank_enable(&bl->cb); } #endif static SIMPLE_DEV_PM_OPS(lm3533_bl_pm_ops, lm3533_bl_suspend, lm3533_bl_resume); static void lm3533_bl_shutdown(struct platform_device *pdev) { struct lm3533_bl *bl = 
platform_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s\n", __func__); lm3533_ctrlbank_disable(&bl->cb); } static struct platform_driver lm3533_bl_driver = { .driver = { .name = "lm3533-backlight", .owner = THIS_MODULE, .pm = &lm3533_bl_pm_ops, }, .probe = lm3533_bl_probe, .remove = lm3533_bl_remove, .shutdown = lm3533_bl_shutdown, }; module_platform_driver(lm3533_bl_driver); MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>"); MODULE_DESCRIPTION("LM3533 Backlight driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:lm3533-backlight");
gpl-2.0
Silentlys/android_kernel_qcom_msm8916
drivers/usb/gadget/ci13xxx_msm.c
318
12036
/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/usb/msm_hsusb_hw.h> #include <linux/usb/ulpi.h> #include <linux/gpio.h> #include <linux/pinctrl/consumer.h> #include "ci13xxx_udc.c" #define MSM_USB_BASE (udc->regs) #define CI13XXX_MSM_MAX_LOG2_ITC 7 struct ci13xxx_udc_context { int irq; void __iomem *regs; int wake_gpio; int wake_irq; bool wake_irq_state; struct pinctrl *ci13xxx_pinctrl; }; static struct ci13xxx_udc_context _udc_ctxt; static irqreturn_t msm_udc_irq(int irq, void *data) { return udc_irq(); } static void ci13xxx_msm_suspend(void) { struct device *dev = _udc->gadget.dev.parent; dev_dbg(dev, "ci13xxx_msm_suspend\n"); if (_udc_ctxt.wake_irq && !_udc_ctxt.wake_irq_state) { enable_irq_wake(_udc_ctxt.wake_irq); enable_irq(_udc_ctxt.wake_irq); _udc_ctxt.wake_irq_state = true; } } static void ci13xxx_msm_resume(void) { struct device *dev = _udc->gadget.dev.parent; dev_dbg(dev, "ci13xxx_msm_resume\n"); if (_udc_ctxt.wake_irq && _udc_ctxt.wake_irq_state) { disable_irq_wake(_udc_ctxt.wake_irq); disable_irq_nosync(_udc_ctxt.wake_irq); _udc_ctxt.wake_irq_state = false; } } static void ci13xxx_msm_disconnect(void) { struct ci13xxx *udc = _udc; struct usb_phy *phy = udc->transceiver; if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) { u32 temp; usb_phy_io_write(phy, ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL, ULPI_CLR(ULPI_MISC_A)); /* Notify LINK of VBUS LOW */ temp = readl_relaxed(USB_USBCMD); temp &= ~USBCMD_SESS_VLD_CTRL; writel_relaxed(temp, USB_USBCMD); /* * Add memory barrier as it is must to complete * above USB PHY and Link register writes before * moving ahead with USB peripheral mode enumeration, * otherwise USB 
peripheral mode may not work. */ mb(); } } /* Link power management will reduce power consumption by * short time HW suspend/resume. */ static void ci13xxx_msm_set_l1(struct ci13xxx *udc) { int temp; struct device *dev = udc->gadget.dev.parent; dev_dbg(dev, "Enable link power management\n"); /* Enable remote wakeup and L1 for IN EPs */ writel_relaxed(0xffff0000, USB_L1_EP_CTRL); temp = readl_relaxed(USB_L1_CONFIG); temp |= L1_CONFIG_LPM_EN | L1_CONFIG_REMOTE_WAKEUP | L1_CONFIG_GATE_SYS_CLK | L1_CONFIG_PHY_LPM | L1_CONFIG_PLL; writel_relaxed(temp, USB_L1_CONFIG); } static void ci13xxx_msm_connect(void) { struct ci13xxx *udc = _udc; struct usb_phy *phy = udc->transceiver; if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) { int temp; usb_phy_io_write(phy, ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL, ULPI_SET(ULPI_MISC_A)); temp = readl_relaxed(USB_GENCONFIG2); temp |= GENCFG2_SESS_VLD_CTRL_EN; writel_relaxed(temp, USB_GENCONFIG2); temp = readl_relaxed(USB_USBCMD); temp |= USBCMD_SESS_VLD_CTRL; writel_relaxed(temp, USB_USBCMD); /* * Add memory barrier as it is must to complete * above USB PHY and Link register writes before * moving ahead with USB peripheral mode enumeration, * otherwise USB peripheral mode may not work. 
*/ mb(); } } static void ci13xxx_msm_reset(void) { struct ci13xxx *udc = _udc; struct usb_phy *phy = udc->transceiver; struct device *dev = udc->gadget.dev.parent; int temp; writel_relaxed(0, USB_AHBBURST); writel_relaxed(0x08, USB_AHBMODE); /* workaround for rx buffer collision issue */ temp = readl_relaxed(USB_GENCONFIG); temp &= ~GENCONFIG_TXFIFO_IDLE_FORCE_DISABLE; writel_relaxed(temp, USB_GENCONFIG); if (udc->gadget.l1_supported) ci13xxx_msm_set_l1(udc); if (phy && (phy->flags & ENABLE_SECONDARY_PHY)) { int temp; dev_dbg(dev, "using secondary hsphy\n"); temp = readl_relaxed(USB_PHY_CTRL2); temp |= (1<<16); writel_relaxed(temp, USB_PHY_CTRL2); /* * Add memory barrier to make sure above LINK writes are * complete before moving ahead with USB peripheral mode * enumeration. */ mb(); } } static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event) { struct device *dev = udc->gadget.dev.parent; switch (event) { case CI13XXX_CONTROLLER_RESET_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n"); ci13xxx_msm_reset(); break; case CI13XXX_CONTROLLER_DISCONNECT_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n"); ci13xxx_msm_disconnect(); ci13xxx_msm_resume(); break; case CI13XXX_CONTROLLER_CONNECT_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_CONNECT_EVENT received\n"); ci13xxx_msm_connect(); break; case CI13XXX_CONTROLLER_SUSPEND_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_SUSPEND_EVENT received\n"); ci13xxx_msm_suspend(); break; case CI13XXX_CONTROLLER_RESUME_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_RESUME_EVENT received\n"); ci13xxx_msm_resume(); break; default: dev_dbg(dev, "unknown ci13xxx_udc event\n"); break; } } static bool ci13xxx_msm_in_lpm(struct ci13xxx *udc) { struct msm_otg *otg; if (udc == NULL) return false; if (udc->transceiver == NULL) return false; otg = container_of(udc->transceiver, struct msm_otg, phy); return (atomic_read(&otg->in_lpm) != 0); } static void ci13xxx_msm_set_fpr_flag(struct ci13xxx *udc) { struct 
msm_otg *otg; if (udc == NULL) return; if (udc->transceiver == NULL) return; otg = container_of(udc->transceiver, struct msm_otg, phy); atomic_set(&otg->set_fpr_with_lpm_exit, 1); } static irqreturn_t ci13xxx_msm_resume_irq(int irq, void *data) { struct ci13xxx *udc = _udc; if (udc->transceiver && udc->vbus_active && udc->suspended) usb_phy_set_suspend(udc->transceiver, 0); else if (!udc->suspended) ci13xxx_msm_resume(); return IRQ_HANDLED; } static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = { .name = "ci13xxx_msm", .flags = CI13XXX_REGS_SHARED | CI13XXX_REQUIRE_TRANSCEIVER | CI13XXX_PULLUP_ON_VBUS | CI13XXX_ZERO_ITC | CI13XXX_DISABLE_STREAMING | CI13XXX_IS_OTG, .nz_itc = 0, .notify_event = ci13xxx_msm_notify_event, .in_lpm = ci13xxx_msm_in_lpm, .set_fpr_flag = ci13xxx_msm_set_fpr_flag, }; static int ci13xxx_msm_install_wake_gpio(struct platform_device *pdev, struct resource *res) { int wake_irq; int ret; struct pinctrl_state *set_state; dev_dbg(&pdev->dev, "ci13xxx_msm_install_wake_gpio\n"); _udc_ctxt.wake_gpio = res->start; if (_udc_ctxt.ci13xxx_pinctrl) { set_state = pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl, "ci13xxx_active"); if (IS_ERR(set_state)) { pr_err("cannot get ci13xxx pinctrl active state\n"); return PTR_ERR(set_state); } pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl, set_state); } gpio_request(_udc_ctxt.wake_gpio, "USB_RESUME"); gpio_direction_input(_udc_ctxt.wake_gpio); wake_irq = gpio_to_irq(_udc_ctxt.wake_gpio); if (wake_irq < 0) { dev_err(&pdev->dev, "could not register USB_RESUME GPIO.\n"); return -ENXIO; } dev_dbg(&pdev->dev, "_udc_ctxt.gpio_irq = %d and irq = %d\n", _udc_ctxt.wake_gpio, wake_irq); ret = request_irq(wake_irq, ci13xxx_msm_resume_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "usb resume", NULL); if (ret < 0) { dev_err(&pdev->dev, "could not register USB_RESUME IRQ.\n"); goto gpio_free; } disable_irq(wake_irq); _udc_ctxt.wake_irq = wake_irq; return 0; gpio_free: gpio_free(_udc_ctxt.wake_gpio); if 
/*
 * Tail of the ci13xxx MSM UDC glue driver.  This span opens inside
 * ci13xxx_msm_install_wake_gpio(); the fragment below is that function's
 * pinctrl sleep-state selection and error epilogue.
 */
(_udc_ctxt.ci13xxx_pinctrl) {
		set_state = pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
						 "ci13xxx_sleep");
		if (IS_ERR(set_state))
			pr_err("cannot get ci13xxx pinctrl sleep state\n");
		else
			pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl,
					     set_state);
	}
	/* Mark the wake GPIO as unclaimed so uninstall becomes a no-op. */
	_udc_ctxt.wake_gpio = 0;
	return ret;
}

/*
 * Release the USB resume wake GPIO (if one was installed) and park the
 * pinctrl lines in their sleep state.  Safe to call when no GPIO was
 * installed: guarded by _udc_ctxt.wake_gpio.
 */
static void ci13xxx_msm_uninstall_wake_gpio(struct platform_device *pdev)
{
	struct pinctrl_state *set_state;

	dev_dbg(&pdev->dev, "ci13xxx_msm_uninstall_wake_gpio\n");

	if (_udc_ctxt.wake_gpio) {
		gpio_free(_udc_ctxt.wake_gpio);
		if (_udc_ctxt.ci13xxx_pinctrl) {
			set_state = pinctrl_lookup_state(
					_udc_ctxt.ci13xxx_pinctrl,
					"ci13xxx_sleep");
			if (IS_ERR(set_state))
				pr_err("cannot get ci13xxx pinctrl sleep state\n");
			else
				pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl,
						     set_state);
		}
		_udc_ctxt.wake_gpio = 0;
	}
}

/*
 * Probe: map the controller registers, register the UDC, claim the IRQ
 * and (optionally) a "USB_RESUME" wake GPIO.  Error paths unwind in
 * reverse order via the labels at the bottom.
 */
static int ci13xxx_msm_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret;
	struct ci13xxx_platform_data *pdata = pdev->dev.platform_data;
	bool is_l1_supported = false;

	dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");

	if (pdata) {
		/* Acceptable values for nz_itc are: 0,1,2,4,8,16,32,64 */
		if (pdata->log2_itc > CI13XXX_MSM_MAX_LOG2_ITC ||
			pdata->log2_itc <= 0)
			ci13xxx_msm_udc_driver.nz_itc = 0;
		else
			ci13xxx_msm_udc_driver.nz_itc =
				1 << (pdata->log2_itc-1);

		is_l1_supported = pdata->l1_supported;
		/* Set ahb2ahb bypass flag if it is requested. */
		if (pdata->enable_ahb2ahb_bypass)
			ci13xxx_msm_udc_driver.flags |=
				CI13XXX_ENABLE_AHB2AHB_BYPASS;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get platform resource mem\n");
		return -ENXIO;
	}

	_udc_ctxt.regs = ioremap(res->start, resource_size(res));
	if (!_udc_ctxt.regs) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	ret = udc_probe(&ci13xxx_msm_udc_driver, &pdev->dev, _udc_ctxt.regs);
	if (ret < 0) {
		dev_err(&pdev->dev, "udc_probe failed\n");
		goto iounmap;
	}

	/* _udc is the module-global set up by udc_probe() above. */
	_udc->gadget.l1_supported = is_l1_supported;

	_udc_ctxt.irq = platform_get_irq(pdev, 0);
	if (_udc_ctxt.irq < 0) {
		dev_err(&pdev->dev, "IRQ not found\n");
		ret = -ENXIO;
		goto udc_remove;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_IO, "USB_RESUME");
	/* Get pinctrl if target uses pinctrl */
	_udc_ctxt.ci13xxx_pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(_udc_ctxt.ci13xxx_pinctrl)) {
		/*
		 * Only fatal when the DT actually declares pinctrl; boards
		 * without pinctrl fall through with a NULL handle.
		 */
		if (of_property_read_bool(pdev->dev.of_node, "pinctrl-names")) {
			dev_err(&pdev->dev,
				"Error encountered while getting pinctrl");
			ret = PTR_ERR(_udc_ctxt.ci13xxx_pinctrl);
			goto udc_remove;
		}
		dev_dbg(&pdev->dev, "Target does not use pinctrl\n");
		_udc_ctxt.ci13xxx_pinctrl = NULL;
	}
	if (res) {
		ret = ci13xxx_msm_install_wake_gpio(pdev, res);
		if (ret < 0) {
			dev_err(&pdev->dev, "gpio irq install failed\n");
			goto udc_remove;
		}
	}

	ret = request_irq(_udc_ctxt.irq, msm_udc_irq, IRQF_SHARED, pdev->name,
					  pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto gpio_uninstall;
	}

	pm_runtime_no_callbacks(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;

gpio_uninstall:
	ci13xxx_msm_uninstall_wake_gpio(pdev);
udc_remove:
	udc_remove();
iounmap:
	iounmap(_udc_ctxt.regs);

	return ret;
}

/* Remove: tear down in reverse order of probe. */
int ci13xxx_msm_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	free_irq(_udc_ctxt.irq, pdev);
	ci13xxx_msm_uninstall_wake_gpio(pdev);
	udc_remove();
	iounmap(_udc_ctxt.regs);
	return 0;
}

/* Shutdown: drop D+ pull-up so the host sees a disconnect. */
void ci13xxx_msm_shutdown(struct platform_device *pdev)
{
	ci13xxx_pullup(&_udc->gadget, 0);
}

/*
 * Toggle the BAM-disable bit in GENCONFIG.
 * NOTE(review): 'udc' looks unused, but USB_GENCONFIG presumably expands
 * to a register address derived from it -- verify against the header.
 */
void msm_hw_bam_disable(bool bam_disable)
{
	u32 val;
	struct ci13xxx *udc = _udc;

	if (bam_disable)
		val = readl_relaxed(USB_GENCONFIG) | GENCONFIG_BAM_DISABLE;
	else
		val = readl_relaxed(USB_GENCONFIG) & ~GENCONFIG_BAM_DISABLE;

	writel_relaxed(val, USB_GENCONFIG);
}

static struct platform_driver ci13xxx_msm_driver = {
	.probe = ci13xxx_msm_probe,
	.driver = {
		.name = "msm_hsusb",
	},
	.remove = ci13xxx_msm_remove,
	.shutdown = ci13xxx_msm_shutdown,
};
MODULE_ALIAS("platform:msm_hsusb");

static int __init ci13xxx_msm_init(void)
{
	return platform_driver_register(&ci13xxx_msm_driver);
}
module_init(ci13xxx_msm_init);

static void __exit ci13xxx_msm_exit(void)
{
	platform_driver_unregister(&ci13xxx_msm_driver);
}
module_exit(ci13xxx_msm_exit);

MODULE_LICENSE("GPL v2");
gpl-2.0
rhtu/linux
net/sched/em_text.c
1342
3746
/*
 * net/sched/em_text.c	Textsearch ematch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/textsearch.h>
#include <linux/tc_ematch/tc_em_text.h>
#include <net/pkt_cls.h>

/* Kernel-side state for one configured text match. */
struct text_match {
	u16			from_offset;
	u16			to_offset;
	u8			from_layer;
	u8			to_layer;
	struct ts_config	*config;	/* prepared textsearch state */
};

#define EM_TEXT_PRIV(m) ((struct text_match *) (m)->data)

/*
 * Match callback: search for the configured pattern inside the packet
 * window [from_layer+from_offset, to_layer+to_offset].  Returns non-zero
 * on a hit (skb_find_text() returns UINT_MAX when nothing is found).
 */
static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
			 struct tcf_pkt_info *info)
{
	struct text_match *tm = EM_TEXT_PRIV(m);
	int from, to;

	from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
	from += tm->from_offset;

	to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
	to += tm->to_offset;

	return skb_find_text(skb, from, to, tm->config) != UINT_MAX;
}

/*
 * Configure callback: validate the userspace config, prepare the
 * textsearch engine (autoloading the algorithm module once, with the
 * rtnl lock dropped, if it is not yet present) and allocate the
 * per-match state.
 */
static int em_text_change(struct net *net, void *data, int len,
			  struct tcf_ematch *m)
{
	struct text_match *tm;
	struct tcf_em_text *conf = data;
	struct ts_config *ts_conf;
	int flags = 0;

	if (len < sizeof(*conf) || len < (sizeof(*conf) + conf->pattern_len))
		return -EINVAL;

	if (conf->from_layer > conf->to_layer)
		return -EINVAL;

	if (conf->from_layer == conf->to_layer &&
	    conf->from_offset > conf->to_offset)
		return -EINVAL;

retry:
	ts_conf = textsearch_prepare(conf->algo, (u8 *) conf + sizeof(*conf),
				     conf->pattern_len, GFP_KERNEL, flags);

	if (flags & TS_AUTOLOAD)
		rtnl_lock();

	if (IS_ERR(ts_conf)) {
		/* Unknown algorithm: retry once with module autoload. */
		if (PTR_ERR(ts_conf) == -ENOENT && !(flags & TS_AUTOLOAD)) {
			rtnl_unlock();
			flags |= TS_AUTOLOAD;
			goto retry;
		} else
			return PTR_ERR(ts_conf);
	} else if (flags & TS_AUTOLOAD) {
		/* Module was loaded meanwhile; caller must retry. */
		textsearch_destroy(ts_conf);
		return -EAGAIN;
	}

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (tm == NULL) {
		textsearch_destroy(ts_conf);
		return -ENOBUFS;
	}

	tm->from_offset = conf->from_offset;
	tm->to_offset = conf->to_offset;
	tm->from_layer = conf->from_layer;
	tm->to_layer = conf->to_layer;
	tm->config = ts_conf;

	m->datalen = sizeof(*tm);
	m->data = (unsigned long) tm;
	return 0;
}

/* Destroy callback: release the prepared textsearch state, if any. */
static void em_text_destroy(struct tcf_ematch *m)
{
	if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
		textsearch_destroy(EM_TEXT_PRIV(m)->config);
}

/*
 * Dump callback: serialize the match config back to userspace.
 *
 * The whole of 'conf' is copied into the netlink message, so every byte
 * must be initialized: strncpy(..., sizeof(conf.algo) - 1) never writes
 * the final byte of 'algo', and any structure padding would otherwise
 * carry uninitialized kernel stack contents to userspace.  Zero the
 * struct up front to close that infoleak (and guarantee 'algo' is
 * NUL-terminated).
 */
static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
{
	struct text_match *tm = EM_TEXT_PRIV(m);
	struct tcf_em_text conf;

	memset(&conf, 0, sizeof(conf));

	strncpy(conf.algo, tm->config->ops->name, sizeof(conf.algo) - 1);
	conf.from_offset = tm->from_offset;
	conf.to_offset = tm->to_offset;
	conf.from_layer = tm->from_layer;
	conf.to_layer = tm->to_layer;
	conf.pattern_len = textsearch_get_pattern_len(tm->config);
	conf.pad = 0;

	if (nla_put_nohdr(skb, sizeof(conf), &conf) < 0)
		goto nla_put_failure;
	if (nla_append(skb, conf.pattern_len,
		       textsearch_get_pattern(tm->config)) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static struct tcf_ematch_ops em_text_ops = {
	.kind		= TCF_EM_TEXT,
	.change		= em_text_change,
	.match		= em_text_match,
	.destroy	= em_text_destroy,
	.dump		= em_text_dump,
	.owner		= THIS_MODULE,
	.link		= LIST_HEAD_INIT(em_text_ops.link)
};

static int __init init_em_text(void)
{
	return tcf_em_register(&em_text_ops);
}

static void __exit exit_em_text(void)
{
	tcf_em_unregister(&em_text_ops);
}

MODULE_LICENSE("GPL");

module_init(init_em_text);
module_exit(exit_em_text);

MODULE_ALIAS_TCF_EMATCH(TCF_EM_TEXT);
gpl-2.0
CPDroid/Samsung_STE_Kernel
drivers/staging/msm/lcdc_toshiba_wvga_pt.c
2110
9135
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/*
 * LCDC panel driver for the Toshiba WVGA panel, driven over bit-banged
 * SPI GPIOs supplied through msm_panel_common_pdata.
 *
 * Fix vs. original: toshiba_probe() was marked __init while being
 * referenced from the persistent platform_driver below (.probe).  Init
 * memory is discarded after boot, so a late bind/rebind (e.g. through
 * sysfs) would jump to freed code; this is also the classic modpost
 * "section mismatch" warning.  The __init annotation is dropped.
 */

#include <linux/delay.h>
#include <linux/module.h>
#include <mach/gpio.h>
#include <mach/pmic.h>
#include "msm_fb.h"

#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
#include "mddihosti.h"
#endif

/* Bit-banged SPI lines, assigned from platform data in spi_pin_assign(). */
static int spi_cs;
static int spi_sclk;
static int spi_mosi;
static int spi_miso;

struct toshiba_state_type{
	boolean disp_initialized;
	boolean display_on;
	boolean disp_powered_up;
};

static struct toshiba_state_type toshiba_state = { 0 };
static struct msm_panel_common_pdata *lcdc_toshiba_pdata;

/*
 * Clock out one 9-bit SPI word: a D/C flag bit (0 = command, 1 =
 * parameter) followed by 8 data bits, MSB first.  Data is latched on
 * the rising clock edge; udelay(1) satisfies the >=20 ns hold times.
 */
static void toshiba_spi_write_byte(char dc, uint8 data)
{
	uint32 bit;
	int bnum;

	gpio_set_value(spi_sclk, 0); /* clk low */
	/* dc: 0 for command, 1 for parameter */
	gpio_set_value(spi_mosi, dc);
	udelay(1);	/* at least 20 ns */
	gpio_set_value(spi_sclk, 1); /* clk high */
	udelay(1);	/* at least 20 ns */
	bnum = 8;	/* 8 data bits */
	bit = 0x80;
	while (bnum) {
		gpio_set_value(spi_sclk, 0); /* clk low */
		if (data & bit)
			gpio_set_value(spi_mosi, 1);
		else
			gpio_set_value(spi_mosi, 0);
		udelay(1);
		gpio_set_value(spi_sclk, 1); /* clk high */
		udelay(1);
		bit >>= 1;
		bnum--;
	}
}

/*
 * Send a command byte followed by up to 'num' parameter bytes taken
 * from 'data', most significant parameter byte first.
 */
static void toshiba_spi_write(char cmd, uint32 data, int num)
{
	char *bp;

	gpio_set_value(spi_cs, 1);	/* cs high */

	/* command byte first */
	toshiba_spi_write_byte(0, cmd);

	/* followed by parameter bytes */
	if (num) {
		bp = (char *)&data;
		bp += (num - 1);
		while (num) {
			toshiba_spi_write_byte(1, *bp);
			num--;
			bp--;
		}
	}

	gpio_set_value(spi_cs, 0);	/* cs low */
	udelay(1);
}

/*
 * Send a command and read back 'num' bytes on the MISO line into
 * *data (bits accumulate MSB first).  A multi-byte read carries an
 * extra dummy D/C bit before the data bits.
 */
void toshiba_spi_read_bytes(char cmd, uint32 *data, int num)
{
	uint32 dbit, bits;
	int bnum;

	gpio_set_value(spi_cs, 1);	/* cs high */

	/* command byte first */
	toshiba_spi_write_byte(0, cmd);

	if (num > 1) {
		/* extra dc bit */
		gpio_set_value(spi_sclk, 0); /* clk low */
		udelay(1);
		dbit = gpio_get_value(spi_miso);/* dc bit */
		udelay(1);
		gpio_set_value(spi_sclk, 1); /* clk high */
	}

	/* followed by data bytes */
	bnum = num * 8;	/* number of bits */
	bits = 0;
	while (bnum) {
		bits <<= 1;
		gpio_set_value(spi_sclk, 0); /* clk low */
		udelay(1);
		dbit = gpio_get_value(spi_miso);
		udelay(1);
		gpio_set_value(spi_sclk, 1); /* clk high */
		bits |= dbit;
		bnum--;
	}

	*data = bits;

	udelay(1);
	gpio_set_value(spi_cs, 0);	/* cs low */
	udelay(1);
}

static void spi_pin_assign(void)
{
	/* Setting the Default GPIO's */
	spi_sclk = *(lcdc_toshiba_pdata->gpio_num);
	spi_cs   = *(lcdc_toshiba_pdata->gpio_num + 1);
	spi_mosi = *(lcdc_toshiba_pdata->gpio_num + 2);
	spi_miso = *(lcdc_toshiba_pdata->gpio_num + 3);
}

static void toshiba_disp_powerup(void)
{
	if (!toshiba_state.disp_powered_up && !toshiba_state.display_on) {
		/* Reset the hardware first */
		/* Include DAC power up implementation here */
		toshiba_state.disp_powered_up = TRUE;
	}
}

/*
 * Run the vendor-supplied register initialization sequence and turn
 * the display on (sleep-out 0x11, display-on 0x29), then read back and
 * log the 3-byte panel ID from register 0x04.  The register values and
 * delays come from the panel datasheet and must not be reordered.
 */
static void toshiba_disp_on(void)
{
	uint32	data;

	gpio_set_value(spi_cs, 0);	/* low */
	gpio_set_value(spi_sclk, 1);	/* high */
	gpio_set_value(spi_mosi, 0);
	gpio_set_value(spi_miso, 0);

	if (toshiba_state.disp_powered_up && !toshiba_state.display_on) {
		toshiba_spi_write(0, 0, 0);
		mdelay(7);
		toshiba_spi_write(0, 0, 0);
		mdelay(7);
		toshiba_spi_write(0, 0, 0);
		mdelay(7);
		toshiba_spi_write(0xba, 0x11, 1);
		toshiba_spi_write(0x36, 0x00, 1);
		mdelay(1);
		toshiba_spi_write(0x3a, 0x60, 1);
		toshiba_spi_write(0xb1, 0x5d, 1);
		mdelay(1);
		toshiba_spi_write(0xb2, 0x33, 1);
		toshiba_spi_write(0xb3, 0x22, 1);
		mdelay(1);
		toshiba_spi_write(0xb4, 0x02, 1);
		toshiba_spi_write(0xb5, 0x1e, 1); /* vcs -- adjust brightness */
		mdelay(1);
		toshiba_spi_write(0xb6, 0x27, 1);
		toshiba_spi_write(0xb7, 0x03, 1);
		mdelay(1);
		toshiba_spi_write(0xb9, 0x24, 1);
		toshiba_spi_write(0xbd, 0xa1, 1);
		mdelay(1);
		toshiba_spi_write(0xbb, 0x00, 1);
		toshiba_spi_write(0xbf, 0x01, 1);
		mdelay(1);
		toshiba_spi_write(0xbe, 0x00, 1);
		toshiba_spi_write(0xc0, 0x11, 1);
		mdelay(1);
		toshiba_spi_write(0xc1, 0x11, 1);
		toshiba_spi_write(0xc2, 0x11, 1);
		mdelay(1);
		toshiba_spi_write(0xc3, 0x3232, 2);
		mdelay(1);
		toshiba_spi_write(0xc4, 0x3232, 2);
		mdelay(1);
		toshiba_spi_write(0xc5, 0x3232, 2);
		mdelay(1);
		toshiba_spi_write(0xc6, 0x3232, 2);
		mdelay(1);
		toshiba_spi_write(0xc7, 0x6445, 2);
		mdelay(1);
		toshiba_spi_write(0xc8, 0x44, 1);
		toshiba_spi_write(0xc9, 0x52, 1);
		mdelay(1);
		toshiba_spi_write(0xca, 0x00, 1);
		mdelay(1);
		toshiba_spi_write(0xec, 0x02a4, 2);	/* 0x02a4 */
		mdelay(1);
		toshiba_spi_write(0xcf, 0x01, 1);
		mdelay(1);
		toshiba_spi_write(0xd0, 0xc003, 2);	/* c003 */
		mdelay(1);
		toshiba_spi_write(0xd1, 0x01, 1);
		mdelay(1);
		toshiba_spi_write(0xd2, 0x0028, 2);
		mdelay(1);
		toshiba_spi_write(0xd3, 0x0028, 2);
		mdelay(1);
		toshiba_spi_write(0xd4, 0x26a4, 2);
		mdelay(1);
		toshiba_spi_write(0xd5, 0x20, 1);
		mdelay(1);
		toshiba_spi_write(0xef, 0x3200, 2);
		mdelay(32);
		toshiba_spi_write(0xbc, 0x80, 1);	/* wvga pass through */
		toshiba_spi_write(0x3b, 0x00, 1);
		mdelay(1);
		toshiba_spi_write(0xb0, 0x16, 1);
		mdelay(1);
		toshiba_spi_write(0xb8, 0xfff5, 2);
		mdelay(1);
		toshiba_spi_write(0x11, 0, 0);
		mdelay(5);
		toshiba_spi_write(0x29, 0, 0);
		mdelay(5);
		toshiba_state.display_on = TRUE;
	}

	data = 0;
	toshiba_spi_read_bytes(0x04, &data, 3);
	printk(KERN_INFO "toshiba_disp_on: id=%x\n", data);
}

static int lcdc_toshiba_panel_on(struct platform_device *pdev)
{
	if (!toshiba_state.disp_initialized) {
		/* Configure reset GPIO that drives DAC */
		if (lcdc_toshiba_pdata->panel_config_gpio)
			lcdc_toshiba_pdata->panel_config_gpio(1);
		toshiba_disp_powerup();
		toshiba_disp_on();
		toshiba_state.disp_initialized = TRUE;
	}
	return 0;
}

static int lcdc_toshiba_panel_off(struct platform_device *pdev)
{
	if (toshiba_state.disp_powered_up && toshiba_state.display_on) {
		/* Main panel power off (Deep standby in) */
		toshiba_spi_write(0x28, 0, 0);	/* display off */
		mdelay(1);
		toshiba_spi_write(0xb8, 0x8002, 2);	/* output control */
		mdelay(1);
		toshiba_spi_write(0x10, 0x00, 1);	/* sleep mode in */
		mdelay(85);	/* wait 85 msec */
		toshiba_spi_write(0xb0, 0x00, 1);	/* deep standby in */
		mdelay(1);
		if (lcdc_toshiba_pdata->panel_config_gpio)
			lcdc_toshiba_pdata->panel_config_gpio(0);
		toshiba_state.display_on = FALSE;
		toshiba_state.disp_initialized = FALSE;
	}
	return 0;
}

/* Backlight is driven through the PMIC LED_LCD current sink. */
static void lcdc_toshiba_set_backlight(struct msm_fb_data_type *mfd)
{
	int bl_level;
	int ret = -EPERM;

	bl_level = mfd->bl_level;
	ret = pmic_set_led_intensity(LED_LCD, bl_level);

	if (ret)
		printk(KERN_WARNING "%s: can't set lcd backlight!\n",
				__func__);
}

/*
 * pdev->id == 0 carries the common platform data (GPIO numbers); any
 * other id is a framebuffer panel device to register with msm_fb.
 *
 * Not __init: this is the .probe callback of a persistent
 * platform_driver and may run after init sections are freed.
 */
static int toshiba_probe(struct platform_device *pdev)
{
	if (pdev->id == 0) {
		lcdc_toshiba_pdata = pdev->dev.platform_data;
		spi_pin_assign();
		return 0;
	}
	msm_fb_add_device(pdev);
	return 0;
}

static struct platform_driver this_driver = {
	.probe  = toshiba_probe,
	.driver = {
		.name   = "lcdc_toshiba_wvga",
	},
};

static struct msm_fb_panel_data toshiba_panel_data = {
	.on = lcdc_toshiba_panel_on,
	.off = lcdc_toshiba_panel_off,
	.set_backlight = lcdc_toshiba_set_backlight,
};

static struct platform_device this_device = {
	.name   = "lcdc_toshiba_wvga",
	.id	= 1,
	.dev	= {
		.platform_data = &toshiba_panel_data,
	}
};

/*
 * Register driver and panel device, filling in the WVGA timing
 * parameters.  Unregisters the driver again if device registration
 * fails.
 */
static int __init lcdc_toshiba_panel_init(void)
{
	int ret;
	struct msm_panel_info *pinfo;

#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
	if (mddi_get_client_id() != 0)
		return 0;

	ret = msm_fb_detect_client("lcdc_toshiba_wvga_pt");
	if (ret)
		return 0;
#endif

	ret = platform_driver_register(&this_driver);
	if (ret)
		return ret;

	pinfo = &toshiba_panel_data.panel_info;
	pinfo->xres = 480;
	pinfo->yres = 800;
	pinfo->type = LCDC_PANEL;
	pinfo->pdest = DISPLAY_1;
	pinfo->wait_cycle = 0;
	pinfo->bpp = 18;
	pinfo->fb_num = 2;
	/* 30Mhz mdp_lcdc_pclk and mdp_lcdc_pad_pcl */
	pinfo->clk_rate = 27648000;
	pinfo->bl_max = 15;
	pinfo->bl_min = 1;

	pinfo->lcdc.h_back_porch = 184;	/* hsw = 8 + hbp=184 */
	pinfo->lcdc.h_front_porch = 4;
	pinfo->lcdc.h_pulse_width = 8;
	pinfo->lcdc.v_back_porch = 2;	/* vsw=1 + vbp = 2 */
	pinfo->lcdc.v_front_porch = 3;
	pinfo->lcdc.v_pulse_width = 1;
	pinfo->lcdc.border_clr = 0;	/* blk */
	pinfo->lcdc.underflow_clr = 0xff;	/* blue */
	pinfo->lcdc.hsync_skew = 0;

	ret = platform_device_register(&this_device);
	if (ret)
		platform_driver_unregister(&this_driver);

	return ret;
}

device_initcall(lcdc_toshiba_panel_init);
gpl-2.0
Emotroid-Team/emotion_kernel_tw_edge
fs/nilfs2/page.c
2878
14961
/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>,
 *            Seiji Kihara <kihara@osrg.net>.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"

/* Buffer-head state bits that survive a buffer copy between caches. */
#define NILFS_BUFFER_INHERENT_BITS  \
	((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
	 (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))

/*
 * Return the buffer head for block number 'block' within 'page',
 * creating empty buffers on the page first if needed.  Waits for any
 * I/O in flight on the buffer before returning it (referenced).
 */
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)

{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

/*
 * Grab (locked page + referenced buffer) for file block 'blkoff' in
 * 'mapping'.  Returns NULL on allocation failure; on success the page
 * stays locked and the caller owns one reference to both.
 */
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		page_cache_release(page);
		return NULL;
	}
	return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 *
 * Clears all NILFS and generic state bits on @bh, cancels the page's
 * dirty flag when no dirty buffer remains, and drops the reference.
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;

	lock_buffer(bh);
	clear_buffer_nilfs_volatile(bh);
	clear_buffer_nilfs_checked(bh);
	clear_buffer_nilfs_redirected(bh);
	clear_buffer_async_write(bh);
	clear_buffer_dirty(bh);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	clear_buffer_uptodate(bh);
	clear_buffer_mapped(bh);
	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	/*
	 * Page-level flags must reflect ALL buffers on the page, so AND
	 * the Uptodate/Mapped bits across every sibling buffer.
	 */
	bh = dbh;
	bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & (1UL << BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & (1UL << BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

/* Dump diagnostic state of a page and its buffers to the kernel log. */
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, atomic_read(&page->_count),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * should be treated by caller.  The page must not be under i/o.
 * Both src and dst page must be locked
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= (1UL << BH_Dirty);

	/* First pass: take all buffer locks and copy buffer metadata. */
	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	/* Second pass: release the locks taken above. */
	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

/*
 * Copy every dirty page of 'smap' into 'dmap' (allocating destination
 * pages as needed) and mark the copies dirty.  Returns 0 or -ENOMEM.
 */
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	pagevec_init(&pvec, 0);
repeat:
	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
				PAGEVEC_SIZE))
		return 0;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;

		lock_page(page);
		if (unlikely(!PageDirty(page)))
			NILFS_PAGE_BUG(page, "inconsistent dirty state");

		dpage = grab_cache_page(dmap, page->index);
		if (unlikely(!dpage)) {
			/* No empty page is added to the page cache */
			err = -ENOMEM;
			unlock_page(page);
			break;
		}
		if (unlikely(!page_has_buffers(page)))
			NILFS_PAGE_BUG(page,
				       "found empty page in dat page cache");

		nilfs_copy_page(dpage, page, 1);
		__set_page_dirty_nobuffers(dpage);

		unlock_page(dpage);
		page_cache_release(dpage);
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec, 0);
repeat:
	n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
	if (!n)
		return;
	index = pvec.pages[n - 1]->index + 1;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* override existing page on the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			page_cache_release(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			spin_lock_irq(&smap->tree_lock);
			page2 = radix_tree_delete(&smap->page_tree, offset);
			WARN_ON(page2 != page);

			smap->nrpages--;
			spin_unlock_irq(&smap->tree_lock);

			spin_lock_irq(&dmap->tree_lock);
			err = radix_tree_insert(&dmap->page_tree, offset, page);
			if (unlikely(err < 0)) {
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				page_cache_release(page); /* for cache */
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->page_tree,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			spin_unlock_irq(&dmap->tree_lock);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			nilfs_clear_dirty_page(page, silent);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent) {
		nilfs_warning(sb, __func__,
				"discard page: offset %lld, ino %lu",
				page_offset(page), inode->i_ino);
	}

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent) {
				nilfs_warning(sb, __func__,
						"discard block %llu, size %zu",
						(u64)bh->b_blocknr, bh->b_size);
			}
			clear_buffer_async_write(bh);
			clear_buffer_dirty(bh);
			clear_buffer_nilfs_volatile(bh);
			clear_buffer_nilfs_checked(bh);
			clear_buffer_nilfs_redirected(bh);
			clear_buffer_uptodate(bh);
			clear_buffer_mapped(bh);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}

/*
 * Count buffers in [from, to) of 'page' that are not dirty.
 * 'block_start == 0' on re-entry detects wrap-around of the ring list.
 */
unsigned nilfs_page_count_clean_buffers(struct page *page,
					unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

/* Initialize an address_space for NILFS use (no a_ops, GFP_NOFS). */
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
			struct backing_dev_info *bdi)
{
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->backing_dev_info = bdi;
	mapping->a_ops = &empty_aops;
}

/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		spin_lock_irq(&mapping->tree_lock);
		if (test_bit(PG_dirty, &page->flags)) {
			radix_tree_tag_clear(&mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
			spin_unlock_irq(&mapping->tree_lock);
			return clear_page_dirty_for_io(page);
		}
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i;
	pgoff_t index;
	unsigned int nblocks_in_page;
	unsigned long length = 0;
	sector_t b;
	struct pagevec pvec;
	struct page *page;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);

repeat:
	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
					pvec.pages);
	if (pvec.nr == 0)
		return length;

	/* A gap in the page cache ends the extent being collected. */
	if (length > 0 && pvec.pages[0]->index > index)
		goto out;

	b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	i = 0;
	do {
		page = pvec.pages[i];

		lock_page(page);
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;

			b += nblocks_in_page;
		}
		unlock_page(page);

	} while (++i < pagevec_count(&pvec));

	index = page->index + 1;
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;

out_locked:
	unlock_page(page);
out:
	pagevec_release(&pvec);
	return length;
}
gpl-2.0
garick82/android_kernel_samsung_codina
arch/mips/kernel/branch.c
3134
5589
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 2000, 2001 by Ralf Baechle
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

/*
 * Compute the return address and do emulate branch simulation, if required.
 *
 * Used when a branch/jump sits in a delay slot context that must be
 * emulated: decodes the instruction at cp0_epc and advances cp0_epc to
 * the branch target (taken) or past the delay slot (not taken, epc + 8).
 * Returns 0 on success, -EFAULT on an unreadable or unaligned epc.
 */
int __compute_return_epc(struct pt_regs *regs)
{
	unsigned int __user *addr;
	unsigned int bit, fcr31, dspcontrol;
	long epc;
	union mips_instruction insn;

	epc = regs->cp0_epc;
	if (epc & 3)
		goto unaligned;

	/*
	 * Read the instruction
	 */
	addr = (unsigned int __user *) epc;
	if (__get_user(insn.word, addr)) {
		force_sig(SIGSEGV, current);
		return -EFAULT;
	}

	switch (insn.i_format.opcode) {
	/*
	 * jr and jalr are in r_format format.
	 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			regs->regs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			regs->cp0_epc = regs->regs[insn.r_format.rs];
			break;
		}
		break;

	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)regs->regs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			regs->cp0_epc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)regs->regs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			regs->cp0_epc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			/* Link register $31 is written whether or not the
			 * branch is taken, as on real hardware. */
			regs->regs[31] = epc + 8;
			if ((long)regs->regs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			regs->cp0_epc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			regs->regs[31] = epc + 8;
			if ((long)regs->regs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			regs->cp0_epc = epc;
			break;

		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			} else
				epc += 8;
			regs->cp0_epc = epc;
			break;
		}
		break;

	/*
	 * These are unconditional and in j_format.
	 */
	case jal_op:
		regs->regs[31] = regs->cp0_epc + 8;
		/* Fall through -- jal shares the target computation of j. */
	case j_op:
		/* Target: top 4 bits of (epc + 4) | 26-bit target << 2. */
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		regs->cp0_epc = epc;
		break;

	/*
	 * These are conditional and in i_format.
	 */
	case beq_op:
	case beql_op:
		if (regs->regs[insn.i_format.rs] ==
		    regs->regs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		regs->cp0_epc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (regs->regs[insn.i_format.rs] !=
		    regs->regs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		regs->cp0_epc = epc;
		break;

	case blez_op: /* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)regs->regs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		regs->cp0_epc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)regs->regs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		regs->cp0_epc = epc;
		break;

	/*
	 * And now the FPA/cp1 branch instructions.
	 */
	case cop1_op:
		/* Fetch the FP condition codes from the live FPU when we
		 * own it, otherwise from the saved thread state. */
		preempt_disable();
		if (is_fpu_owner())
			asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
		else
			fcr31 = current->thread.fpu.fcr31;
		preempt_enable();

		/* Map the cc field (rt >> 2) to its fcr31 bit position:
		 * cc0 is bit 23, cc1..cc7 are bits 25..31. */
		bit = (insn.i_format.rt >> 2);
		bit += (bit != 0);
		bit += 23;
		switch (insn.i_format.rt & 3) {
		case 0:	/* bc1f */
		case 2:	/* bc1fl */
			if (~fcr31 & (1 << bit))
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			regs->cp0_epc = epc;
			break;

		case 1:	/* bc1t */
		case 3:	/* bc1tl */
			if (fcr31 & (1 << bit))
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			regs->cp0_epc = epc;
			break;
		}
		break;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op: /* This is bbit0 on Octeon */
		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
		     == 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		regs->cp0_epc = epc;
		break;
	case ldc2_op: /* This is bbit032 on Octeon */
		if ((regs->regs[insn.i_format.rs] &
		    (1ull<<(insn.i_format.rt+32))) == 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		regs->cp0_epc = epc;
		break;
	case swc2_op: /* This is bbit1 on Octeon */
		if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		regs->cp0_epc = epc;
		break;
	case sdc2_op: /* This is bbit132 on Octeon */
		if (regs->regs[insn.i_format.rs] &
		    (1ull<<(insn.i_format.rt+32)))
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		regs->cp0_epc = epc;
		break;
#endif
	}

	return 0;

unaligned:
	printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
	force_sig(SIGBUS, current);
	return -EFAULT;

sigill:
	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n",
	       current->comm);
	force_sig(SIGBUS, current);
	return -EFAULT;
}
gpl-2.0
bilalliberty/SebastianFM-kernel
drivers/usb/gadget/f_eem.c
3646
15318
/* * f_eem.c -- USB CDC Ethernet (EEM) link function driver * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2008 Nokia Corporation * Copyright (C) 2009 EF Johnson Technologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/slab.h> #include "u_ether.h" #define EEM_HLEN 2 /* * This function is a "CDC Ethernet Emulation Model" (CDC EEM) * Ethernet link. */ struct f_eem { struct gether port; u8 ctrl_id; }; static inline struct f_eem *func_to_eem(struct usb_function *f) { return container_of(f, struct f_eem, port.func); } /*-------------------------------------------------------------------------*/ /* interface descriptor: */ static struct usb_interface_descriptor eem_intf __initdata = { .bLength = sizeof eem_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_EEM, .bInterfaceProtocol = USB_CDC_PROTO_EEM, /* .iInterface = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *eem_fs_function[] __initdata = { /* CDC EEM control descriptors */ (struct usb_descriptor_header *) &eem_intf, (struct usb_descriptor_header *) &eem_fs_in_desc, (struct usb_descriptor_header *) 
&eem_fs_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_descriptor_header *eem_hs_function[] __initdata = { /* CDC EEM control descriptors */ (struct usb_descriptor_header *) &eem_intf, (struct usb_descriptor_header *) &eem_hs_in_desc, (struct usb_descriptor_header *) &eem_hs_out_desc, NULL, }; /* super speed support: */ static struct usb_endpoint_descriptor eem_ss_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(1024), }; static struct usb_endpoint_descriptor eem_ss_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(1024), }; static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc __initdata = { .bLength = sizeof eem_ss_bulk_comp_desc, .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /* the following 2 values can be tweaked if necessary */ /* .bMaxBurst = 0, */ /* .bmAttributes = 0, */ }; static struct usb_descriptor_header *eem_ss_function[] __initdata = { /* CDC EEM control descriptors */ (struct usb_descriptor_header *) &eem_intf, (struct usb_descriptor_header *) &eem_ss_in_desc, (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc, (struct usb_descriptor_header *) &eem_ss_out_desc, (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc, NULL, }; /* string descriptors: 
*/ static struct usb_string eem_string_defs[] = { [0].s = "CDC Ethernet Emulation Model (EEM)", { } /* end of list */ }; static struct usb_gadget_strings eem_string_table = { .language = 0x0409, /* en-us */ .strings = eem_string_defs, }; static struct usb_gadget_strings *eem_strings[] = { &eem_string_table, NULL, }; /*-------------------------------------------------------------------------*/ static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct usb_composite_dev *cdev = f->config->cdev; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* device either stalls (value < 0) or reports success */ return value; } static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_eem *eem = func_to_eem(f); struct usb_composite_dev *cdev = f->config->cdev; struct net_device *net; /* we know alt == 0, so this is an activation or a reset */ if (alt != 0) goto fail; if (intf == eem->ctrl_id) { if (eem->port.in_ep->driver_data) { DBG(cdev, "reset eem\n"); gether_disconnect(&eem->port); } if (!eem->port.in_ep->desc || !eem->port.out_ep->desc) { DBG(cdev, "init eem\n"); if (config_ep_by_speed(cdev->gadget, f, eem->port.in_ep) || config_ep_by_speed(cdev->gadget, f, eem->port.out_ep)) { eem->port.in_ep->desc = NULL; eem->port.out_ep->desc = NULL; goto fail; } } /* zlps should not occur because zero-length EEM packets * will be inserted in those cases where they would occur */ eem->port.is_zlp_ok = 1; eem->port.cdc_filter = DEFAULT_FILTER; DBG(cdev, "activate eem\n"); net = gether_connect(&eem->port); if (IS_ERR(net)) return PTR_ERR(net); } else goto fail; return 0; fail: return -EINVAL; } static void eem_disable(struct usb_function *f) { struct f_eem *eem = func_to_eem(f); struct usb_composite_dev *cdev 
= f->config->cdev; DBG(cdev, "eem deactivated\n"); if (eem->port.in_ep->driver_data) gether_disconnect(&eem->port); } /*-------------------------------------------------------------------------*/ /* EEM function driver setup/binding */ static int __init eem_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_eem *eem = func_to_eem(f); int status; struct usb_ep *ep; /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) goto fail; eem->ctrl_id = status; eem_intf.bInterfaceNumber = status; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc); if (!ep) goto fail; eem->port.in_ep = ep; ep->driver_data = cdev; /* claim */ ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc); if (!ep) goto fail; eem->port.out_ep = ep; ep->driver_data = cdev; /* claim */ status = -ENOMEM; /* copy descriptors, and track endpoint copies */ f->descriptors = usb_copy_descriptors(eem_fs_function); if (!f->descriptors) goto fail; /* support all relevant hardware speeds... we expect that when * hardware is dual speed, all bulk-capable endpoints work at * both speeds */ if (gadget_is_dualspeed(c->cdev->gadget)) { eem_hs_in_desc.bEndpointAddress = eem_fs_in_desc.bEndpointAddress; eem_hs_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(eem_hs_function); if (!f->hs_descriptors) goto fail; } if (gadget_is_superspeed(c->cdev->gadget)) { eem_ss_in_desc.bEndpointAddress = eem_fs_in_desc.bEndpointAddress; eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->ss_descriptors = usb_copy_descriptors(eem_ss_function); if (!f->ss_descriptors) goto fail; } DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n", gadget_is_superspeed(c->cdev->gadget) ? 
"super" : gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", eem->port.in_ep->name, eem->port.out_ep->name); return 0; fail: if (f->descriptors) usb_free_descriptors(f->descriptors); if (f->hs_descriptors) usb_free_descriptors(f->hs_descriptors); /* we might as well release our claims on endpoints */ if (eem->port.out_ep->desc) eem->port.out_ep->driver_data = NULL; if (eem->port.in_ep->desc) eem->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); return status; } static void eem_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_eem *eem = func_to_eem(f); DBG(c->cdev, "eem unbind\n"); if (gadget_is_superspeed(c->cdev->gadget)) usb_free_descriptors(f->ss_descriptors); if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); kfree(eem); } static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = (struct sk_buff *)req->context; dev_kfree_skb_any(skb); } /* * Add the EEM header and ethernet checksum. * We currently do not attempt to put multiple ethernet frames * into a single USB transfer */ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb) { struct sk_buff *skb2 = NULL; struct usb_ep *in = port->in_ep; int padlen = 0; u16 len = skb->len; if (!skb_cloned(skb)) { int headroom = skb_headroom(skb); int tailroom = skb_tailroom(skb); /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0, * stick two bytes of zero-length EEM packet on the end. 
*/ if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0) padlen += 2; if ((tailroom >= (ETH_FCS_LEN + padlen)) && (headroom >= EEM_HLEN)) goto done; } skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC); dev_kfree_skb_any(skb); skb = skb2; if (!skb) return skb; done: /* use the "no CRC" option */ put_unaligned_be32(0xdeadbeef, skb_put(skb, 4)); /* EEM packet header format: * b0..13: length of ethernet frame * b14: bmCRC (0 == sentinel CRC) * b15: bmType (0 == data) */ len = skb->len; put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2)); /* add a zero-length EEM packet, if needed */ if (padlen) put_unaligned_le16(0, skb_put(skb, 2)); return skb; } /* * Remove the EEM header. Note that there can be many EEM packets in a single * USB transfer, so we need to break them out and handle them independently. */ static int eem_unwrap(struct gether *port, struct sk_buff *skb, struct sk_buff_head *list) { struct usb_composite_dev *cdev = port->func.config->cdev; int status = 0; do { struct sk_buff *skb2; u16 header; u16 len = 0; if (skb->len < EEM_HLEN) { status = -EINVAL; DBG(cdev, "invalid EEM header\n"); goto error; } /* remove the EEM header */ header = get_unaligned_le16(skb->data); skb_pull(skb, EEM_HLEN); /* EEM packet header format: * b0..14: EEM type dependent (data or command) * b15: bmType (0 == data, 1 == command) */ if (header & BIT(15)) { struct usb_request *req = cdev->req; u16 bmEEMCmd; /* EEM command packet format: * b0..10: bmEEMCmdParam * b11..13: bmEEMCmd * b14: reserved (must be zero) * b15: bmType (1 == command) */ if (header & BIT(14)) continue; bmEEMCmd = (header >> 11) & 0x7; switch (bmEEMCmd) { case 0: /* echo */ len = header & 0x7FF; if (skb->len < len) { status = -EOVERFLOW; goto error; } skb2 = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb2)) { DBG(cdev, "EEM echo response error\n"); goto next; } skb_trim(skb2, len); put_unaligned_le16(BIT(15) | BIT(11) | len, skb_push(skb2, 2)); skb_copy_bits(skb2, 0, req->buf, skb2->len); 
req->length = skb2->len; req->complete = eem_cmd_complete; req->zero = 1; req->context = skb2; if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) DBG(cdev, "echo response queue fail\n"); break; case 1: /* echo response */ case 2: /* suspend hint */ case 3: /* response hint */ case 4: /* response complete hint */ case 5: /* tickle */ default: /* reserved */ continue; } } else { u32 crc, crc2; struct sk_buff *skb3; /* check for zero-length EEM packet */ if (header == 0) continue; /* EEM data packet format: * b0..13: length of ethernet frame * b14: bmCRC (0 == sentinel, 1 == calculated) * b15: bmType (0 == data) */ len = header & 0x3FFF; if ((skb->len < len) || (len < (ETH_HLEN + ETH_FCS_LEN))) { status = -EINVAL; goto error; } /* validate CRC */ if (header & BIT(14)) { crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN); crc2 = ~crc32_le(~0, skb->data, len - ETH_FCS_LEN); } else { crc = get_unaligned_be32(skb->data + len - ETH_FCS_LEN); crc2 = 0xdeadbeef; } if (crc != crc2) { DBG(cdev, "invalid EEM CRC\n"); goto next; } skb2 = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb2)) { DBG(cdev, "unable to unframe EEM packet\n"); continue; } skb_trim(skb2, len - ETH_FCS_LEN); skb3 = skb_copy_expand(skb2, NET_IP_ALIGN, 0, GFP_ATOMIC); if (unlikely(!skb3)) { DBG(cdev, "unable to realign EEM packet\n"); dev_kfree_skb_any(skb2); continue; } dev_kfree_skb_any(skb2); skb_queue_tail(list, skb3); } next: skb_pull(skb, len); } while (skb->len); error: dev_kfree_skb_any(skb); return status; } /** * eem_bind_config - add CDC Ethernet (EEM) network link to a configuration * @c: the configuration to support the network link * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. * * Caller must have called @gether_setup(). Caller is also responsible * for calling @gether_cleanup() before module unload. 
*/ int __init eem_bind_config(struct usb_configuration *c) { struct f_eem *eem; int status; /* maybe allocate device-global string IDs */ if (eem_string_defs[0].id == 0) { /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; eem_string_defs[0].id = status; eem_intf.iInterface = status; } /* allocate and initialize one new instance */ eem = kzalloc(sizeof *eem, GFP_KERNEL); if (!eem) return -ENOMEM; eem->port.cdc_filter = DEFAULT_FILTER; eem->port.func.name = "cdc_eem"; eem->port.func.strings = eem_strings; /* descriptors are per-instance copies */ eem->port.func.bind = eem_bind; eem->port.func.unbind = eem_unbind; eem->port.func.set_alt = eem_set_alt; eem->port.func.setup = eem_setup; eem->port.func.disable = eem_disable; eem->port.wrap = eem_wrap; eem->port.unwrap = eem_unwrap; eem->port.header_len = EEM_HLEN; status = usb_add_function(c, &eem->port.func); if (status) kfree(eem); return status; }
gpl-2.0
brymaster5000/m7wlv_4.3
drivers/usb/gadget/f_eem.c
3646
15318
/* * f_eem.c -- USB CDC Ethernet (EEM) link function driver * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2008 Nokia Corporation * Copyright (C) 2009 EF Johnson Technologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/slab.h> #include "u_ether.h" #define EEM_HLEN 2 /* * This function is a "CDC Ethernet Emulation Model" (CDC EEM) * Ethernet link. */ struct f_eem { struct gether port; u8 ctrl_id; }; static inline struct f_eem *func_to_eem(struct usb_function *f) { return container_of(f, struct f_eem, port.func); } /*-------------------------------------------------------------------------*/ /* interface descriptor: */ static struct usb_interface_descriptor eem_intf __initdata = { .bLength = sizeof eem_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_EEM, .bInterfaceProtocol = USB_CDC_PROTO_EEM, /* .iInterface = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *eem_fs_function[] __initdata = { /* CDC EEM control descriptors */ (struct usb_descriptor_header *) &eem_intf, (struct usb_descriptor_header *) &eem_fs_in_desc, (struct usb_descriptor_header *) 
&eem_fs_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_descriptor_header *eem_hs_function[] __initdata = { /* CDC EEM control descriptors */ (struct usb_descriptor_header *) &eem_intf, (struct usb_descriptor_header *) &eem_hs_in_desc, (struct usb_descriptor_header *) &eem_hs_out_desc, NULL, }; /* super speed support: */ static struct usb_endpoint_descriptor eem_ss_in_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(1024), }; static struct usb_endpoint_descriptor eem_ss_out_desc __initdata = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(1024), }; static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc __initdata = { .bLength = sizeof eem_ss_bulk_comp_desc, .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /* the following 2 values can be tweaked if necessary */ /* .bMaxBurst = 0, */ /* .bmAttributes = 0, */ }; static struct usb_descriptor_header *eem_ss_function[] __initdata = { /* CDC EEM control descriptors */ (struct usb_descriptor_header *) &eem_intf, (struct usb_descriptor_header *) &eem_ss_in_desc, (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc, (struct usb_descriptor_header *) &eem_ss_out_desc, (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc, NULL, }; /* string descriptors: 
*/ static struct usb_string eem_string_defs[] = { [0].s = "CDC Ethernet Emulation Model (EEM)", { } /* end of list */ }; static struct usb_gadget_strings eem_string_table = { .language = 0x0409, /* en-us */ .strings = eem_string_defs, }; static struct usb_gadget_strings *eem_strings[] = { &eem_string_table, NULL, }; /*-------------------------------------------------------------------------*/ static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct usb_composite_dev *cdev = f->config->cdev; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* device either stalls (value < 0) or reports success */ return value; } static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_eem *eem = func_to_eem(f); struct usb_composite_dev *cdev = f->config->cdev; struct net_device *net; /* we know alt == 0, so this is an activation or a reset */ if (alt != 0) goto fail; if (intf == eem->ctrl_id) { if (eem->port.in_ep->driver_data) { DBG(cdev, "reset eem\n"); gether_disconnect(&eem->port); } if (!eem->port.in_ep->desc || !eem->port.out_ep->desc) { DBG(cdev, "init eem\n"); if (config_ep_by_speed(cdev->gadget, f, eem->port.in_ep) || config_ep_by_speed(cdev->gadget, f, eem->port.out_ep)) { eem->port.in_ep->desc = NULL; eem->port.out_ep->desc = NULL; goto fail; } } /* zlps should not occur because zero-length EEM packets * will be inserted in those cases where they would occur */ eem->port.is_zlp_ok = 1; eem->port.cdc_filter = DEFAULT_FILTER; DBG(cdev, "activate eem\n"); net = gether_connect(&eem->port); if (IS_ERR(net)) return PTR_ERR(net); } else goto fail; return 0; fail: return -EINVAL; } static void eem_disable(struct usb_function *f) { struct f_eem *eem = func_to_eem(f); struct usb_composite_dev *cdev 
= f->config->cdev; DBG(cdev, "eem deactivated\n"); if (eem->port.in_ep->driver_data) gether_disconnect(&eem->port); } /*-------------------------------------------------------------------------*/ /* EEM function driver setup/binding */ static int __init eem_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_eem *eem = func_to_eem(f); int status; struct usb_ep *ep; /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) goto fail; eem->ctrl_id = status; eem_intf.bInterfaceNumber = status; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc); if (!ep) goto fail; eem->port.in_ep = ep; ep->driver_data = cdev; /* claim */ ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc); if (!ep) goto fail; eem->port.out_ep = ep; ep->driver_data = cdev; /* claim */ status = -ENOMEM; /* copy descriptors, and track endpoint copies */ f->descriptors = usb_copy_descriptors(eem_fs_function); if (!f->descriptors) goto fail; /* support all relevant hardware speeds... we expect that when * hardware is dual speed, all bulk-capable endpoints work at * both speeds */ if (gadget_is_dualspeed(c->cdev->gadget)) { eem_hs_in_desc.bEndpointAddress = eem_fs_in_desc.bEndpointAddress; eem_hs_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(eem_hs_function); if (!f->hs_descriptors) goto fail; } if (gadget_is_superspeed(c->cdev->gadget)) { eem_ss_in_desc.bEndpointAddress = eem_fs_in_desc.bEndpointAddress; eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->ss_descriptors = usb_copy_descriptors(eem_ss_function); if (!f->ss_descriptors) goto fail; } DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n", gadget_is_superspeed(c->cdev->gadget) ? 
"super" : gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", eem->port.in_ep->name, eem->port.out_ep->name); return 0; fail: if (f->descriptors) usb_free_descriptors(f->descriptors); if (f->hs_descriptors) usb_free_descriptors(f->hs_descriptors); /* we might as well release our claims on endpoints */ if (eem->port.out_ep->desc) eem->port.out_ep->driver_data = NULL; if (eem->port.in_ep->desc) eem->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); return status; } static void eem_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_eem *eem = func_to_eem(f); DBG(c->cdev, "eem unbind\n"); if (gadget_is_superspeed(c->cdev->gadget)) usb_free_descriptors(f->ss_descriptors); if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); kfree(eem); } static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = (struct sk_buff *)req->context; dev_kfree_skb_any(skb); } /* * Add the EEM header and ethernet checksum. * We currently do not attempt to put multiple ethernet frames * into a single USB transfer */ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb) { struct sk_buff *skb2 = NULL; struct usb_ep *in = port->in_ep; int padlen = 0; u16 len = skb->len; if (!skb_cloned(skb)) { int headroom = skb_headroom(skb); int tailroom = skb_tailroom(skb); /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0, * stick two bytes of zero-length EEM packet on the end. 
*/ if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0) padlen += 2; if ((tailroom >= (ETH_FCS_LEN + padlen)) && (headroom >= EEM_HLEN)) goto done; } skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC); dev_kfree_skb_any(skb); skb = skb2; if (!skb) return skb; done: /* use the "no CRC" option */ put_unaligned_be32(0xdeadbeef, skb_put(skb, 4)); /* EEM packet header format: * b0..13: length of ethernet frame * b14: bmCRC (0 == sentinel CRC) * b15: bmType (0 == data) */ len = skb->len; put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2)); /* add a zero-length EEM packet, if needed */ if (padlen) put_unaligned_le16(0, skb_put(skb, 2)); return skb; } /* * Remove the EEM header. Note that there can be many EEM packets in a single * USB transfer, so we need to break them out and handle them independently. */ static int eem_unwrap(struct gether *port, struct sk_buff *skb, struct sk_buff_head *list) { struct usb_composite_dev *cdev = port->func.config->cdev; int status = 0; do { struct sk_buff *skb2; u16 header; u16 len = 0; if (skb->len < EEM_HLEN) { status = -EINVAL; DBG(cdev, "invalid EEM header\n"); goto error; } /* remove the EEM header */ header = get_unaligned_le16(skb->data); skb_pull(skb, EEM_HLEN); /* EEM packet header format: * b0..14: EEM type dependent (data or command) * b15: bmType (0 == data, 1 == command) */ if (header & BIT(15)) { struct usb_request *req = cdev->req; u16 bmEEMCmd; /* EEM command packet format: * b0..10: bmEEMCmdParam * b11..13: bmEEMCmd * b14: reserved (must be zero) * b15: bmType (1 == command) */ if (header & BIT(14)) continue; bmEEMCmd = (header >> 11) & 0x7; switch (bmEEMCmd) { case 0: /* echo */ len = header & 0x7FF; if (skb->len < len) { status = -EOVERFLOW; goto error; } skb2 = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb2)) { DBG(cdev, "EEM echo response error\n"); goto next; } skb_trim(skb2, len); put_unaligned_le16(BIT(15) | BIT(11) | len, skb_push(skb2, 2)); skb_copy_bits(skb2, 0, req->buf, skb2->len); 
req->length = skb2->len; req->complete = eem_cmd_complete; req->zero = 1; req->context = skb2; if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) DBG(cdev, "echo response queue fail\n"); break; case 1: /* echo response */ case 2: /* suspend hint */ case 3: /* response hint */ case 4: /* response complete hint */ case 5: /* tickle */ default: /* reserved */ continue; } } else { u32 crc, crc2; struct sk_buff *skb3; /* check for zero-length EEM packet */ if (header == 0) continue; /* EEM data packet format: * b0..13: length of ethernet frame * b14: bmCRC (0 == sentinel, 1 == calculated) * b15: bmType (0 == data) */ len = header & 0x3FFF; if ((skb->len < len) || (len < (ETH_HLEN + ETH_FCS_LEN))) { status = -EINVAL; goto error; } /* validate CRC */ if (header & BIT(14)) { crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN); crc2 = ~crc32_le(~0, skb->data, len - ETH_FCS_LEN); } else { crc = get_unaligned_be32(skb->data + len - ETH_FCS_LEN); crc2 = 0xdeadbeef; } if (crc != crc2) { DBG(cdev, "invalid EEM CRC\n"); goto next; } skb2 = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb2)) { DBG(cdev, "unable to unframe EEM packet\n"); continue; } skb_trim(skb2, len - ETH_FCS_LEN); skb3 = skb_copy_expand(skb2, NET_IP_ALIGN, 0, GFP_ATOMIC); if (unlikely(!skb3)) { DBG(cdev, "unable to realign EEM packet\n"); dev_kfree_skb_any(skb2); continue; } dev_kfree_skb_any(skb2); skb_queue_tail(list, skb3); } next: skb_pull(skb, len); } while (skb->len); error: dev_kfree_skb_any(skb); return status; } /** * eem_bind_config - add CDC Ethernet (EEM) network link to a configuration * @c: the configuration to support the network link * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. * * Caller must have called @gether_setup(). Caller is also responsible * for calling @gether_cleanup() before module unload. 
*/ int __init eem_bind_config(struct usb_configuration *c) { struct f_eem *eem; int status; /* maybe allocate device-global string IDs */ if (eem_string_defs[0].id == 0) { /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; eem_string_defs[0].id = status; eem_intf.iInterface = status; } /* allocate and initialize one new instance */ eem = kzalloc(sizeof *eem, GFP_KERNEL); if (!eem) return -ENOMEM; eem->port.cdc_filter = DEFAULT_FILTER; eem->port.func.name = "cdc_eem"; eem->port.func.strings = eem_strings; /* descriptors are per-instance copies */ eem->port.func.bind = eem_bind; eem->port.func.unbind = eem_unbind; eem->port.func.set_alt = eem_set_alt; eem->port.func.setup = eem_setup; eem->port.func.disable = eem_disable; eem->port.wrap = eem_wrap; eem->port.unwrap = eem_unwrap; eem->port.header_len = EEM_HLEN; status = usb_add_function(c, &eem->port.func); if (status) kfree(eem); return status; }
gpl-2.0
talnoah/Sprint-One-4.2.2-Sense
arch/powerpc/kernel/ppc_ksyms.c
4414
4746
#include <linux/export.h> #include <linux/threads.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/elfcore.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/screen_info.h> #include <linux/vt_kern.h> #include <linux/nvram.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/bitops.h> #include <asm/page.h> #include <asm/processor.h> #include <asm/cacheflush.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/checksum.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/irq.h> #include <asm/pmac_feature.h> #include <asm/dma.h> #include <asm/machdep.h> #include <asm/hw_irq.h> #include <asm/nvram.h> #include <asm/mmu_context.h> #include <asm/backlight.h> #include <asm/time.h> #include <asm/cputable.h> #include <asm/btext.h> #include <asm/div64.h> #include <asm/signal.h> #include <asm/dcr.h> #include <asm/ftrace.h> #include <asm/switch_to.h> #ifdef CONFIG_PPC32 extern void transfer_to_handler(void); extern void do_IRQ(struct pt_regs *regs); extern void machine_check_exception(struct pt_regs *regs); extern void alignment_exception(struct pt_regs *regs); extern void program_check_exception(struct pt_regs *regs); extern void single_step_exception(struct pt_regs *regs); extern int sys_sigreturn(struct pt_regs *regs); EXPORT_SYMBOL(clear_pages); EXPORT_SYMBOL(ISA_DMA_THRESHOLD); EXPORT_SYMBOL(DMA_MODE_READ); EXPORT_SYMBOL(DMA_MODE_WRITE); EXPORT_SYMBOL(transfer_to_handler); EXPORT_SYMBOL(do_IRQ); EXPORT_SYMBOL(machine_check_exception); EXPORT_SYMBOL(alignment_exception); EXPORT_SYMBOL(program_check_exception); EXPORT_SYMBOL(single_step_exception); EXPORT_SYMBOL(sys_sigreturn); #endif #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif EXPORT_SYMBOL(strcpy); EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); 
EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strcmp); EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); EXPORT_SYMBOL(ip_fast_csum); EXPORT_SYMBOL(csum_tcpudp_magic); EXPORT_SYMBOL(__copy_tofrom_user); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strnlen_user); EXPORT_SYMBOL(copy_page); #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) EXPORT_SYMBOL(isa_io_base); EXPORT_SYMBOL(isa_mem_base); EXPORT_SYMBOL(pci_dram_offset); #endif /* CONFIG_PCI */ EXPORT_SYMBOL(start_thread); EXPORT_SYMBOL(kernel_thread); EXPORT_SYMBOL(giveup_fpu); #ifdef CONFIG_ALTIVEC EXPORT_SYMBOL(giveup_altivec); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX EXPORT_SYMBOL(giveup_vsx); EXPORT_SYMBOL_GPL(__giveup_vsx); #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE EXPORT_SYMBOL(giveup_spe); #endif /* CONFIG_SPE */ #ifndef CONFIG_PPC64 EXPORT_SYMBOL(flush_instruction_cache); #endif EXPORT_SYMBOL(__flush_icache_range); EXPORT_SYMBOL(flush_dcache_range); #ifdef CONFIG_SMP #ifdef CONFIG_PPC32 EXPORT_SYMBOL(smp_hw_index); #endif #endif #ifdef CONFIG_ADB EXPORT_SYMBOL(adb_request); EXPORT_SYMBOL(adb_register); EXPORT_SYMBOL(adb_unregister); EXPORT_SYMBOL(adb_poll); EXPORT_SYMBOL(adb_try_handler_change); #endif /* CONFIG_ADB */ #ifdef CONFIG_ADB_CUDA EXPORT_SYMBOL(cuda_request); EXPORT_SYMBOL(cuda_poll); #endif /* CONFIG_ADB_CUDA */ EXPORT_SYMBOL(to_tm); #ifdef CONFIG_PPC32 long long __ashrdi3(long long, int); long long __ashldi3(long long, int); long long __lshrdi3(long long, int); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); int __ucmpdi2(unsigned long long, unsigned long long); EXPORT_SYMBOL(__ucmpdi2); #endif EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memchr); #if defined(CONFIG_FB_VGA16_MODULE) EXPORT_SYMBOL(screen_info); #endif #ifdef CONFIG_PPC32 EXPORT_SYMBOL(timer_interrupt); EXPORT_SYMBOL(tb_ticks_per_jiffy); 
EXPORT_SYMBOL(cacheable_memcpy); EXPORT_SYMBOL(cacheable_memzero); #endif #ifdef CONFIG_PPC32 EXPORT_SYMBOL(switch_mmu_context); #endif #ifdef CONFIG_PPC_STD_MMU_32 extern long mol_trampoline; EXPORT_SYMBOL(mol_trampoline); /* For MOL */ EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ #ifdef CONFIG_SMP extern int mmu_hash_lock; EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ #endif /* CONFIG_SMP */ extern long *intercept_table; EXPORT_SYMBOL(intercept_table); #endif /* CONFIG_PPC_STD_MMU_32 */ #ifdef CONFIG_PPC_DCR_NATIVE EXPORT_SYMBOL(__mtdcr); EXPORT_SYMBOL(__mfdcr); #endif EXPORT_SYMBOL(empty_zero_page); #ifdef CONFIG_PPC64 EXPORT_SYMBOL(__arch_hweight8); EXPORT_SYMBOL(__arch_hweight16); EXPORT_SYMBOL(__arch_hweight32); EXPORT_SYMBOL(__arch_hweight64); #endif
gpl-2.0
vigor/vigor_aosp_kernel
drivers/bluetooth/btmrvl_main.c
6974
15195
/** * Marvell Bluetooth driver * * Copyright (C) 2009, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. **/ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btmrvl_drv.h" #define VERSION "1.0" /* * This function is called by interface specific interrupt handler. * It updates Power Save & Host Sleep states, and wakes up the main * thread. 
*/ void btmrvl_interrupt(struct btmrvl_private *priv) { priv->adapter->ps_state = PS_AWAKE; priv->adapter->wakeup_tries = 0; priv->adapter->int_count++; wake_up_interruptible(&priv->main_thread.wait_q); } EXPORT_SYMBOL_GPL(btmrvl_interrupt); void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb) { struct hci_event_hdr *hdr = (void *) skb->data; struct hci_ev_cmd_complete *ec; u16 opcode, ocf; if (hdr->evt == HCI_EV_CMD_COMPLETE) { ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE); opcode = __le16_to_cpu(ec->opcode); ocf = hci_opcode_ocf(opcode); if (ocf == BT_CMD_MODULE_CFG_REQ && priv->btmrvl_dev.sendcmdflag) { priv->btmrvl_dev.sendcmdflag = false; priv->adapter->cmd_complete = true; wake_up_interruptible(&priv->adapter->cmd_wait_q); } } } EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt); int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb) { struct btmrvl_adapter *adapter = priv->adapter; struct btmrvl_event *event; int ret = 0; event = (struct btmrvl_event *) skb->data; if (event->ec != 0xff) { BT_DBG("Not Marvell Event=%x", event->ec); ret = -EINVAL; goto exit; } switch (event->data[0]) { case BT_CMD_AUTO_SLEEP_MODE: if (!event->data[2]) { if (event->data[1] == BT_PS_ENABLE) adapter->psmode = 1; else adapter->psmode = 0; BT_DBG("PS Mode:%s", (adapter->psmode) ? 
"Enable" : "Disable"); } else { BT_DBG("PS Mode command failed"); } break; case BT_CMD_HOST_SLEEP_CONFIG: if (!event->data[3]) BT_DBG("gpio=%x, gap=%x", event->data[1], event->data[2]); else BT_DBG("HSCFG command failed"); break; case BT_CMD_HOST_SLEEP_ENABLE: if (!event->data[1]) { adapter->hs_state = HS_ACTIVATED; if (adapter->psmode) adapter->ps_state = PS_SLEEP; wake_up_interruptible(&adapter->cmd_wait_q); BT_DBG("HS ACTIVATED!"); } else { BT_DBG("HS Enable failed"); } break; case BT_CMD_MODULE_CFG_REQ: if (priv->btmrvl_dev.sendcmdflag && event->data[1] == MODULE_BRINGUP_REQ) { BT_DBG("EVENT:%s", ((event->data[2] == MODULE_BROUGHT_UP) || (event->data[2] == MODULE_ALREADY_UP)) ? "Bring-up succeed" : "Bring-up failed"); if (event->length > 3 && event->data[3]) priv->btmrvl_dev.dev_type = HCI_AMP; else priv->btmrvl_dev.dev_type = HCI_BREDR; BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type); } else if (priv->btmrvl_dev.sendcmdflag && event->data[1] == MODULE_SHUTDOWN_REQ) { BT_DBG("EVENT:%s", (event->data[2]) ? "Shutdown failed" : "Shutdown succeed"); } else { BT_DBG("BT_CMD_MODULE_CFG_REQ resp for APP"); ret = -EINVAL; } break; case BT_EVENT_POWER_STATE: if (event->data[1] == BT_PS_SLEEP) adapter->ps_state = PS_SLEEP; BT_DBG("EVENT:%s", (adapter->ps_state) ? 
"PS_SLEEP" : "PS_AWAKE"); break; default: BT_DBG("Unknown Event=%d", event->data[0]); ret = -EINVAL; break; } exit: if (!ret) kfree_skb(skb); return ret; } EXPORT_SYMBOL_GPL(btmrvl_process_event); int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd) { struct sk_buff *skb; struct btmrvl_cmd *cmd; int ret = 0; skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC); if (skb == NULL) { BT_ERR("No free skb"); return -ENOMEM; } cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd)); cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ)); cmd->length = 1; cmd->data[0] = subcmd; bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; skb->dev = (void *) priv->btmrvl_dev.hcidev; skb_queue_head(&priv->adapter->tx_queue, skb); priv->btmrvl_dev.sendcmdflag = true; priv->adapter->cmd_complete = false; BT_DBG("Queue module cfg Command"); wake_up_interruptible(&priv->main_thread.wait_q); if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q, priv->adapter->cmd_complete, msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) { ret = -ETIMEDOUT; BT_ERR("module_cfg_cmd(%x): timeout: %d", subcmd, priv->btmrvl_dev.sendcmdflag); } BT_DBG("module cfg Command done"); return ret; } EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd); int btmrvl_enable_ps(struct btmrvl_private *priv) { struct sk_buff *skb; struct btmrvl_cmd *cmd; skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC); if (skb == NULL) { BT_ERR("No free skb"); return -ENOMEM; } cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd)); cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_AUTO_SLEEP_MODE)); cmd->length = 1; if (priv->btmrvl_dev.psmode) cmd->data[0] = BT_PS_ENABLE; else cmd->data[0] = BT_PS_DISABLE; bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; skb->dev = (void *) priv->btmrvl_dev.hcidev; skb_queue_head(&priv->adapter->tx_queue, skb); BT_DBG("Queue PSMODE Command:%d", cmd->data[0]); return 0; } EXPORT_SYMBOL_GPL(btmrvl_enable_ps); static int btmrvl_enable_hs(struct btmrvl_private *priv) { struct sk_buff *skb; struct btmrvl_cmd 
*cmd; int ret = 0; skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC); if (skb == NULL) { BT_ERR("No free skb"); return -ENOMEM; } cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd)); cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE)); cmd->length = 0; bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; skb->dev = (void *) priv->btmrvl_dev.hcidev; skb_queue_head(&priv->adapter->tx_queue, skb); BT_DBG("Queue hs enable Command"); wake_up_interruptible(&priv->main_thread.wait_q); if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q, priv->adapter->hs_state, msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) { ret = -ETIMEDOUT; BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state, priv->adapter->ps_state, priv->adapter->wakeup_tries); } return ret; } int btmrvl_prepare_command(struct btmrvl_private *priv) { struct sk_buff *skb = NULL; struct btmrvl_cmd *cmd; int ret = 0; if (priv->btmrvl_dev.hscfgcmd) { priv->btmrvl_dev.hscfgcmd = 0; skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC); if (skb == NULL) { BT_ERR("No free skb"); return -ENOMEM; } cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd)); cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_CONFIG)); cmd->length = 2; cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8; cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff); bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; skb->dev = (void *) priv->btmrvl_dev.hcidev; skb_queue_head(&priv->adapter->tx_queue, skb); BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0], cmd->data[1]); } if (priv->btmrvl_dev.pscmd) { priv->btmrvl_dev.pscmd = 0; btmrvl_enable_ps(priv); } if (priv->btmrvl_dev.hscmd) { priv->btmrvl_dev.hscmd = 0; if (priv->btmrvl_dev.hsmode) { ret = btmrvl_enable_hs(priv); } else { ret = priv->hw_wakeup_firmware(priv); priv->adapter->hs_state = HS_DEACTIVATED; } } return ret; } static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb) { int ret = 0; if (!skb || !skb->data) return -EINVAL; if (!skb->len 
|| ((skb->len + BTM_HEADER_LEN) > BTM_UPLD_SIZE)) { BT_ERR("Tx Error: Bad skb length %d : %d", skb->len, BTM_UPLD_SIZE); return -EINVAL; } if (skb_headroom(skb) < BTM_HEADER_LEN) { struct sk_buff *tmp = skb; skb = skb_realloc_headroom(skb, BTM_HEADER_LEN); if (!skb) { BT_ERR("Tx Error: realloc_headroom failed %d", BTM_HEADER_LEN); skb = tmp; return -EINVAL; } kfree_skb(tmp); } skb_push(skb, BTM_HEADER_LEN); /* header type: byte[3] * HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor * header length: byte[2][1][0] */ skb->data[0] = (skb->len & 0x0000ff); skb->data[1] = (skb->len & 0x00ff00) >> 8; skb->data[2] = (skb->len & 0xff0000) >> 16; skb->data[3] = bt_cb(skb)->pkt_type; if (priv->hw_host_to_card) ret = priv->hw_host_to_card(priv, skb->data, skb->len); return ret; } static void btmrvl_init_adapter(struct btmrvl_private *priv) { skb_queue_head_init(&priv->adapter->tx_queue); priv->adapter->ps_state = PS_AWAKE; init_waitqueue_head(&priv->adapter->cmd_wait_q); } static void btmrvl_free_adapter(struct btmrvl_private *priv) { skb_queue_purge(&priv->adapter->tx_queue); kfree(priv->adapter); priv->adapter = NULL; } static int btmrvl_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) { return -ENOIOCTLCMD; } static void btmrvl_destruct(struct hci_dev *hdev) { } static int btmrvl_send_frame(struct sk_buff *skb) { struct hci_dev *hdev = (struct hci_dev *) skb->dev; struct btmrvl_private *priv = NULL; BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len); if (!hdev || !hdev->driver_data) { BT_ERR("Frame for unknown HCI device"); return -ENODEV; } priv = (struct btmrvl_private *) hdev->driver_data; if (!test_bit(HCI_RUNNING, &hdev->flags)) { BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags); print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET, skb->data, skb->len); return -EBUSY; } switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: 
hdev->stat.sco_tx++; break; } skb_queue_tail(&priv->adapter->tx_queue, skb); wake_up_interruptible(&priv->main_thread.wait_q); return 0; } static int btmrvl_flush(struct hci_dev *hdev) { struct btmrvl_private *priv = hdev->driver_data; skb_queue_purge(&priv->adapter->tx_queue); return 0; } static int btmrvl_close(struct hci_dev *hdev) { struct btmrvl_private *priv = hdev->driver_data; if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) return 0; skb_queue_purge(&priv->adapter->tx_queue); return 0; } static int btmrvl_open(struct hci_dev *hdev) { set_bit(HCI_RUNNING, &hdev->flags); return 0; } /* * This function handles the event generated by firmware, rx data * received from firmware, and tx data sent from kernel. */ static int btmrvl_service_main_thread(void *data) { struct btmrvl_thread *thread = data; struct btmrvl_private *priv = thread->priv; struct btmrvl_adapter *adapter = priv->adapter; wait_queue_t wait; struct sk_buff *skb; ulong flags; init_waitqueue_entry(&wait, current); current->flags |= PF_NOFREEZE; for (;;) { add_wait_queue(&thread->wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); if (adapter->wakeup_tries || ((!adapter->int_count) && (!priv->btmrvl_dev.tx_dnld_rdy || skb_queue_empty(&adapter->tx_queue)))) { BT_DBG("main_thread is sleeping..."); schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&thread->wait_q, &wait); BT_DBG("main_thread woke up"); if (kthread_should_stop()) { BT_DBG("main_thread: break from main thread"); break; } spin_lock_irqsave(&priv->driver_lock, flags); if (adapter->int_count) { adapter->int_count = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); priv->hw_process_int_status(priv); } else if (adapter->ps_state == PS_SLEEP && !skb_queue_empty(&adapter->tx_queue)) { spin_unlock_irqrestore(&priv->driver_lock, flags); adapter->wakeup_tries++; priv->hw_wakeup_firmware(priv); continue; } else { spin_unlock_irqrestore(&priv->driver_lock, flags); } if (adapter->ps_state == PS_SLEEP) continue; if 
(!priv->btmrvl_dev.tx_dnld_rdy) continue; skb = skb_dequeue(&adapter->tx_queue); if (skb) { if (btmrvl_tx_pkt(priv, skb)) priv->btmrvl_dev.hcidev->stat.err_tx++; else priv->btmrvl_dev.hcidev->stat.byte_tx += skb->len; kfree_skb(skb); } } return 0; } int btmrvl_register_hdev(struct btmrvl_private *priv) { struct hci_dev *hdev = NULL; int ret; hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can not allocate HCI device"); goto err_hdev; } priv->btmrvl_dev.hcidev = hdev; hdev->driver_data = priv; hdev->bus = HCI_SDIO; hdev->open = btmrvl_open; hdev->close = btmrvl_close; hdev->flush = btmrvl_flush; hdev->send = btmrvl_send_frame; hdev->destruct = btmrvl_destruct; hdev->ioctl = btmrvl_ioctl; hdev->owner = THIS_MODULE; btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ); hdev->dev_type = priv->btmrvl_dev.dev_type; ret = hci_register_dev(hdev); if (ret < 0) { BT_ERR("Can not register HCI device"); goto err_hci_register_dev; } #ifdef CONFIG_DEBUG_FS btmrvl_debugfs_init(hdev); #endif return 0; err_hci_register_dev: hci_free_dev(hdev); err_hdev: /* Stop the thread servicing the interrupts */ kthread_stop(priv->main_thread.task); btmrvl_free_adapter(priv); kfree(priv); return -ENOMEM; } EXPORT_SYMBOL_GPL(btmrvl_register_hdev); struct btmrvl_private *btmrvl_add_card(void *card) { struct btmrvl_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { BT_ERR("Can not allocate priv"); goto err_priv; } priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL); if (!priv->adapter) { BT_ERR("Allocate buffer for btmrvl_adapter failed!"); goto err_adapter; } btmrvl_init_adapter(priv); BT_DBG("Starting kthread..."); priv->main_thread.priv = priv; spin_lock_init(&priv->driver_lock); init_waitqueue_head(&priv->main_thread.wait_q); priv->main_thread.task = kthread_run(btmrvl_service_main_thread, &priv->main_thread, "btmrvl_main_service"); priv->btmrvl_dev.card = card; priv->btmrvl_dev.tx_dnld_rdy = true; return priv; err_adapter: kfree(priv); err_priv: return NULL; } 
EXPORT_SYMBOL_GPL(btmrvl_add_card); int btmrvl_remove_card(struct btmrvl_private *priv) { struct hci_dev *hdev; hdev = priv->btmrvl_dev.hcidev; wake_up_interruptible(&priv->adapter->cmd_wait_q); kthread_stop(priv->main_thread.task); #ifdef CONFIG_DEBUG_FS btmrvl_debugfs_remove(hdev); #endif hci_unregister_dev(hdev); hci_free_dev(hdev); priv->btmrvl_dev.hcidev = NULL; btmrvl_free_adapter(priv); kfree(priv); return 0; } EXPORT_SYMBOL_GPL(btmrvl_remove_card); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell Bluetooth driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL v2");
gpl-2.0