repo_name
string
path
string
copies
string
size
string
content
string
license
string
ndmsystems/linux-2.6.22-tc
arch/i386/kernel/cpu/cpufreq/longhaul.c
35
22471
/* * (C) 2001-2004 Dave Jones. <davej@codemonkey.org.uk> * (C) 2002 Padraig Brady. <padraig@antefacto.com> * * Licensed under the terms of the GNU GPL License version 2. * Based upon datasheets & sample CPUs kindly provided by VIA. * * VIA have currently 3 different versions of Longhaul. * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147. * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0. * Version 2 of longhaul is backward compatible with v1, but adds * LONGHAUL MSR for purpose of both frequency and voltage scaling. * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C). * Version 3 of longhaul got renamed to Powersaver and redesigned * to use only the POWERSAVER MSR at 0x110a. * It is present in Ezra-T (C5M), Nehemiah (C5X) and above. * It's pretty much the same feature wise to longhaul v2, though * there is provision for scaling FSB too, but this doesn't work * too well in practice so we don't even try to use this. * * BIG FAT DISCLAIMER: Work in progress code. 
Possibly *dangerous* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> #include <asm/msr.h> #include <asm/timex.h> #include <asm/io.h> #include <asm/acpi.h> #include <linux/acpi.h> #include <acpi/processor.h> #include "longhaul.h" #define PFX "longhaul: " #define TYPE_LONGHAUL_V1 1 #define TYPE_LONGHAUL_V2 2 #define TYPE_POWERSAVER 3 #define CPU_SAMUEL 1 #define CPU_SAMUEL2 2 #define CPU_EZRA 3 #define CPU_EZRA_T 4 #define CPU_NEHEMIAH 5 #define CPU_NEHEMIAH_C 6 /* Flags */ #define USE_ACPI_C3 (1 << 1) #define USE_NORTHBRIDGE (1 << 2) #define USE_VT8235 (1 << 3) static int cpu_model; static unsigned int numscales=16; static unsigned int fsb; static const struct mV_pos *vrm_mV_table; static const unsigned char *mV_vrm_table; struct f_msr { u8 vrm; u8 pos; }; static struct f_msr f_msr_table[32]; static unsigned int highest_speed, lowest_speed; /* kHz */ static unsigned int minmult, maxmult; static int can_scale_voltage; static struct acpi_processor *pr = NULL; static struct acpi_processor_cx *cx = NULL; static u8 longhaul_flags; static u8 longhaul_pos; /* Module parameters */ static int scale_voltage; #define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg) /* Clock ratios multiplied by 10 */ static int clock_ratio[32]; static int eblcr_table[32]; static int longhaul_version; static struct cpufreq_frequency_table *longhaul_table; #ifdef CONFIG_CPU_FREQ_DEBUG static char speedbuffer[8]; static char *print_speed(int speed) { if (speed < 1000) { snprintf(speedbuffer, sizeof(speedbuffer),"%dMHz", speed); return speedbuffer; } if (speed%1000 == 0) snprintf(speedbuffer, sizeof(speedbuffer), "%dGHz", speed/1000); else snprintf(speedbuffer, sizeof(speedbuffer), "%d.%dGHz", speed/1000, (speed%1000)/100); return speedbuffer; } #endif static unsigned int calc_speed(int mult) { int khz; khz = (mult/10)*fsb; if (mult%10) khz += fsb/2; khz *= 1000; return khz; } static int longhaul_get_cpu_mult(void) { unsigned long invalue=0,lo, hi; rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22; if (longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) { if (lo & (1<<27)) invalue+=16; } return eblcr_table[invalue]; } /* For processor with BCR2 MSR */ static void do_longhaul1(unsigned int clock_ratio_index) { union msr_bcr2 bcr2; rdmsrl(MSR_VIA_BCR2, bcr2.val); /* Enable software clock multiplier */ bcr2.bits.ESOFTBF = 1; bcr2.bits.CLOCKMUL = clock_ratio_index; /* Sync to timer tick */ safe_halt(); /* Change frequency on next halt or sleep */ wrmsrl(MSR_VIA_BCR2, bcr2.val); /* Invoke transition */ ACPI_FLUSH_CPU_CACHE(); halt(); /* Disable software clock multiplier */ local_irq_disable(); rdmsrl(MSR_VIA_BCR2, bcr2.val); bcr2.bits.ESOFTBF = 0; wrmsrl(MSR_VIA_BCR2, bcr2.val); } /* For processor with Longhaul MSR */ static void do_powersaver(int cx_address, unsigned int clock_ratio_index) { union msr_longhaul longhaul; u8 dest_pos; u32 t; dest_pos = f_msr_table[clock_ratio_index].pos; rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Setup new frequency */ longhaul.bits.RevisionKey = longhaul.bits.RevisionID; longhaul.bits.SoftBusRatio = 
clock_ratio_index & 0xf; longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; /* Setup new voltage */ if (can_scale_voltage) longhaul.bits.SoftVID = f_msr_table[clock_ratio_index].vrm; /* Sync to timer tick */ safe_halt(); /* Raise voltage if necessary */ if (can_scale_voltage && longhaul_pos < dest_pos) { longhaul.bits.EnableSoftVID = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); } else { ACPI_FLUSH_CPU_CACHE(); /* Invoke C3 */ inb(cx_address); /* Dummy op - must do something useless after P_LVL3 * read */ t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); longhaul_pos = dest_pos; } /* Change frequency on next halt or sleep */ longhaul.bits.EnableSoftBusRatio = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); } else { ACPI_FLUSH_CPU_CACHE(); /* Invoke C3 */ inb(cx_address); /* Dummy op - must do something useless after P_LVL3 read */ t = inl(acpi_gbl_FADT.xpm_timer_block.address); } /* Disable bus ratio bit */ longhaul.bits.EnableSoftBusRatio = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Reduce voltage if necessary */ if (can_scale_voltage && longhaul_pos > dest_pos) { longhaul.bits.EnableSoftVID = 1; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); } else { ACPI_FLUSH_CPU_CACHE(); /* Invoke C3 */ inb(cx_address); /* Dummy op - must do something useless after P_LVL3 * read */ t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); longhaul_pos = dest_pos; } } /** * longhaul_set_cpu_frequency() * @clock_ratio_index : bitpattern of the new multiplier. * * Sets a new clock ratio. 
*/ static void longhaul_setstate(unsigned int clock_ratio_index) { int speed, mult; struct cpufreq_freqs freqs; static unsigned int old_ratio=-1; unsigned long flags; unsigned int pic1_mask, pic2_mask; if (old_ratio == clock_ratio_index) return; old_ratio = clock_ratio_index; mult = clock_ratio[clock_ratio_index]; if (mult == -1) return; speed = calc_speed(mult); if ((speed > highest_speed) || (speed < lowest_speed)) return; freqs.old = calc_speed(longhaul_get_cpu_mult()); freqs.new = speed; freqs.cpu = 0; /* longhaul.c is UP only driver */ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); preempt_disable(); local_irq_save(flags); pic2_mask = inb(0xA1); pic1_mask = inb(0x21); /* works on C3. save mask. */ outb(0xFF,0xA1); /* Overkill */ outb(0xFE,0x21); /* TMR0 only */ if (longhaul_flags & USE_NORTHBRIDGE) { /* Disable AGP and PCI arbiters */ outb(3, 0x22); } else if ((pr != NULL) && pr->flags.bm_control) { /* Disable bus master arbitration */ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); } switch (longhaul_version) { /* * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B]) * Software controlled multipliers only. */ case TYPE_LONGHAUL_V1: do_longhaul1(clock_ratio_index); break; /* * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C] * * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N]) * Nehemiah can do FSB scaling too, but this has never been proven * to work in practice. 
*/ case TYPE_LONGHAUL_V2: case TYPE_POWERSAVER: if (longhaul_flags & USE_ACPI_C3) { /* Don't allow wakeup */ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); do_powersaver(cx->address, clock_ratio_index); } else { do_powersaver(0, clock_ratio_index); } break; } if (longhaul_flags & USE_NORTHBRIDGE) { /* Enable arbiters */ outb(0, 0x22); } else if ((pr != NULL) && pr->flags.bm_control) { /* Enable bus master arbitration */ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); } outb(pic2_mask,0xA1); /* restore mask */ outb(pic1_mask,0x21); local_irq_restore(flags); preempt_enable(); freqs.new = calc_speed(longhaul_get_cpu_mult()); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } /* * Centaur decided to make life a little more tricky. * Only longhaul v1 is allowed to read EBLCR BSEL[0:1]. * Samuel2 and above have to try and guess what the FSB is. * We do this by assuming we booted at maximum multiplier, and interpolate * between that value multiplied by possible FSBs and cpu_mhz which * was calculated at boot time. Really ugly, but no other way to do this. */ #define ROUNDING 0xf static int guess_fsb(int mult) { int speed = cpu_khz / 1000; int i; int speeds[] = { 666, 1000, 1333, 2000 }; int f_max, f_min; for (i = 0; i < 4; i++) { f_max = ((speeds[i] * mult) + 50) / 100; f_max += (ROUNDING / 2); f_min = f_max - ROUNDING; if ((speed <= f_max) && (speed >= f_min)) return speeds[i] / 10; } return 0; } static int __init longhaul_get_ranges(void) { unsigned int j, k = 0; int mult; /* Get current frequency */ mult = longhaul_get_cpu_mult(); if (mult == -1) { printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n"); return -EINVAL; } fsb = guess_fsb(mult); if (fsb == 0) { printk(KERN_INFO PFX "Invalid (reserved) FSB!\n"); return -EINVAL; } /* Get max multiplier - as we always did. * Longhaul MSR is usefull only when voltage scaling is enabled. * C3 is booting at max anyway. 
*/ maxmult = mult; /* Get min multiplier */ switch (cpu_model) { case CPU_NEHEMIAH: minmult = 50; break; case CPU_NEHEMIAH_C: minmult = 40; break; default: minmult = 30; break; } dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n", minmult/10, minmult%10, maxmult/10, maxmult%10); highest_speed = calc_speed(maxmult); lowest_speed = calc_speed(minmult); dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, print_speed(lowest_speed/1000), print_speed(highest_speed/1000)); if (lowest_speed == highest_speed) { printk (KERN_INFO PFX "highestspeed == lowest, aborting.\n"); return -EINVAL; } if (lowest_speed > highest_speed) { printk (KERN_INFO PFX "nonsense! lowest (%d > %d) !\n", lowest_speed, highest_speed); return -EINVAL; } longhaul_table = kmalloc((numscales + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL); if(!longhaul_table) return -ENOMEM; for (j=0; j < numscales; j++) { unsigned int ratio; ratio = clock_ratio[j]; if (ratio == -1) continue; if (ratio > maxmult || ratio < minmult) continue; longhaul_table[k].frequency = calc_speed(ratio); longhaul_table[k].index = j; k++; } longhaul_table[k].frequency = CPUFREQ_TABLE_END; if (!k) { kfree (longhaul_table); return -EINVAL; } return 0; } static void __init longhaul_setup_voltagescaling(void) { union msr_longhaul longhaul; struct mV_pos minvid, maxvid; unsigned int j, speed, pos, kHz_step, numvscales; int min_vid_speed; rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); if (!(longhaul.bits.RevisionID & 1)) { printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n"); return; } if (!longhaul.bits.VRMRev) { printk (KERN_INFO PFX "VRM 8.5\n"); vrm_mV_table = &vrm85_mV[0]; mV_vrm_table = &mV_vrm85[0]; } else { printk (KERN_INFO PFX "Mobile VRM\n"); if (cpu_model < CPU_NEHEMIAH) return; vrm_mV_table = &mobilevrm_mV[0]; mV_vrm_table = &mV_mobilevrm[0]; } minvid = vrm_mV_table[longhaul.bits.MinimumVID]; maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { printk 
(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " "Voltage scaling disabled.\n", minvid.mV/1000, minvid.mV%1000, maxvid.mV/1000, maxvid.mV%1000); return; } if (minvid.mV == maxvid.mV) { printk (KERN_INFO PFX "Claims to support voltage scaling but min & max are " "both %d.%03d. Voltage scaling disabled\n", maxvid.mV/1000, maxvid.mV%1000); return; } /* How many voltage steps */ numvscales = maxvid.pos - minvid.pos + 1; printk(KERN_INFO PFX "Max VID=%d.%03d " "Min VID=%d.%03d, " "%d possible voltage scales\n", maxvid.mV/1000, maxvid.mV%1000, minvid.mV/1000, minvid.mV%1000, numvscales); /* Calculate max frequency at min voltage */ j = longhaul.bits.MinMHzBR; if (longhaul.bits.MinMHzBR4) j += 16; min_vid_speed = eblcr_table[j]; if (min_vid_speed == -1) return; switch (longhaul.bits.MinMHzFSB) { case 0: min_vid_speed *= 13333; break; case 1: min_vid_speed *= 10000; break; case 3: min_vid_speed *= 6666; break; default: return; break; } if (min_vid_speed >= highest_speed) return; /* Calculate kHz for one voltage step */ kHz_step = (highest_speed - min_vid_speed) / numvscales; j = 0; while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { speed = longhaul_table[j].frequency; if (speed > min_vid_speed) pos = (speed - min_vid_speed) / kHz_step + minvid.pos; else pos = minvid.pos; f_msr_table[longhaul_table[j].index].vrm = mV_vrm_table[pos]; f_msr_table[longhaul_table[j].index].pos = pos; j++; } longhaul_pos = maxvid.pos; can_scale_voltage = 1; printk(KERN_INFO PFX "Voltage scaling enabled. 
" "Use of \"conservative\" governor is highly recommended.\n"); } static int longhaul_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, longhaul_table); } static int longhaul_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int table_index = 0; unsigned int new_clock_ratio = 0; if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index)) return -EINVAL; new_clock_ratio = longhaul_table[table_index].index & 0xFF; longhaul_setstate(new_clock_ratio); return 0; } static unsigned int longhaul_get(unsigned int cpu) { if (cpu) return 0; return calc_speed(longhaul_get_cpu_mult()); } static acpi_status longhaul_walk_callback(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { struct acpi_device *d; if ( acpi_bus_get_device(obj_handle, &d) ) { return 0; } *return_value = (void *)acpi_driver_data(d); return 1; } /* VIA don't support PM2 reg, but have something similar */ static int enable_arbiter_disable(void) { struct pci_dev *dev; int status; int reg; u8 pci_cmd; status = 1; /* Find PLE133 host bridge */ reg = 0x78; dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, NULL); /* Find CLE266 host bridge */ if (dev == NULL) { reg = 0x76; dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL); /* Find CN400 V-Link host bridge */ if (dev == NULL) dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL); } if (dev != NULL) { /* Enable access to port 0x22 */ pci_read_config_byte(dev, reg, &pci_cmd); if (!(pci_cmd & 1<<7)) { pci_cmd |= 1<<7; pci_write_config_byte(dev, reg, pci_cmd); pci_read_config_byte(dev, reg, &pci_cmd); if (!(pci_cmd & 1<<7)) { printk(KERN_ERR PFX "Can't enable access to port 0x22.\n"); status = 0; } } pci_dev_put(dev); return status; } return 0; } static int longhaul_setup_vt8235(void) { struct pci_dev *dev; u8 pci_cmd; /* Find VT8235 southbridge */ dev = 
pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL); if (dev != NULL) { /* Set transition time to max */ pci_read_config_byte(dev, 0xec, &pci_cmd); pci_cmd &= ~(1 << 2); pci_write_config_byte(dev, 0xec, pci_cmd); pci_read_config_byte(dev, 0xe4, &pci_cmd); pci_cmd &= ~(1 << 7); pci_write_config_byte(dev, 0xe4, pci_cmd); pci_read_config_byte(dev, 0xe5, &pci_cmd); pci_cmd |= 1 << 7; pci_write_config_byte(dev, 0xe5, pci_cmd); pci_dev_put(dev); return 1; } return 0; } static int __init longhaul_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *c = cpu_data; char *cpuname=NULL; int ret; u32 lo, hi; int vt8235_present; /* Check what we have on this motherboard */ switch (c->x86_model) { case 6: cpu_model = CPU_SAMUEL; cpuname = "C3 'Samuel' [C5A]"; longhaul_version = TYPE_LONGHAUL_V1; memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio)); memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr)); break; case 7: switch (c->x86_mask) { case 0: longhaul_version = TYPE_LONGHAUL_V1; cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; /* Note, this is not a typo, early Samuel2's had * Samuel1 ratios. */ memcpy(clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio)); memcpy(eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr)); break; case 1 ... 
15: longhaul_version = TYPE_LONGHAUL_V1; if (c->x86_mask < 8) { cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; } else { cpu_model = CPU_EZRA; cpuname = "C3 'Ezra' [C5C]"; } memcpy(clock_ratio, ezra_clock_ratio, sizeof(ezra_clock_ratio)); memcpy(eblcr_table, ezra_eblcr, sizeof(ezra_eblcr)); break; } break; case 8: cpu_model = CPU_EZRA_T; cpuname = "C3 'Ezra-T' [C5M]"; longhaul_version = TYPE_POWERSAVER; numscales=32; memcpy (clock_ratio, ezrat_clock_ratio, sizeof(ezrat_clock_ratio)); memcpy (eblcr_table, ezrat_eblcr, sizeof(ezrat_eblcr)); break; case 9: longhaul_version = TYPE_POWERSAVER; numscales = 32; memcpy(clock_ratio, nehemiah_clock_ratio, sizeof(nehemiah_clock_ratio)); memcpy(eblcr_table, nehemiah_eblcr, sizeof(nehemiah_eblcr)); switch (c->x86_mask) { case 0 ... 1: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah A' [C5XLOE]"; break; case 2 ... 4: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah B' [C5XLOH]"; break; case 5 ... 15: cpu_model = CPU_NEHEMIAH_C; cpuname = "C3 'Nehemiah C' [C5P]"; break; } break; default: cpuname = "Unknown"; break; } /* Check Longhaul ver. 2 */ if (longhaul_version == TYPE_LONGHAUL_V2) { rdmsr(MSR_VIA_LONGHAUL, lo, hi); if (lo == 0 && hi == 0) /* Looks like MSR isn't present */ longhaul_version = TYPE_LONGHAUL_V1; } printk (KERN_INFO PFX "VIA %s CPU detected. 
", cpuname); switch (longhaul_version) { case TYPE_LONGHAUL_V1: case TYPE_LONGHAUL_V2: printk ("Longhaul v%d supported.\n", longhaul_version); break; case TYPE_POWERSAVER: printk ("Powersaver supported.\n"); break; }; /* Doesn't hurt */ vt8235_present = longhaul_setup_vt8235(); /* Find ACPI data for processor */ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, &longhaul_walk_callback, NULL, (void *)&pr); /* Check ACPI support for C3 state */ if (pr != NULL && longhaul_version == TYPE_POWERSAVER) { cx = &pr->power.states[ACPI_STATE_C3]; if (cx->address > 0 && cx->latency <= 1000) { longhaul_flags |= USE_ACPI_C3; goto print_support_type; } } /* Check if northbridge is friendly */ if (enable_arbiter_disable()) { longhaul_flags |= USE_NORTHBRIDGE; goto print_support_type; } /* Use VT8235 southbridge if present */ if (longhaul_version == TYPE_POWERSAVER && vt8235_present) { longhaul_flags |= USE_VT8235; goto print_support_type; } /* Check ACPI support for bus master arbiter disable */ if ((pr == NULL) || !(pr->flags.bm_control)) { printk(KERN_ERR PFX "No ACPI support. 
Unsupported northbridge.\n"); return -ENODEV; } print_support_type: if (longhaul_flags & USE_NORTHBRIDGE) printk (KERN_INFO PFX "Using northbridge support.\n"); else if (longhaul_flags & USE_VT8235) printk (KERN_INFO PFX "Using VT8235 support.\n"); else printk (KERN_INFO PFX "Using ACPI support.\n"); ret = longhaul_get_ranges(); if (ret != 0) return ret; if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) longhaul_setup_voltagescaling(); policy->governor = CPUFREQ_DEFAULT_GOVERNOR; policy->cpuinfo.transition_latency = 200000; /* nsec */ policy->cur = calc_speed(longhaul_get_cpu_mult()); ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table); if (ret) return ret; cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); return 0; } static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static struct freq_attr* longhaul_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver longhaul_driver = { .verify = longhaul_verify, .target = longhaul_target, .get = longhaul_get, .init = longhaul_cpu_init, .exit = __devexit_p(longhaul_cpu_exit), .name = "longhaul", .owner = THIS_MODULE, .attr = longhaul_attr, }; static int __init longhaul_init(void) { struct cpuinfo_x86 *c = cpu_data; if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) return -ENODEV; #ifdef CONFIG_SMP if (num_online_cpus() > 1) { printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n"); return -ENODEV; } #endif #ifdef CONFIG_X86_IO_APIC if (cpu_has_apic) { printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n"); return -ENODEV; } #endif switch (c->x86_model) { case 6 ... 
9: return cpufreq_register_driver(&longhaul_driver); case 10: printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n"); default: ;; } return -ENODEV; } static void __exit longhaul_exit(void) { int i; for (i=0; i < numscales; i++) { if (clock_ratio[i] == maxmult) { longhaul_setstate(i); break; } } cpufreq_unregister_driver(&longhaul_driver); kfree(longhaul_table); } module_param (scale_voltage, int, 0644); MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); MODULE_LICENSE ("GPL"); late_initcall(longhaul_init); module_exit(longhaul_exit);
gpl-2.0
aktau/pcsx2
fps2bios/kernel/eeload/eeelf.c
35
9762
#include "romdir.h" #include "eedebug.h" typedef struct { u8 e_ident[16]; //0x7f,"ELF" (ELF file identifier) u16 e_type; //ELF type: 0=NONE, 1=REL, 2=EXEC, 3=SHARED, 4=CORE u16 e_machine; //Processor: 8=MIPS R3000 u32 e_version; //Version: 1=current u32 e_entry; //Entry point address u32 e_phoff; //Start of program headers (offset from file start) u32 e_shoff; //Start of section headers (offset from file start) u32 e_flags; //Processor specific flags = 0x20924001 noreorder, mips u16 e_ehsize; //ELF header size (0x34 = 52 bytes) u16 e_phentsize; //Program headers entry size u16 e_phnum; //Number of program headers u16 e_shentsize; //Section headers entry size u16 e_shnum; //Number of section headers u16 e_shstrndx; //Section header stringtable index } ELF_HEADER; typedef struct { u32 p_type; //see notes1 u32 p_offset; //Offset from file start to program segment. u32 p_vaddr; //Virtual address of the segment u32 p_paddr; //Physical address of the segment u32 p_filesz; //Number of bytes in the file image of the segment u32 p_memsz; //Number of bytes in the memory image of the segment u32 p_flags; //Flags for segment u32 p_align; //Alignment. The address of 0x08 and 0x0C must fit this alignment. 0=no alignment } ELF_PHR; /* notes1 ------ 0=Inactive 1=Load the segment into memory, no. of bytes specified by 0x10 and 0x14 2=Dynamic linking 3=Interpreter. The array element must specify a path name 4=Note. The array element must specify the location and size of aux. info 5=reserved 6=The array element must specify location and size of the program header table. */ typedef struct { u32 sh_name; //No. to the index of the Section header stringtable index u32 sh_type; //See notes2 u32 sh_flags; //see notes3 u32 sh_addr; //Section start address u32 sh_offset; //Offset from start of file to section u32 sh_size; //Size of section u32 sh_link; //Section header table index link u32 sh_info; //Info u32 sh_addralign; //Alignment. The adress of 0x0C must fit this alignment. 
0=no alignment. u32 sh_entsize; //Fixed size entries. } ELF_SHR; /* notes 2 ------- Type: 0=Inactive 1=PROGBITS 2=SYMTAB symbol table 3=STRTAB string table 4=RELA relocation entries 5=HASH hash table 6=DYNAMIC dynamic linking information 7=NOTE 8=NOBITS 9=REL relocation entries 10=SHLIB 0x70000000=LOPROC processor specifc 0x7fffffff=HIPROC 0x80000000=LOUSER lower bound 0xffffffff=HIUSER upper bound notes 3 ------- Section Flags: (1 bit, you may combine them like 3 = alloc & write permission) 1=Write section contains data the is be writeable during execution. 2=Alloc section occupies memory during execution 4=Exec section contains executable instructions 0xf0000000=Mask bits processor-specific */ typedef struct { u32 st_name; u32 st_value; u32 st_size; u8 st_info; u8 st_other; u16 st_shndx; } Elf32_Sym; #define ELF32_ST_TYPE(i) ((i)&0xf) typedef struct { u32 r_offset; u32 r_info; } Elf32_Rel; char *sections_names; ELF_HEADER *elfHeader; ELF_PHR *elfProgH; ELF_SHR *elfSectH; u8 *elfdata; int elfsize; static void __memcpy(void *dest, const void *src, int n) { const u8 *s = (u8*)src; u8 *d = (u8*)dest; while (n) { *d++ = *s++; n--; } } int loadHeaders() { elfHeader = (ELF_HEADER*)elfdata; if ((elfHeader->e_shentsize != sizeof(ELF_SHR)) && (elfHeader->e_shnum > 0)) { return -1; } #ifdef ELF_LOG ELF_LOG( "type: " ); #endif switch( elfHeader->e_type ) { default: #ifdef ELF_LOG ELF_LOG( "unknown %x", elfHeader->e_type ); #endif break; case 0x0: #ifdef ELF_LOG ELF_LOG( "no file type" ); #endif break; case 0x1: #ifdef ELF_LOG ELF_LOG( "relocatable" ); #endif break; case 0x2: #ifdef ELF_LOG ELF_LOG( "executable" ); #endif break; } #ifdef ELF_LOG ELF_LOG( "\n" ); ELF_LOG( "machine: " ); #endif switch ( elfHeader->e_machine ) { default: #ifdef ELF_LOG ELF_LOG( "unknown" ); #endif break; case 0x8: #ifdef ELF_LOG ELF_LOG( "mips_rs3000" ); #endif break; } #ifdef ELF_LOG ELF_LOG("\n"); ELF_LOG("version: %d\n",elfHeader->e_version); ELF_LOG("entry: %08x\n",elfHeader->e_entry); 
ELF_LOG("flags: %08x\n",elfHeader->e_flags); ELF_LOG("eh size: %08x\n",elfHeader->e_ehsize); ELF_LOG("ph off: %08x\n",elfHeader->e_phoff); ELF_LOG("ph entsiz: %08x\n",elfHeader->e_phentsize); ELF_LOG("ph num: %08x\n",elfHeader->e_phnum); ELF_LOG("sh off: %08x\n",elfHeader->e_shoff); ELF_LOG("sh entsiz: %08x\n",elfHeader->e_shentsize); ELF_LOG("sh num: %08x\n",elfHeader->e_shnum); ELF_LOG("sh strndx: %08x\n",elfHeader->e_shstrndx); ELF_LOG("\n"); #endif return 0; } int loadProgramHeaders() { int i; if (elfHeader->e_phnum == 0) { return 0; } if (elfHeader->e_phentsize != sizeof(ELF_PHR)) { return -1; } elfProgH = (ELF_PHR*)&elfdata[elfHeader->e_phoff]; for ( i = 0 ; i < elfHeader->e_phnum ; i++ ) { #ifdef ELF_LOG ELF_LOG( "Elf32 Program Header\n" ); ELF_LOG( "type: " ); #endif switch ( elfProgH[ i ].p_type ) { default: #ifdef ELF_LOG ELF_LOG( "unknown %x", (int)elfProgH[ i ].p_type ); #endif break; case 0x1: #ifdef ELF_LOG ELF_LOG("load"); #endif /* if ( elfHeader->e_shnum == 0 ) {*/ if (elfProgH[ i ].p_offset < elfsize) { int size; if ((elfProgH[ i ].p_filesz + elfProgH[ i ].p_offset) > elfsize) { size = elfsize - elfProgH[ i ].p_offset; } else { size = elfProgH[ i ].p_filesz; } // __printf("loading program to %x\n", elfProgH[ i ].p_paddr + elfbase); __memcpy(elfProgH[ i ].p_paddr, &elfdata[elfProgH[ i ].p_offset], size); } #ifdef ELF_LOG ELF_LOG("\t*LOADED*"); #endif // } break; } #ifdef ELF_LOG ELF_LOG("\n"); ELF_LOG("offset: %08x\n",(int)elfProgH[i].p_offset); ELF_LOG("vaddr: %08x\n",(int)elfProgH[i].p_vaddr); ELF_LOG("paddr: %08x\n",elfProgH[i].p_paddr); ELF_LOG("file size: %08x\n",elfProgH[i].p_filesz); ELF_LOG("mem size: %08x\n",elfProgH[i].p_memsz); ELF_LOG("flags: %08x\n",elfProgH[i].p_flags); ELF_LOG("palign: %08x\n",elfProgH[i].p_align); ELF_LOG("\n"); #endif } return 0; } int loadSectionHeaders() { int i; int i_st = -1; int i_dt = -1; if (elfHeader->e_shnum == 0) { return -1; } elfSectH = (ELF_SHR*)&elfdata[elfHeader->e_shoff]; if ( elfHeader->e_shstrndx 
< elfHeader->e_shnum ) { sections_names = (char *)&elfdata[elfSectH[ elfHeader->e_shstrndx ].sh_offset]; } for ( i = 0 ; i < elfHeader->e_shnum ; i++ ) { #ifdef ELF_LOG ELF_LOG( "Elf32 Section Header [%x] %s", i, &sections_names[ elfSectH[ i ].sh_name ] ); #endif /* if ( elfSectH[i].sh_flags & 0x2 ) { if (elfSectH[i].sh_offset < elfsize) { int size; if ((elfSectH[i].sh_size + elfSectH[i].sh_offset) > elfsize) { size = elfsize - elfSectH[i].sh_offset; } else { size = elfSectH[i].sh_size; } memcpy(&psM[ elfSectH[ i ].sh_addr &0x1ffffff ], &elfdata[elfSectH[i].sh_offset], size); } #ifdef ELF_LOG ELF_LOG( "\t*LOADED*" ); #endif }*/ #ifdef ELF_LOG ELF_LOG("\n"); ELF_LOG("type: "); #endif switch ( elfSectH[ i ].sh_type ) { default: #ifdef ELF_LOG ELF_LOG("unknown %08x",elfSectH[i].sh_type); #endif break; case 0x0: #ifdef ELF_LOG ELF_LOG("null"); #endif break; case 0x1: #ifdef ELF_LOG ELF_LOG("progbits"); #endif break; case 0x2: #ifdef ELF_LOG ELF_LOG("symtab"); #endif break; case 0x3: #ifdef ELF_LOG ELF_LOG("strtab"); #endif break; case 0x4: #ifdef ELF_LOG ELF_LOG("rela"); #endif break; case 0x8: #ifdef ELF_LOG ELF_LOG("no bits"); #endif break; case 0x9: #ifdef ELF_LOG ELF_LOG("rel"); #endif break; } #ifdef ELF_LOG ELF_LOG("\n"); ELF_LOG("flags: %08x\n", elfSectH[i].sh_flags); ELF_LOG("addr: %08x\n", elfSectH[i].sh_addr); ELF_LOG("offset: %08x\n", elfSectH[i].sh_offset); ELF_LOG("size: %08x\n", elfSectH[i].sh_size); ELF_LOG("link: %08x\n", elfSectH[i].sh_link); ELF_LOG("info: %08x\n", elfSectH[i].sh_info); ELF_LOG("addralign: %08x\n", elfSectH[i].sh_addralign); ELF_LOG("entsize: %08x\n", elfSectH[i].sh_entsize); #endif // dump symbol table if (elfSectH[i].sh_type == 0x02) { i_st = i; i_dt = elfSectH[i].sh_link; } /* if (elfSectH[i].sh_type == 0x01) { int size = elfSectH[i].sh_size / 4; u32 *ptr = (u32*)&psxM[(elfSectH[i].sh_addr + irx_addr) & 0x1fffff]; while (size) { if (*ptr == 0x41e00000) { // import func int ret = iopSetImportFunc(ptr+1); size-= ret; ptr+= ret; } if 
(*ptr == 0x41c00000) { // export func int ret = iopSetExportFunc(ptr+1); size-= ret; ptr+= ret; } size--; ptr++; } } */ /* if (!strcmp(".data", &sections_names[elfSectH[i].sh_name])) { // seems so.. psxRegs.GPR.n.gp = 0x8000 + irx_addr + elfSectH[i].sh_addr; }*/ } return 0; } u32 loadElfFile(char *filename, struct elfinfo *info) { struct rominfo ri; char str[256]; char str2[256]; int i; __printf("loadElfFile: %s\n", filename); if (romdirGetFile(filename, &ri) == NULL) { __printf("file %s not found!!\n", filename); return -1; } elfdata = (u8*)(0xbfc00000 + ri.fileOffset); elfsize = ri.fileSize; loadHeaders(); loadProgramHeaders(); loadSectionHeaders(); // __printf("loadElfFile: e_entry=%x\n", elfHeader->e_entry); return elfHeader->e_entry; }
gpl-2.0
leemchaehoon/linux_m
mm/mprotect.c
291
10801
/* * mm/mprotect.c * * (C) Copyright 1994 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig * * Address space accounting code <alan@lxorguk.ukuu.org.uk> * (C) Copyright 2002 Red Hat Inc, All Rights Reserved */ #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/fs.h> #include <linux/highmem.h> #include <linux/security.h> #include <linux/mempolicy.h> #include <linux/personality.h> #include <linux/syscalls.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include <linux/perf_event.h> #include <linux/ksm.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> /* * For a prot_numa update we only hold mmap_sem for read so there is a * potential race with faulting where a pmd was temporarily none. This * function checks for a transhuge pmd under the appropriate lock. It * returns a pte if it was successfully locked or NULL if it raced with * a transhuge insertion. 
 */
/*
 * Take the PTE page-table lock for @addr.  For a prot_numa update only
 * mmap_sem-for-read is held, so a transhuge pmd may appear under us:
 * re-check the pmd under its own lock first and bail out (return NULL)
 * if it raced with a transhuge insertion or was cleared.
 *
 * Returns the mapped+locked pte (with *ptl set) or NULL on the race.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

/*
 * Apply @newprot to every pte in [addr, end) under one pmd.  Also
 * downgrades writable migration entries to read-only ones, since a
 * protection check on a migration target is not practical here.
 * Returns the number of entries actually modified.
 */
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			/*
			 * A NUMA-hinting update must not revoke write
			 * permission the mapping already had; remember it
			 * so it can be re-applied after pte_modify().
			 */
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mkwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

/*
 * Walk the pmds under one pud, handling transparent huge pmds in place
 * when the range covers them entirely and splitting them otherwise.
 * The mmu notifier range_start is deferred until the first populated
 * pmd so empty walks emit no notification at all.
 */
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

/* Walk the puds under one pgd; delegates real work to change_pmd_range(). */
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

/*
 * Top-level page-table walk for one vma range.  Marks a TLB flush as
 * pending for the duration of the walk and only issues the flush if
 * some entry was actually changed.
 */
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

/*
 * Change protection on [start, end) of @vma, dispatching hugetlb vmas
 * to their dedicated helper.  Returns the number of updated entries.
 */
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);

	return pages;
}

/*
 * Apply @newflags to [start, end) of @vma: account any new writable
 * private commit, merge with neighbours or split the vma as needed,
 * then rewrite the page protections.  Called with mmap_sem held for
 * write; *pprev is updated to the vma covering the range on success.
 * Returns 0 or a -errno (charged memory is un-accounted on failure).
 */
int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mapping were accounted for
	 * even if read-only so there is no need to account for them here
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * mprotect(2): validate the arguments, then walk every vma touching
 * [start, start+len) and apply the new protection via mprotect_fixup().
 * PROT_GROWSDOWN/PROT_GROWSUP extend the affected range along a
 * growable stack vma.  Any vma of the range being unmapped yields
 * -ENOMEM, matching the syscall's documented behaviour.
 */
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
gpl-2.0
gundal/zerofltetmo
fs/proc/task_mmu.c
291
36716
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

/* Emit the Vm* memory-usage lines (VmPeak..VmSwap) for /proc/PID/status. */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss. Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher. Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10),
		text,
		lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}

/* Total virtual size of @mm in bytes. */
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

/*
 * Fill the /proc/PID/statm counters (all in pages) and return total_vm.
 * Note "resident" here is file pages + anon pages as counted by mm.
 */
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

/*
 * Pad the maps line out to a fixed column (pointer-width dependent)
 * before the file name / vma name field; always emits at least one space.
 */
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

#ifdef CONFIG_NUMA
/*
 * These functions are for numa_maps but called in generic **maps seq_file
 * ->start(), ->stop() ops.
 *
 * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
 * Each mempolicy object is controlled by reference counting. The problem here
 * is how to avoid accessing dead mempolicy object.
 *
 * Because we're holding mmap_sem while reading seq_file, it's safe to access
 * each vma's mempolicy, no vma objects will never drop refs to mempolicy.
 *
 * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
 * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
 * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
 * gurantee the task never exits under us. But taking task_lock() around
 * get_vma_plicy() causes lock order problem.
 *
 * To access task->mempolicy without lock, we hold a reference count of an
 * object pointed by task->mempolicy and remember it. This will guarantee
 * that task->mempolicy points to an alive object or NULL in numa_maps accesses.
 */
/* Pin the task's mempolicy (see comment above) for the seq_file walk. */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = task->mempolicy;
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}

/* Drop the reference taken by hold_task_mempolicy(). */
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

/*
 * Print "[anon:<name>]" for a vma carrying a userspace-supplied anon
 * name.  The name string lives in *user* memory, so it is fetched page
 * by page with get_user_pages(); on fault "<fault>]" is emitted instead.
 * At most NAME_MAX bytes are copied.
 */
static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char __user *name = vma_get_anon_name(vma);
	struct mm_struct *mm = vma->vm_mm;

	unsigned long page_start_vaddr;
	unsigned long page_offset;
	unsigned long num_pages;
	unsigned long max_len = NAME_MAX;
	int i;

	page_start_vaddr = (unsigned long)name & PAGE_MASK;
	page_offset = (unsigned long)name - page_start_vaddr;
	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);

	seq_puts(m, "[anon:");

	for (i = 0; i < num_pages; i++) {
		int len;
		int write_len;
		const char *kaddr;
		long pages_pinned;
		struct page *page;

		pages_pinned = get_user_pages(current, mm, page_start_vaddr,
				1, 0, 0, &page, NULL);
		if (pages_pinned < 1) {
			seq_puts(m, "<fault>]");
			return;
		}

		kaddr = (const char *)kmap(page);
		len = min(max_len, PAGE_SIZE - page_offset);
		write_len = strnlen(kaddr + page_offset, len);
		seq_write(m, kaddr + page_offset, write_len);
		kunmap(page);
		put_page(page);

		/* if strnlen hit a null terminator then we're done */
		if (write_len != len)
			break;

		max_len -= len;
		page_offset = 0;
		page_start_vaddr += PAGE_SIZE;
	}

	seq_putc(m, ']');
}

/*
 * Release the resources taken by m_start() for a real vma (not the
 * gate/tail vma): the mempolicy ref, mmap_sem and the mm itself.
 */
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		release_task_mempolicy(priv);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

/*
 * seq_file ->start() for the maps/smaps/numa_maps files: take a ref on
 * the task and its mm, grab mmap_sem for read and locate the vma for
 * position *pos.  m->version caches the last address shown so a re-read
 * can restart via find_vma() instead of rescanning the list.  Returns
 * the vma, the gate "tail" vma at end-of-list, NULL when done, or an
 * ERR_PTR.
 */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;
	hold_task_mempolicy(priv);
	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	release_task_mempolicy(priv);
	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

/*
 * seq_file ->next(): advance to the following vma, falling back to the
 * gate/tail vma once, then NULL.  Releases the mm when leaving the
 * normal vma list.
 */
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

/* seq_file ->stop(): undo m_start()'s locking/refcounts. */
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

/*
 * Common open() helper for the maps-style files: allocate the private
 * walk state, record the target pid and hook up the given seq_operations.
 */
static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

/*
 * Print one /proc/PID/maps line for @vma: address range, permissions,
 * offset, device, inode, and then the backing file path or a special
 * name ([vdso]/[heap]/[stack]/[stack:TID]/[anon:...]).  @is_pid selects
 * the process-wide vs per-thread stack naming convention.
 */
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	/* %n records the column reached, used by pad_len_spaces() below */
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);

		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
			goto done;
		}

		if (vma_get_anon_name(vma)) {
			pad_len_spaces(m, len);
			seq_print_vma_name(m, vma);
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

/*
 * seq_file ->show() for maps: print the vma and, if the line fit in the
 * buffer, remember its start address in m->version for restartability.
 */
static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it. So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * - 1M 3-user-pages add up to 8KB errors;
 * - supports mapcount up to 2^24, or 16M;
 * - supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

#ifdef CONFIG_PROC_PAGE_MONITOR
/* Accumulator for one vma's smaps statistics; sizes are in bytes. */
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;		/* fixed point, see PSS_SHIFT above */
};


/*
 * Account one pte (or one huge pmd treated as a pte, with @ptent_size =
 * HPAGE_PMD_SIZE) into the mem_size_stats in walk->private.  Shared vs
 * private is decided by page_mapcount(); PSS gets size/mapcount.
 */
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))
			mss->swap += ptent_size;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(ptent)) {
		if (pte_to_pgoff(ptent) != pgoff)
			mss->nonlinear += ptent_size;
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

/*
 * mm_walk pmd callback for smaps: account a whole transhuge pmd in one
 * shot, otherwise walk the individual ptes under the pte lock.
 */
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

/* Print the two-letter VmFlags mnemonic line for /proc/PID/smaps. */
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case if we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)] = "rd",
		[ilog2(VM_WRITE)] = "wr",
		[ilog2(VM_EXEC)] = "ex",
		[ilog2(VM_SHARED)] = "sh",
		[ilog2(VM_MAYREAD)] = "mr",
		[ilog2(VM_MAYWRITE)] = "mw",
		[ilog2(VM_MAYEXEC)] = "me",
		[ilog2(VM_MAYSHARE)] = "ms",
		[ilog2(VM_GROWSDOWN)] = "gd",
		[ilog2(VM_PFNMAP)] = "pf",
		[ilog2(VM_DENYWRITE)] = "dw",
		[ilog2(VM_LOCKED)] = "lo",
		[ilog2(VM_IO)] = "io",
		[ilog2(VM_SEQ_READ)] = "sr",
		[ilog2(VM_RAND_READ)] = "rr",
		[ilog2(VM_DONTCOPY)] = "dc",
		[ilog2(VM_DONTEXPAND)] = "de",
		[ilog2(VM_ACCOUNT)] = "ac",
		[ilog2(VM_NORESERVE)] = "nr",
		[ilog2(VM_HUGETLB)] = "ht",
		[ilog2(VM_NONLINEAR)] = "nl",
		[ilog2(VM_ARCH_1)] = "ar",
		[ilog2(VM_DONTDUMP)] = "dd",
		[ilog2(VM_MIXEDMAP)] = "mm",
		[ilog2(VM_HUGEPAGE)] = "hg",
		[ilog2(VM_NOHUGEPAGE)] = "nh",
		[ilog2(VM_MERGEABLE)] = "mg",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

/*
 * seq_file ->show() for smaps: gather per-vma statistics via
 * smaps_pte_range() and print the detailed block for one vma.
 */
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size: %8lu kB\n"
		   "Rss: %8lu kB\n"
		   "Pss: %8lu kB\n"
		   "Shared_Clean: %8lu kB\n"
		   "Shared_Dirty: %8lu kB\n"
		   "Private_Clean: %8lu kB\n"
		   "Private_Dirty: %8lu kB\n"
		   "Referenced: %8lu kB\n"
		   "Anonymous: %8lu kB\n"
		   "AnonHugePages: %8lu kB\n"
		   "Swap: %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize: %8lu kB\n"
		   "Locked: %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean >> 10,
		   mss.shared_dirty >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear: %8lu kB\n",
				mss.nonlinear >> 10);

	show_smap_vma_flags(m, vma);

	if (vma_get_anon_name(vma)) {
		seq_puts(m, "Name: ");
		seq_print_vma_name(m, vma);
		seq_putc(m, '\n');
	}

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * mm_walk pmd callback for clear_refs: split any transhuge pmd, then
 * clear the accessed/young bit on every present pte and the referenced
 * flag on its page.  The vma being walked is passed in walk->private.
 */
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

/* Values accepted by /proc/PID/clear_refs; see the comment in the loop. */
#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3

/*
 * write() handler for /proc/PID/clear_refs: parse a single small
 * integer from userspace, then walk the selected vmas of the target
 * task clearing referenced bits; hugetlb vmas are skipped.  Flushes
 * the TLB afterwards since young bits were cleared.
 */
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * Anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

/* One 64-bit /proc/PID/pagemap entry (bit layout documented below). */
typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_FILE             PM_STATUS(1LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

/*
 * Append one entry to the pagemap read buffer; returns
 * PM_END_OF_BUFFER when it fills up so the walk can stop early.
 */
static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

/* mm_walk hole callback: emit PM_NOT_PRESENT for every unmapped page. */
static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	return err;
}

/*
 * Encode one pte into a pagemap entry: present -> pfn, swapped ->
 * packed (type, offset), and PM_FILE for file-backed pages.
 */
static void pte_to_pagemap_entry(pagemap_entry_t *pme,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame, flags;
	struct page *page = NULL;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags = PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags = PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		*pme = make_pme(PM_NOT_PRESENT);
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;

	*pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Encode the page at @offset within a transhuge pmd as a pagemap entry. */
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
					pmd_t pmd, int offset)
{
	/*
	 * Currently pmd for thp is always present because thp can not be
	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
	 * This if-check is just to prepare for future implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
						pmd_t pmd, int offset)
{
}
#endif

/*
 * mm_walk pmd callback for pagemap: emit one entry per page in
 * [addr, end), handling transhuge pmds as a unit and tracking the
 * covering vma as the walk crosses vma boundaries.
 */
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT);
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, vma, addr, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* Encode the page at @offset within a hugetlb pte as a pagemap entry. */
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN.
This allows determining * precisely which pages are mapped (or in swap) and comparing mapped * pages between processes. * * Efficient users of this interface will use /proc/pid/maps to * determine which areas of memory are actually mapped and llseek to * skip over unmapped regions. */ static ssize_t pagemap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task = get_proc_task(file_inode(file)); struct mm_struct *mm; struct pagemapread pm; int ret = -ESRCH; struct mm_walk pagemap_walk = {}; unsigned long src; unsigned long svpfn; unsigned long start_vaddr; unsigned long end_vaddr; int copied = 0; if (!task) goto out; ret = -EINVAL; /* file position must be aligned */ if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) goto out_task; ret = 0; if (!count) goto out_task; pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); ret = -ENOMEM; if (!pm.buffer) goto out_task; mm = mm_access(task, PTRACE_MODE_READ); ret = PTR_ERR(mm); if (!mm || IS_ERR(mm)) goto out_free; pagemap_walk.pmd_entry = pagemap_pte_range; pagemap_walk.pte_hole = pagemap_pte_hole; #ifdef CONFIG_HUGETLB_PAGE pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; #endif pagemap_walk.mm = mm; pagemap_walk.private = &pm; src = *ppos; svpfn = src / PM_ENTRY_BYTES; start_vaddr = svpfn << PAGE_SHIFT; end_vaddr = TASK_SIZE_OF(task); /* watch out for wraparound */ if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT) start_vaddr = end_vaddr; /* * The odds are that this will stop walking way * before end_vaddr, because the length of the * user buffer is tracked in "pm", and the walk * will stop when we hit the end of the buffer. */ ret = 0; while (count && (start_vaddr < end_vaddr)) { int len; unsigned long end; pm.pos = 0; end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; /* overflow ? 
*/ if (end < start_vaddr || end > end_vaddr) end = end_vaddr; down_read(&mm->mmap_sem); ret = walk_page_range(start_vaddr, end, &pagemap_walk); up_read(&mm->mmap_sem); start_vaddr = end; len = min(count, PM_ENTRY_BYTES * pm.pos); if (copy_to_user(buf, pm.buffer, len)) { ret = -EFAULT; goto out_mm; } copied += len; buf += len; count -= len; } *ppos += copied; if (!ret || ret == PM_END_OF_BUFFER) ret = copied; out_mm: mmput(mm); out_free: kfree(pm.buffer); out_task: put_task_struct(task); out: return ret; } const struct file_operations proc_pagemap_operations = { .llseek = mem_lseek, /* borrow this */ .read = pagemap_read, }; #endif /* CONFIG_PROC_PAGE_MONITOR */ #ifdef CONFIG_NUMA struct numa_maps { struct vm_area_struct *vma; unsigned long pages; unsigned long anon; unsigned long active; unsigned long writeback; unsigned long mapcount_max; unsigned long dirty; unsigned long swapcache; unsigned long node[MAX_NUMNODES]; }; struct numa_maps_private { struct proc_maps_private proc_maps; struct numa_maps md; }; static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, unsigned long nr_pages) { int count = page_mapcount(page); md->pages += nr_pages; if (pte_dirty || PageDirty(page)) md->dirty += nr_pages; if (PageSwapCache(page)) md->swapcache += nr_pages; if (PageActive(page) || PageUnevictable(page)) md->active += nr_pages; if (PageWriteback(page)) md->writeback += nr_pages; if (PageAnon(page)) md->anon += nr_pages; if (count > md->mapcount_max) md->mapcount_max = count; md->node[page_to_nid(page)] += nr_pages; } static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, unsigned long addr) { struct page *page; int nid; if (!pte_present(pte)) return NULL; page = vm_normal_page(vma, addr, pte); if (!page) return NULL; if (PageReserved(page)) return NULL; nid = page_to_nid(page); if (!node_isset(nid, node_states[N_MEMORY])) return NULL; return page; } static int gather_pte_stats(pmd_t *pmd, unsigned long addr, unsigned long 
end, struct mm_walk *walk) { struct numa_maps *md; spinlock_t *ptl; pte_t *orig_pte; pte_t *pte; md = walk->private; if (pmd_trans_huge_lock(pmd, md->vma) == 1) { pte_t huge_pte = *(pte_t *)pmd; struct page *page; page = can_gather_numa_stats(huge_pte, md->vma, addr); if (page) gather_stats(page, md, pte_dirty(huge_pte), HPAGE_PMD_SIZE/PAGE_SIZE); spin_unlock(&walk->mm->page_table_lock); return 0; } if (pmd_trans_unstable(pmd)) return 0; orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); do { struct page *page = can_gather_numa_stats(*pte, md->vma, addr); if (!page) continue; gather_stats(page, md, pte_dirty(*pte), 1); } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(orig_pte, ptl); return 0; } #ifdef CONFIG_HUGETLB_PAGE static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct numa_maps *md; struct page *page; if (pte_none(*pte)) return 0; page = pte_page(*pte); if (!page) return 0; md = walk->private; gather_stats(page, md, pte_dirty(*pte), 1); return 0; } #else static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { return 0; } #endif /* * Display pages allocated per node and memory policy via /proc. */ static int show_numa_map(struct seq_file *m, void *v, int is_pid) { struct numa_maps_private *numa_priv = m->private; struct proc_maps_private *proc_priv = &numa_priv->proc_maps; struct vm_area_struct *vma = v; struct numa_maps *md = &numa_priv->md; struct file *file = vma->vm_file; struct task_struct *task = proc_priv->task; struct mm_struct *mm = vma->vm_mm; struct mm_walk walk = {}; struct mempolicy *pol; int n; char buffer[50]; if (!mm) return 0; /* Ensure we start with an empty set of numa_maps statistics. 
*/ memset(md, 0, sizeof(*md)); md->vma = vma; walk.hugetlb_entry = gather_hugetbl_stats; walk.pmd_entry = gather_pte_stats; walk.private = md; walk.mm = mm; pol = get_vma_policy(task, vma, vma->vm_start); mpol_to_str(buffer, sizeof(buffer), pol); mpol_cond_put(pol); seq_printf(m, "%08lx %s", vma->vm_start, buffer); if (file) { seq_printf(m, " file="); seq_path(m, &file->f_path, "\n\t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_printf(m, " heap"); } else { pid_t tid = vm_is_stack(task, vma, is_pid); if (tid != 0) { /* * Thread stack in /proc/PID/task/TID/maps or * the main process stack. */ if (!is_pid || (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack)) seq_printf(m, " stack"); else seq_printf(m, " stack:%d", tid); } } if (is_vm_hugetlb_page(vma)) seq_printf(m, " huge"); walk_page_range(vma->vm_start, vma->vm_end, &walk); if (!md->pages) goto out; if (md->anon) seq_printf(m, " anon=%lu", md->anon); if (md->dirty) seq_printf(m, " dirty=%lu", md->dirty); if (md->pages != md->anon && md->pages != md->dirty) seq_printf(m, " mapped=%lu", md->pages); if (md->mapcount_max > 1) seq_printf(m, " mapmax=%lu", md->mapcount_max); if (md->swapcache) seq_printf(m, " swapcache=%lu", md->swapcache); if (md->active < md->pages && !is_vm_hugetlb_page(vma)) seq_printf(m, " active=%lu", md->active); if (md->writeback) seq_printf(m, " writeback=%lu", md->writeback); for_each_node_state(n, N_MEMORY) if (md->node[n]) seq_printf(m, " N%d=%lu", n, md->node[n]); out: seq_putc(m, '\n'); if (m->count < m->size) m->version = (vma != proc_priv->tail_vma) ? 
vma->vm_start : 0; return 0; } static int show_pid_numa_map(struct seq_file *m, void *v) { return show_numa_map(m, v, 1); } static int show_tid_numa_map(struct seq_file *m, void *v) { return show_numa_map(m, v, 0); } static const struct seq_operations proc_pid_numa_maps_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = show_pid_numa_map, }; static const struct seq_operations proc_tid_numa_maps_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = show_tid_numa_map, }; static int numa_maps_open(struct inode *inode, struct file *file, const struct seq_operations *ops) { struct numa_maps_private *priv; int ret = -ENOMEM; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv) { priv->proc_maps.pid = proc_pid(inode); ret = seq_open(file, ops); if (!ret) { struct seq_file *m = file->private_data; m->private = priv; } else { kfree(priv); } } return ret; } static int pid_numa_maps_open(struct inode *inode, struct file *file) { return numa_maps_open(inode, file, &proc_pid_numa_maps_op); } static int tid_numa_maps_open(struct inode *inode, struct file *file) { return numa_maps_open(inode, file, &proc_tid_numa_maps_op); } const struct file_operations proc_pid_numa_maps_operations = { .open = pid_numa_maps_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; const struct file_operations proc_tid_numa_maps_operations = { .open = tid_numa_maps_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif /* CONFIG_NUMA */
gpl-2.0
erasmux/pyramid-gb-kernel
drivers/infiniband/hw/ehca/ehca_mrmw.c
803
75993
/* * IBM eServer eHCA Infiniband device driver for Linux on POWER * * MR/MW functions * * Authors: Dietmar Decker <ddecker@de.ibm.com> * Christoph Raisch <raisch@de.ibm.com> * Hoang-Nam Nguyen <hnguyen@de.ibm.com> * * Copyright (c) 2005 IBM Corporation * * All rights reserved. * * This source code is distributed under a dual license of GPL v2.0 and OpenIB * BSD. * * OpenIB BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/slab.h> #include <rdma/ib_umem.h> #include "ehca_iverbs.h" #include "ehca_mrmw.h" #include "hcp_if.h" #include "hipz_hw.h" #define NUM_CHUNKS(length, chunk_size) \ (((length) + (chunk_size - 1)) / (chunk_size)) /* max number of rpages (per hcall register_rpages) */ #define MAX_RPAGES 512 /* DMEM toleration management */ #define EHCA_SECTSHIFT SECTION_SIZE_BITS #define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT) #define EHCA_HUGEPAGESHIFT 34 #define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT) #define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT) #define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL #define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */ #define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2) #define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT) #define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */ #define EHCA_DIR_MAP_SIZE (0x10000) #define EHCA_ENT_MAP_SIZE (0x10000) #define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1) static unsigned long ehca_mr_len; /* * Memory map data structures */ struct ehca_dir_bmap { u64 ent[EHCA_MAP_ENTRIES]; }; struct ehca_top_bmap { struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES]; }; struct ehca_bmap { struct ehca_top_bmap *top[EHCA_MAP_ENTRIES]; }; static struct ehca_bmap *ehca_bmap; static struct kmem_cache *mr_cache; static struct kmem_cache *mw_cache; enum ehca_mr_pgsize { EHCA_MR_PGSIZE4K = 0x1000L, EHCA_MR_PGSIZE64K = 0x10000L, EHCA_MR_PGSIZE1M = 0x100000L, EHCA_MR_PGSIZE16M = 0x1000000L }; #define EHCA_MR_PGSHIFT4K 12 #define EHCA_MR_PGSHIFT64K 16 #define EHCA_MR_PGSHIFT1M 20 #define EHCA_MR_PGSHIFT16M 24 static u64 ehca_map_vaddr(void *caddr); static u32 ehca_encode_hwpage_size(u32 pgsize) { int log = ilog2(pgsize); WARN_ON(log < 12 || log > 24 || log & 3); return (log - 12) / 4; } static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca) { return 1UL << ilog2(shca->hca_cap_mr_pgsize); } static struct ehca_mr *ehca_mr_new(void) { struct ehca_mr *me; me = 
kmem_cache_zalloc(mr_cache, GFP_KERNEL); if (me) spin_lock_init(&me->mrlock); else ehca_gen_err("alloc failed"); return me; } static void ehca_mr_delete(struct ehca_mr *me) { kmem_cache_free(mr_cache, me); } static struct ehca_mw *ehca_mw_new(void) { struct ehca_mw *me; me = kmem_cache_zalloc(mw_cache, GFP_KERNEL); if (me) spin_lock_init(&me->mwlock); else ehca_gen_err("alloc failed"); return me; } static void ehca_mw_delete(struct ehca_mw *me) { kmem_cache_free(mw_cache, me); } /*----------------------------------------------------------------------*/ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags) { struct ib_mr *ib_mr; int ret; struct ehca_mr *e_maxmr; struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); if (shca->maxmr) { e_maxmr = ehca_mr_new(); if (!e_maxmr) { ehca_err(&shca->ib_device, "out of memory"); ib_mr = ERR_PTR(-ENOMEM); goto get_dma_mr_exit0; } ret = ehca_reg_maxmr(shca, e_maxmr, (void *)ehca_map_vaddr((void *)KERNELBASE), mr_access_flags, e_pd, &e_maxmr->ib.ib_mr.lkey, &e_maxmr->ib.ib_mr.rkey); if (ret) { ehca_mr_delete(e_maxmr); ib_mr = ERR_PTR(ret); goto get_dma_mr_exit0; } ib_mr = &e_maxmr->ib.ib_mr; } else { ehca_err(&shca->ib_device, "no internal max-MR exist!"); ib_mr = ERR_PTR(-EINVAL); goto get_dma_mr_exit0; } get_dma_mr_exit0: if (IS_ERR(ib_mr)) ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x", PTR_ERR(ib_mr), pd, mr_access_flags); return ib_mr; } /* end ehca_get_dma_mr() */ /*----------------------------------------------------------------------*/ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { struct ib_mr *ib_mr; int ret; struct ehca_mr *e_mr; struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); u64 size; if ((num_phys_buf <= 0) 
|| !phys_buf_array) { ehca_err(pd->device, "bad input values: num_phys_buf=%x " "phys_buf_array=%p", num_phys_buf, phys_buf_array); ib_mr = ERR_PTR(-EINVAL); goto reg_phys_mr_exit0; } if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { /* * Remote Write Access requires Local Write Access * Remote Atomic Access requires Local Write Access */ ehca_err(pd->device, "bad input values: mr_access_flags=%x", mr_access_flags); ib_mr = ERR_PTR(-EINVAL); goto reg_phys_mr_exit0; } /* check physical buffer list and calculate size */ ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf, iova_start, &size); if (ret) { ib_mr = ERR_PTR(ret); goto reg_phys_mr_exit0; } if ((size == 0) || (((u64)iova_start + size) < (u64)iova_start)) { ehca_err(pd->device, "bad input values: size=%llx iova_start=%p", size, iova_start); ib_mr = ERR_PTR(-EINVAL); goto reg_phys_mr_exit0; } e_mr = ehca_mr_new(); if (!e_mr) { ehca_err(pd->device, "out of memory"); ib_mr = ERR_PTR(-ENOMEM); goto reg_phys_mr_exit0; } /* register MR on HCA */ if (ehca_mr_is_maxmr(size, iova_start)) { e_mr->flags |= EHCA_MR_FLAG_MAXMR; ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags, e_pd, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey); if (ret) { ib_mr = ERR_PTR(ret); goto reg_phys_mr_exit1; } } else { struct ehca_mr_pginfo pginfo; u32 num_kpages; u32 num_hwpages; u64 hw_pgsize; num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size, PAGE_SIZE); /* for kernel space we try most possible pgsize */ hw_pgsize = ehca_get_max_hwpage_size(shca); num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size, hw_pgsize); memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_PHYS; pginfo.num_kpages = num_kpages; pginfo.hwpage_size = hw_pgsize; pginfo.num_hwpages = num_hwpages; pginfo.u.phy.num_phys_buf = num_phys_buf; pginfo.u.phy.phys_buf_array = 
phys_buf_array; pginfo.next_hwpage = ((u64)iova_start & ~PAGE_MASK) / hw_pgsize; ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey, EHCA_REG_MR); if (ret) { ib_mr = ERR_PTR(ret); goto reg_phys_mr_exit1; } } /* successful registration of all pages */ return &e_mr->ib.ib_mr; reg_phys_mr_exit1: ehca_mr_delete(e_mr); reg_phys_mr_exit0: if (IS_ERR(ib_mr)) ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p " "num_phys_buf=%x mr_access_flags=%x iova_start=%p", PTR_ERR(ib_mr), pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); return ib_mr; } /* end ehca_reg_phys_mr() */ /*----------------------------------------------------------------------*/ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int mr_access_flags, struct ib_udata *udata) { struct ib_mr *ib_mr; struct ehca_mr *e_mr; struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); struct ehca_mr_pginfo pginfo; int ret, page_shift; u32 num_kpages; u32 num_hwpages; u64 hwpage_size; if (!pd) { ehca_gen_err("bad pd=%p", pd); return ERR_PTR(-EFAULT); } if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { /* * Remote Write Access requires Local Write Access * Remote Atomic Access requires Local Write Access */ ehca_err(pd->device, "bad input values: mr_access_flags=%x", mr_access_flags); ib_mr = ERR_PTR(-EINVAL); goto reg_user_mr_exit0; } if (length == 0 || virt + length < virt) { ehca_err(pd->device, "bad input values: length=%llx " "virt_base=%llx", length, virt); ib_mr = ERR_PTR(-EINVAL); goto reg_user_mr_exit0; } e_mr = ehca_mr_new(); if (!e_mr) { ehca_err(pd->device, "out of memory"); ib_mr = ERR_PTR(-ENOMEM); goto reg_user_mr_exit0; } e_mr->umem = 
ib_umem_get(pd->uobject->context, start, length, mr_access_flags, 0); if (IS_ERR(e_mr->umem)) { ib_mr = (void *)e_mr->umem; goto reg_user_mr_exit1; } if (e_mr->umem->page_size != PAGE_SIZE) { ehca_err(pd->device, "page size not supported, " "e_mr->umem->page_size=%x", e_mr->umem->page_size); ib_mr = ERR_PTR(-EINVAL); goto reg_user_mr_exit2; } /* determine number of MR pages */ num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE); /* select proper hw_pgsize */ page_shift = PAGE_SHIFT; if (e_mr->umem->hugetlb) { /* determine page_shift, clamp between 4K and 16M */ page_shift = (fls64(length - 1) + 3) & ~3; page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K), EHCA_MR_PGSHIFT16M); } hwpage_size = 1UL << page_shift; /* now that we have the desired page size, shift until it's * supported, too. 4K is always supported, so this terminates. */ while (!(hwpage_size & shca->hca_cap_mr_pgsize)) hwpage_size >>= 4; reg_user_mr_fallback: num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size); /* register MR on HCA */ memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_USER; pginfo.hwpage_size = hwpage_size; pginfo.num_kpages = num_kpages; pginfo.num_hwpages = num_hwpages; pginfo.u.usr.region = e_mr->umem; pginfo.next_hwpage = e_mr->umem->offset / hwpage_size; pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk, (&e_mr->umem->chunk_list), list); ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey, EHCA_REG_MR); if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) { ehca_warn(pd->device, "failed to register mr " "with hwpage_size=%llx", hwpage_size); ehca_info(pd->device, "try to register mr with " "kpage_size=%lx", PAGE_SIZE); /* * this means kpages are not contiguous for a hw page * try kernel page size as fallback solution */ hwpage_size = PAGE_SIZE; goto reg_user_mr_fallback; } if (ret) { ib_mr = ERR_PTR(ret); goto reg_user_mr_exit2; } /* successful 
registration of all pages */ return &e_mr->ib.ib_mr; reg_user_mr_exit2: ib_umem_release(e_mr->umem); reg_user_mr_exit1: ehca_mr_delete(e_mr); reg_user_mr_exit0: if (IS_ERR(ib_mr)) ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p", PTR_ERR(ib_mr), pd, mr_access_flags, udata); return ib_mr; } /* end ehca_reg_user_mr() */ /*----------------------------------------------------------------------*/ int ehca_rereg_phys_mr(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { int ret; struct ehca_shca *shca = container_of(mr->device, struct ehca_shca, ib_device); struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); u64 new_size; u64 *new_start; u32 new_acl; struct ehca_pd *new_pd; u32 tmp_lkey, tmp_rkey; unsigned long sl_flags; u32 num_kpages = 0; u32 num_hwpages = 0; struct ehca_mr_pginfo pginfo; if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) { /* TODO not supported, because PHYP rereg hCall needs pages */ ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not " "supported yet, mr_rereg_mask=%x", mr_rereg_mask); ret = -EINVAL; goto rereg_phys_mr_exit0; } if (mr_rereg_mask & IB_MR_REREG_PD) { if (!pd) { ehca_err(mr->device, "rereg with bad pd, pd=%p " "mr_rereg_mask=%x", pd, mr_rereg_mask); ret = -EINVAL; goto rereg_phys_mr_exit0; } } if ((mr_rereg_mask & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) || (mr_rereg_mask == 0)) { ret = -EINVAL; goto rereg_phys_mr_exit0; } /* check other parameters */ if (e_mr == shca->maxmr) { /* should be impossible, however reject to be sure */ ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p " "shca->maxmr=%p mr->lkey=%x", mr, shca->maxmr, mr->lkey); ret = -EINVAL; goto rereg_phys_mr_exit0; } if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. 
addr/size */ if (e_mr->flags & EHCA_MR_FLAG_FMR) { ehca_err(mr->device, "not supported for FMR, mr=%p " "flags=%x", mr, e_mr->flags); ret = -EINVAL; goto rereg_phys_mr_exit0; } if (!phys_buf_array || num_phys_buf <= 0) { ehca_err(mr->device, "bad input values mr_rereg_mask=%x" " phys_buf_array=%p num_phys_buf=%x", mr_rereg_mask, phys_buf_array, num_phys_buf); ret = -EINVAL; goto rereg_phys_mr_exit0; } } if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */ (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) { /* * Remote Write Access requires Local Write Access * Remote Atomic Access requires Local Write Access */ ehca_err(mr->device, "bad input values: mr_rereg_mask=%x " "mr_access_flags=%x", mr_rereg_mask, mr_access_flags); ret = -EINVAL; goto rereg_phys_mr_exit0; } /* set requested values dependent on rereg request */ spin_lock_irqsave(&e_mr->mrlock, sl_flags); new_start = e_mr->start; new_size = e_mr->size; new_acl = e_mr->acl; new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); if (mr_rereg_mask & IB_MR_REREG_TRANS) { u64 hw_pgsize = ehca_get_max_hwpage_size(shca); new_start = iova_start; /* change address */ /* check physical buffer list and calculate size */ ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf, iova_start, &new_size); if (ret) goto rereg_phys_mr_exit1; if ((new_size == 0) || (((u64)iova_start + new_size) < (u64)iova_start)) { ehca_err(mr->device, "bad input values: new_size=%llx " "iova_start=%p", new_size, iova_start); ret = -EINVAL; goto rereg_phys_mr_exit1; } num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) + new_size, PAGE_SIZE); num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) + new_size, hw_pgsize); memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_PHYS; pginfo.num_kpages = num_kpages; pginfo.hwpage_size = hw_pgsize; pginfo.num_hwpages = num_hwpages; 
pginfo.u.phy.num_phys_buf = num_phys_buf; pginfo.u.phy.phys_buf_array = phys_buf_array; pginfo.next_hwpage = ((u64)iova_start & ~PAGE_MASK) / hw_pgsize; } if (mr_rereg_mask & IB_MR_REREG_ACCESS) new_acl = mr_access_flags; if (mr_rereg_mask & IB_MR_REREG_PD) new_pd = container_of(pd, struct ehca_pd, ib_pd); ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl, new_pd, &pginfo, &tmp_lkey, &tmp_rkey); if (ret) goto rereg_phys_mr_exit1; /* successful reregistration */ if (mr_rereg_mask & IB_MR_REREG_PD) mr->pd = pd; mr->lkey = tmp_lkey; mr->rkey = tmp_rkey; rereg_phys_mr_exit1: spin_unlock_irqrestore(&e_mr->mrlock, sl_flags); rereg_phys_mr_exit0: if (ret) ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p " "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x " "iova_start=%p", ret, mr, mr_rereg_mask, pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); return ret; } /* end ehca_rereg_phys_mr() */ /*----------------------------------------------------------------------*/ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) { int ret = 0; u64 h_ret; struct ehca_shca *shca = container_of(mr->device, struct ehca_shca, ib_device); struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); unsigned long sl_flags; struct ehca_mr_hipzout_parms hipzout; if ((e_mr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p " "e_mr->flags=%x", mr, e_mr, e_mr->flags); ret = -EINVAL; goto query_mr_exit0; } memset(mr_attr, 0, sizeof(struct ib_mr_attr)); spin_lock_irqsave(&e_mr->mrlock, sl_flags); h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p " "hca_hndl=%llx mr_hndl=%llx lkey=%x", h_ret, mr, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle, mr->lkey); ret = ehca2ib_return_code(h_ret); goto query_mr_exit1; } mr_attr->pd = mr->pd; mr_attr->device_virt_addr = hipzout.vaddr; mr_attr->size = hipzout.len; 
mr_attr->lkey = hipzout.lkey; mr_attr->rkey = hipzout.rkey; ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags); query_mr_exit1: spin_unlock_irqrestore(&e_mr->mrlock, sl_flags); query_mr_exit0: if (ret) ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p", ret, mr, mr_attr); return ret; } /* end ehca_query_mr() */ /*----------------------------------------------------------------------*/ int ehca_dereg_mr(struct ib_mr *mr) { int ret = 0; u64 h_ret; struct ehca_shca *shca = container_of(mr->device, struct ehca_shca, ib_device); struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); if ((e_mr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p " "e_mr->flags=%x", mr, e_mr, e_mr->flags); ret = -EINVAL; goto dereg_mr_exit0; } else if (e_mr == shca->maxmr) { /* should be impossible, however reject to be sure */ ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p " "shca->maxmr=%p mr->lkey=%x", mr, shca->maxmr, mr->lkey); ret = -EINVAL; goto dereg_mr_exit0; } /* TODO: BUSY: MR still has bound window(s) */ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); if (h_ret != H_SUCCESS) { ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p " "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x", h_ret, shca, e_mr, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle, mr->lkey); ret = ehca2ib_return_code(h_ret); goto dereg_mr_exit0; } if (e_mr->umem) ib_umem_release(e_mr->umem); /* successful deregistration */ ehca_mr_delete(e_mr); dereg_mr_exit0: if (ret) ehca_err(mr->device, "ret=%i mr=%p", ret, mr); return ret; } /* end ehca_dereg_mr() */ /*----------------------------------------------------------------------*/ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd) { struct ib_mw *ib_mw; u64 h_ret; struct ehca_mw *e_mw; struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); struct ehca_mw_hipzout_parms hipzout; 
e_mw = ehca_mw_new(); if (!e_mw) { ib_mw = ERR_PTR(-ENOMEM); goto alloc_mw_exit0; } h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw, e_pd->fw_pd, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli " "shca=%p hca_hndl=%llx mw=%p", h_ret, shca, shca->ipz_hca_handle.handle, e_mw); ib_mw = ERR_PTR(ehca2ib_return_code(h_ret)); goto alloc_mw_exit1; } /* successful MW allocation */ e_mw->ipz_mw_handle = hipzout.handle; e_mw->ib_mw.rkey = hipzout.rkey; return &e_mw->ib_mw; alloc_mw_exit1: ehca_mw_delete(e_mw); alloc_mw_exit0: if (IS_ERR(ib_mw)) ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd); return ib_mw; } /* end ehca_alloc_mw() */ /*----------------------------------------------------------------------*/ int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind) { /* TODO: not supported up to now */ ehca_gen_err("bind MW currently not supported by HCAD"); return -EPERM; } /* end ehca_bind_mw() */ /*----------------------------------------------------------------------*/ int ehca_dealloc_mw(struct ib_mw *mw) { u64 h_ret; struct ehca_shca *shca = container_of(mw->device, struct ehca_shca, ib_device); struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw); h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw); if (h_ret != H_SUCCESS) { ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p " "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx", h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, e_mw->ipz_mw_handle.handle); return ehca2ib_return_code(h_ret); } /* successful deallocation */ ehca_mw_delete(e_mw); return 0; } /* end ehca_dealloc_mw() */ /*----------------------------------------------------------------------*/ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) { struct ib_fmr *ib_fmr; struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); struct ehca_pd *e_pd = container_of(pd, struct 
ehca_pd, ib_pd); struct ehca_mr *e_fmr; int ret; u32 tmp_lkey, tmp_rkey; struct ehca_mr_pginfo pginfo; u64 hw_pgsize; /* check other parameters */ if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { /* * Remote Write Access requires Local Write Access * Remote Atomic Access requires Local Write Access */ ehca_err(pd->device, "bad input values: mr_access_flags=%x", mr_access_flags); ib_fmr = ERR_PTR(-EINVAL); goto alloc_fmr_exit0; } if (mr_access_flags & IB_ACCESS_MW_BIND) { ehca_err(pd->device, "bad input values: mr_access_flags=%x", mr_access_flags); ib_fmr = ERR_PTR(-EINVAL); goto alloc_fmr_exit0; } if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) { ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x " "fmr_attr->max_maps=%x fmr_attr->page_shift=%x", fmr_attr->max_pages, fmr_attr->max_maps, fmr_attr->page_shift); ib_fmr = ERR_PTR(-EINVAL); goto alloc_fmr_exit0; } hw_pgsize = 1 << fmr_attr->page_shift; if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) { ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x", fmr_attr->page_shift); ib_fmr = ERR_PTR(-EINVAL); goto alloc_fmr_exit0; } e_fmr = ehca_mr_new(); if (!e_fmr) { ib_fmr = ERR_PTR(-ENOMEM); goto alloc_fmr_exit0; } e_fmr->flags |= EHCA_MR_FLAG_FMR; /* register MR on HCA */ memset(&pginfo, 0, sizeof(pginfo)); pginfo.hwpage_size = hw_pgsize; /* * pginfo.num_hwpages==0, ie register_rpages() will not be called * but deferred to map_phys_fmr() */ ret = ehca_reg_mr(shca, e_fmr, NULL, fmr_attr->max_pages * (1 << fmr_attr->page_shift), mr_access_flags, e_pd, &pginfo, &tmp_lkey, &tmp_rkey, EHCA_REG_MR); if (ret) { ib_fmr = ERR_PTR(ret); goto alloc_fmr_exit1; } /* successful */ e_fmr->hwpage_size = hw_pgsize; e_fmr->fmr_page_size = 1 << fmr_attr->page_shift; e_fmr->fmr_max_pages = fmr_attr->max_pages; e_fmr->fmr_max_maps = fmr_attr->max_maps; e_fmr->fmr_map_cnt = 
0; return &e_fmr->ib.ib_fmr; alloc_fmr_exit1: ehca_mr_delete(e_fmr); alloc_fmr_exit0: return ib_fmr; } /* end ehca_alloc_fmr() */ /*----------------------------------------------------------------------*/ int ehca_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, u64 iova) { int ret; struct ehca_shca *shca = container_of(fmr->device, struct ehca_shca, ib_device); struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); struct ehca_mr_pginfo pginfo; u32 tmp_lkey, tmp_rkey; if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x", e_fmr, e_fmr->flags); ret = -EINVAL; goto map_phys_fmr_exit0; } ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len); if (ret) goto map_phys_fmr_exit0; if (iova % e_fmr->fmr_page_size) { /* only whole-numbered pages */ ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x", iova, e_fmr->fmr_page_size); ret = -EINVAL; goto map_phys_fmr_exit0; } if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) { /* HCAD does not limit the maps, however trace this anyway */ ehca_info(fmr->device, "map limit exceeded, fmr=%p " "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x", fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps); } memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_FMR; pginfo.num_kpages = list_len; pginfo.hwpage_size = e_fmr->hwpage_size; pginfo.num_hwpages = list_len * e_fmr->fmr_page_size / pginfo.hwpage_size; pginfo.u.fmr.page_list = page_list; pginfo.next_hwpage = (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size; pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size; ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova, list_len * e_fmr->fmr_page_size, e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey); if (ret) goto map_phys_fmr_exit0; /* successful reregistration */ e_fmr->fmr_map_cnt++; e_fmr->ib.ib_fmr.lkey = tmp_lkey; e_fmr->ib.ib_fmr.rkey = tmp_rkey; return 0; map_phys_fmr_exit0: if (ret) 
ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x " "iova=%llx", ret, fmr, page_list, list_len, iova); return ret; } /* end ehca_map_phys_fmr() */ /*----------------------------------------------------------------------*/ int ehca_unmap_fmr(struct list_head *fmr_list) { int ret = 0; struct ib_fmr *ib_fmr; struct ehca_shca *shca = NULL; struct ehca_shca *prev_shca; struct ehca_mr *e_fmr; u32 num_fmr = 0; u32 unmap_fmr_cnt = 0; /* check all FMR belong to same SHCA, and check internal flag */ list_for_each_entry(ib_fmr, fmr_list, list) { prev_shca = shca; if (!ib_fmr) { ehca_gen_err("bad fmr=%p in list", ib_fmr); ret = -EINVAL; goto unmap_fmr_exit0; } shca = container_of(ib_fmr->device, struct ehca_shca, ib_device); e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr); if ((shca != prev_shca) && prev_shca) { ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p " "prev_shca=%p e_fmr=%p", shca, prev_shca, e_fmr); ret = -EINVAL; goto unmap_fmr_exit0; } if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p " "e_fmr->flags=%x", e_fmr, e_fmr->flags); ret = -EINVAL; goto unmap_fmr_exit0; } num_fmr++; } /* loop over all FMRs to unmap */ list_for_each_entry(ib_fmr, fmr_list, list) { unmap_fmr_cnt++; e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr); shca = container_of(ib_fmr->device, struct ehca_shca, ib_device); ret = ehca_unmap_one_fmr(shca, e_fmr); if (ret) { /* unmap failed, stop unmapping of rest of FMRs */ ehca_err(&shca->ib_device, "unmap of one FMR failed, " "stop rest, e_fmr=%p num_fmr=%x " "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr, unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey); goto unmap_fmr_exit0; } } unmap_fmr_exit0: if (ret) ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x", ret, fmr_list, num_fmr, unmap_fmr_cnt); return ret; } /* end ehca_unmap_fmr() */ /*----------------------------------------------------------------------*/ int ehca_dealloc_fmr(struct ib_fmr *fmr) { int ret; u64 h_ret; struct 
ehca_shca *shca = container_of(fmr->device, struct ehca_shca, ib_device); struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x", e_fmr, e_fmr->flags); ret = -EINVAL; goto free_fmr_exit0; } h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); if (h_ret != H_SUCCESS) { ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p " "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x", h_ret, e_fmr, shca->ipz_hca_handle.handle, e_fmr->ipz_mr_handle.handle, fmr->lkey); ret = ehca2ib_return_code(h_ret); goto free_fmr_exit0; } /* successful deregistration */ ehca_mr_delete(e_fmr); return 0; free_fmr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr); return ret; } /* end ehca_dealloc_fmr() */ /*----------------------------------------------------------------------*/ static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca, struct ehca_mr *e_mr, struct ehca_mr_pginfo *pginfo); int ehca_reg_mr(struct ehca_shca *shca, struct ehca_mr *e_mr, u64 *iova_start, u64 size, int acl, struct ehca_pd *e_pd, struct ehca_mr_pginfo *pginfo, u32 *lkey, /*OUT*/ u32 *rkey, /*OUT*/ enum ehca_reg_type reg_type) { int ret; u64 h_ret; u32 hipz_acl; struct ehca_mr_hipzout_parms hipzout; ehca_mrmw_map_acl(acl, &hipz_acl); ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl); if (ehca_use_hp_mr == 1) hipz_acl |= 0x00000001; h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr, (u64)iova_start, size, hipz_acl, e_pd->fw_pd, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli " "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle); ret = ehca2ib_return_code(h_ret); goto ehca_reg_mr_exit0; } e_mr->ipz_mr_handle = hipzout.handle; if (reg_type == EHCA_REG_BUSMAP_MR) ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo); else if (reg_type == EHCA_REG_MR) ret = ehca_reg_mr_rpages(shca, e_mr, pginfo); else ret = 
-EINVAL; if (ret) goto ehca_reg_mr_exit1; /* successful registration */ e_mr->num_kpages = pginfo->num_kpages; e_mr->num_hwpages = pginfo->num_hwpages; e_mr->hwpage_size = pginfo->hwpage_size; e_mr->start = iova_start; e_mr->size = size; e_mr->acl = acl; *lkey = hipzout.lkey; *rkey = hipzout.rkey; return 0; ehca_reg_mr_exit1: h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p " "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x " "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i", h_ret, shca, e_mr, iova_start, size, acl, e_pd, hipzout.lkey, pginfo, pginfo->num_kpages, pginfo->num_hwpages, ret); ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, " "not recoverable"); } ehca_reg_mr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p " "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, pginfo->num_kpages, pginfo->num_hwpages); return ret; } /* end ehca_reg_mr() */ /*----------------------------------------------------------------------*/ int ehca_reg_mr_rpages(struct ehca_shca *shca, struct ehca_mr *e_mr, struct ehca_mr_pginfo *pginfo) { int ret = 0; u64 h_ret; u32 rnum; u64 rpage; u32 i; u64 *kpage; if (!pginfo->num_hwpages) /* in case of fmr */ return 0; kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL); if (!kpage) { ehca_err(&shca->ib_device, "kpage alloc failed"); ret = -ENOMEM; goto ehca_reg_mr_rpages_exit0; } /* max MAX_RPAGES ehca mr pages per register call */ for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) { if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) { rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */ if (rnum == 0) rnum = MAX_RPAGES; /* last shot is full */ } else rnum = MAX_RPAGES; ret = ehca_set_pagebuf(pginfo, rnum, kpage); if (ret) { ehca_err(&shca->ib_device, "ehca_set_pagebuf " "bad rc, ret=%i rnum=%x kpage=%p", ret, 
rnum, kpage); goto ehca_reg_mr_rpages_exit1; } if (rnum > 1) { rpage = virt_to_abs(kpage); if (!rpage) { ehca_err(&shca->ib_device, "kpage=%p i=%x", kpage, i); ret = -EFAULT; goto ehca_reg_mr_rpages_exit1; } } else rpage = *kpage; h_ret = hipz_h_register_rpage_mr( shca->ipz_hca_handle, e_mr, ehca_encode_hwpage_size(pginfo->hwpage_size), 0, rpage, rnum); if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) { /* * check for 'registration complete'==H_SUCCESS * and for 'page registered'==H_PAGE_REGISTERED */ if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "last " "hipz_reg_rpage_mr failed, h_ret=%lli " "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx" " lkey=%x", h_ret, e_mr, i, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle, e_mr->ib.ib_mr.lkey); ret = ehca2ib_return_code(h_ret); break; } else ret = 0; } else if (h_ret != H_PAGE_REGISTERED) { ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, " "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx " "mr_hndl=%llx", h_ret, e_mr, i, e_mr->ib.ib_mr.lkey, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle); ret = ehca2ib_return_code(h_ret); break; } else ret = 0; } /* end for(i) */ ehca_reg_mr_rpages_exit1: ehca_free_fw_ctrlblock(kpage); ehca_reg_mr_rpages_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p " "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr, pginfo, pginfo->num_kpages, pginfo->num_hwpages); return ret; } /* end ehca_reg_mr_rpages() */ /*----------------------------------------------------------------------*/ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, struct ehca_mr *e_mr, u64 *iova_start, u64 size, u32 acl, struct ehca_pd *e_pd, struct ehca_mr_pginfo *pginfo, u32 *lkey, /*OUT*/ u32 *rkey) /*OUT*/ { int ret; u64 h_ret; u32 hipz_acl; u64 *kpage; u64 rpage; struct ehca_mr_pginfo pginfo_save; struct ehca_mr_hipzout_parms hipzout; ehca_mrmw_map_acl(acl, &hipz_acl); ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl); kpage = 
ehca_alloc_fw_ctrlblock(GFP_KERNEL); if (!kpage) { ehca_err(&shca->ib_device, "kpage alloc failed"); ret = -ENOMEM; goto ehca_rereg_mr_rereg1_exit0; } pginfo_save = *pginfo; ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage); if (ret) { ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx " "kpage=%p", e_mr, pginfo, pginfo->type, pginfo->num_kpages, pginfo->num_hwpages, kpage); goto ehca_rereg_mr_rereg1_exit1; } rpage = virt_to_abs(kpage); if (!rpage) { ehca_err(&shca->ib_device, "kpage=%p", kpage); ret = -EFAULT; goto ehca_rereg_mr_rereg1_exit1; } h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr, (u64)iova_start, size, hipz_acl, e_pd->fw_pd, rpage, &hipzout); if (h_ret != H_SUCCESS) { /* * reregistration unsuccessful, try it again with the 3 hCalls, * e.g. this is required in case H_MR_CONDITION * (MW bound or MR is shared) */ ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed " "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr); *pginfo = pginfo_save; ret = -EAGAIN; } else if ((u64 *)hipzout.vaddr != iova_start) { ehca_err(&shca->ib_device, "PHYP changed iova_start in " "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p " "mr_handle=%llx lkey=%x lkey_out=%x", iova_start, hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle, e_mr->ib.ib_mr.lkey, hipzout.lkey); ret = -EFAULT; } else { /* * successful reregistration * note: start and start_out are identical for eServer HCAs */ e_mr->num_kpages = pginfo->num_kpages; e_mr->num_hwpages = pginfo->num_hwpages; e_mr->hwpage_size = pginfo->hwpage_size; e_mr->start = iova_start; e_mr->size = size; e_mr->acl = acl; *lkey = hipzout.lkey; *rkey = hipzout.rkey; } ehca_rereg_mr_rereg1_exit1: ehca_free_fw_ctrlblock(kpage); ehca_rereg_mr_rereg1_exit0: if ( ret && (ret != -EAGAIN) ) ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x " "pginfo=%p num_kpages=%llx num_hwpages=%llx", ret, *lkey, *rkey, pginfo, pginfo->num_kpages, pginfo->num_hwpages); return ret; } 
/* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

/*
 * Re-register a memory region with new address/size/access rights.
 *
 * Two strategies exist:
 *  - "rereg1": a single hipz_h_reregister_pmr hCall (fast path, chosen
 *    when the page counts fit into one rpage block and this is not the
 *    internal max-MR);
 *  - "rereg3": free the old HCA resource, wipe the ehca_mr bookkeeping
 *    and fully re-register (slow path; also the fallback when the fast
 *    path returns -EAGAIN).
 */
int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_hwpages > MAX_RPAGES) ||
	    (e_mr->num_hwpages > MAX_RPAGES) ||
	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, "
			 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
			 pginfo->num_hwpages, e_mr->num_hwpages);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	/* the internal max-MR must never take the fast path */
	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1; /* fall back to slow path */
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->hwpage_size = save_mr.hwpage_size;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret =
ehca_reg_mr(shca, e_mr, iova_start, size, acl, e_pd, pginfo, lkey, rkey, EHCA_REG_MR); if (ret) { u32 offset = (u64)(&e_mr->flags) - (u64)e_mr; memcpy(&e_mr->flags, &(save_mr.flags), sizeof(struct ehca_mr) - offset); goto ehca_rereg_mr_exit0; } } ehca_rereg_mr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p " "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x " "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey, rereg_1_hcall, rereg_3_hcall); return ret; } /* end ehca_rereg_mr() */ /*----------------------------------------------------------------------*/ int ehca_unmap_one_fmr(struct ehca_shca *shca, struct ehca_mr *e_fmr) { int ret = 0; u64 h_ret; struct ehca_pd *e_pd = container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); struct ehca_mr save_fmr; u32 tmp_lkey, tmp_rkey; struct ehca_mr_pginfo pginfo; struct ehca_mr_hipzout_parms hipzout; struct ehca_mr save_mr; if (e_fmr->fmr_max_pages <= MAX_RPAGES) { /* * note: after using rereg hcall with len=0, * rereg hcall must be used again for registering pages */ h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0, 0, 0, e_pd->fw_pd, 0, &hipzout); if (h_ret == H_SUCCESS) { /* successful reregistration */ e_fmr->start = NULL; e_fmr->size = 0; tmp_lkey = hipzout.lkey; tmp_rkey = hipzout.rkey; return 0; } /* * should not happen, because length checked above, * FMRs are not shared and no MW bound to FMRs */ ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx " "mr_hndl=%llx lkey=%x lkey_out=%x", h_ret, e_fmr, shca->ipz_hca_handle.handle, e_fmr->ipz_mr_handle.handle, e_fmr->ib.ib_fmr.lkey, hipzout.lkey); /* try free and rereg */ } /* first free old FMR */ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "hipz_free_mr failed, " "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx " 
"lkey=%x", h_ret, e_fmr, shca->ipz_hca_handle.handle, e_fmr->ipz_mr_handle.handle, e_fmr->ib.ib_fmr.lkey); ret = ehca2ib_return_code(h_ret); goto ehca_unmap_one_fmr_exit0; } /* clean ehca_mr_t, without changing lock */ save_fmr = *e_fmr; ehca_mr_deletenew(e_fmr); /* set some MR values */ e_fmr->flags = save_fmr.flags; e_fmr->hwpage_size = save_fmr.hwpage_size; e_fmr->fmr_page_size = save_fmr.fmr_page_size; e_fmr->fmr_max_pages = save_fmr.fmr_max_pages; e_fmr->fmr_max_maps = save_fmr.fmr_max_maps; e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt; e_fmr->acl = save_fmr.acl; memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_FMR; ret = ehca_reg_mr(shca, e_fmr, NULL, (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey, EHCA_REG_MR); if (ret) { u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr; memcpy(&e_fmr->flags, &(save_mr.flags), sizeof(struct ehca_mr) - offset); } ehca_unmap_one_fmr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x " "fmr_max_pages=%x", ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages); return ret; } /* end ehca_unmap_one_fmr() */ /*----------------------------------------------------------------------*/ int ehca_reg_smr(struct ehca_shca *shca, struct ehca_mr *e_origmr, struct ehca_mr *e_newmr, u64 *iova_start, int acl, struct ehca_pd *e_pd, u32 *lkey, /*OUT*/ u32 *rkey) /*OUT*/ { int ret = 0; u64 h_ret; u32 hipz_acl; struct ehca_mr_hipzout_parms hipzout; ehca_mrmw_map_acl(acl, &hipz_acl); ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl); h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr, (u64)iova_start, hipz_acl, e_pd->fw_pd, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli " "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x " "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x", h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd, shca->ipz_hca_handle.handle, e_origmr->ipz_mr_handle.handle, 
e_origmr->ib.ib_mr.lkey); ret = ehca2ib_return_code(h_ret); goto ehca_reg_smr_exit0; } /* successful registration */ e_newmr->num_kpages = e_origmr->num_kpages; e_newmr->num_hwpages = e_origmr->num_hwpages; e_newmr->hwpage_size = e_origmr->hwpage_size; e_newmr->start = iova_start; e_newmr->size = e_origmr->size; e_newmr->acl = acl; e_newmr->ipz_mr_handle = hipzout.handle; *lkey = hipzout.lkey; *rkey = hipzout.rkey; return 0; ehca_reg_smr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p " "e_newmr=%p iova_start=%p acl=%x e_pd=%p", ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd); return ret; } /* end ehca_reg_smr() */ /*----------------------------------------------------------------------*/ static inline void *ehca_calc_sectbase(int top, int dir, int idx) { unsigned long ret = idx; ret |= dir << EHCA_DIR_INDEX_SHIFT; ret |= top << EHCA_TOP_INDEX_SHIFT; return abs_to_virt(ret << SECTION_SIZE_BITS); } #define ehca_bmap_valid(entry) \ ((u64)entry != (u64)EHCA_INVAL_ADDR) static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage, struct ehca_shca *shca, struct ehca_mr *mr, struct ehca_mr_pginfo *pginfo) { u64 h_ret = 0; unsigned long page = 0; u64 rpage = virt_to_abs(kpage); int page_count; void *sectbase = ehca_calc_sectbase(top, dir, idx); if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) { ehca_err(&shca->ib_device, "reg_mr_section will probably fail:" "hwpage_size does not fit to " "section start address"); } page_count = EHCA_SECTSIZE / pginfo->hwpage_size; while (page < page_count) { u64 rnum; for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count); rnum++) { void *pg = sectbase + ((page++) * pginfo->hwpage_size); kpage[rnum] = virt_to_abs(pg); } h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr, ehca_encode_hwpage_size(pginfo->hwpage_size), 0, rpage, rnum); if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) { ehca_err(&shca->ib_device, "register_rpage_mr failed"); return h_ret; } } return h_ret; } 
static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage, struct ehca_shca *shca, struct ehca_mr *mr, struct ehca_mr_pginfo *pginfo) { u64 hret = H_SUCCESS; int idx; for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) { if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx])) continue; hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr, pginfo); if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) return hret; } return hret; } static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca, struct ehca_mr *mr, struct ehca_mr_pginfo *pginfo) { u64 hret = H_SUCCESS; int dir; for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) { if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir])) continue; hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo); if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) return hret; } return hret; } /* register internal max-MR to internal SHCA */ int ehca_reg_internal_maxmr( struct ehca_shca *shca, struct ehca_pd *e_pd, struct ehca_mr **e_maxmr) /*OUT*/ { int ret; struct ehca_mr *e_mr; u64 *iova_start; u64 size_maxmr; struct ehca_mr_pginfo pginfo; struct ib_phys_buf ib_pbuf; u32 num_kpages; u32 num_hwpages; u64 hw_pgsize; if (!ehca_bmap) { ret = -EFAULT; goto ehca_reg_internal_maxmr_exit0; } e_mr = ehca_mr_new(); if (!e_mr) { ehca_err(&shca->ib_device, "out of memory"); ret = -ENOMEM; goto ehca_reg_internal_maxmr_exit0; } e_mr->flags |= EHCA_MR_FLAG_MAXMR; /* register internal max-MR on HCA */ size_maxmr = ehca_mr_len; iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE); ib_pbuf.addr = 0; ib_pbuf.size = size_maxmr; num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, PAGE_SIZE); hw_pgsize = ehca_get_max_hwpage_size(shca); num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr, hw_pgsize); memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_PHYS; pginfo.num_kpages = num_kpages; pginfo.num_hwpages = num_hwpages; pginfo.hwpage_size = hw_pgsize; pginfo.u.phy.num_phys_buf 
= 1; pginfo.u.phy.phys_buf_array = &ib_pbuf; ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR); if (ret) { ehca_err(&shca->ib_device, "reg of internal max MR failed, " "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x " "num_hwpages=%x", e_mr, iova_start, size_maxmr, num_kpages, num_hwpages); goto ehca_reg_internal_maxmr_exit1; } /* successful registration of all pages */ e_mr->ib.ib_mr.device = e_pd->ib_pd.device; e_mr->ib.ib_mr.pd = &e_pd->ib_pd; e_mr->ib.ib_mr.uobject = NULL; atomic_inc(&(e_pd->ib_pd.usecnt)); atomic_set(&(e_mr->ib.ib_mr.usecnt), 0); *e_maxmr = e_mr; return 0; ehca_reg_internal_maxmr_exit1: ehca_mr_delete(e_mr); ehca_reg_internal_maxmr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p", ret, shca, e_pd, e_maxmr); return ret; } /* end ehca_reg_internal_maxmr() */ /*----------------------------------------------------------------------*/ int ehca_reg_maxmr(struct ehca_shca *shca, struct ehca_mr *e_newmr, u64 *iova_start, int acl, struct ehca_pd *e_pd, u32 *lkey, u32 *rkey) { u64 h_ret; struct ehca_mr *e_origmr = shca->maxmr; u32 hipz_acl; struct ehca_mr_hipzout_parms hipzout; ehca_mrmw_map_acl(acl, &hipz_acl); ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl); h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr, (u64)iova_start, hipz_acl, e_pd->fw_pd, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli " "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x", h_ret, e_origmr, shca->ipz_hca_handle.handle, e_origmr->ipz_mr_handle.handle, e_origmr->ib.ib_mr.lkey); return ehca2ib_return_code(h_ret); } /* successful registration */ e_newmr->num_kpages = e_origmr->num_kpages; e_newmr->num_hwpages = e_origmr->num_hwpages; e_newmr->hwpage_size = e_origmr->hwpage_size; e_newmr->start = iova_start; e_newmr->size = e_origmr->size; e_newmr->acl = acl; e_newmr->ipz_mr_handle = 
hipzout.handle; *lkey = hipzout.lkey; *rkey = hipzout.rkey; return 0; } /* end ehca_reg_maxmr() */ /*----------------------------------------------------------------------*/ int ehca_dereg_internal_maxmr(struct ehca_shca *shca) { int ret; struct ehca_mr *e_maxmr; struct ib_pd *ib_pd; if (!shca->maxmr) { ehca_err(&shca->ib_device, "bad call, shca=%p", shca); ret = -EINVAL; goto ehca_dereg_internal_maxmr_exit0; } e_maxmr = shca->maxmr; ib_pd = e_maxmr->ib.ib_mr.pd; shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */ ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr); if (ret) { ehca_err(&shca->ib_device, "dereg internal max-MR failed, " "ret=%i e_maxmr=%p shca=%p lkey=%x", ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey); shca->maxmr = e_maxmr; goto ehca_dereg_internal_maxmr_exit0; } atomic_dec(&ib_pd->usecnt); ehca_dereg_internal_maxmr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p", ret, shca, shca->maxmr); return ret; } /* end ehca_dereg_internal_maxmr() */ /*----------------------------------------------------------------------*/ /* * check physical buffer array of MR verbs for validness and * calculates MR size */ int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array, int num_phys_buf, u64 *iova_start, u64 *size) { struct ib_phys_buf *pbuf = phys_buf_array; u64 size_count = 0; u32 i; if (num_phys_buf == 0) { ehca_gen_err("bad phys buf array len, num_phys_buf=0"); return -EINVAL; } /* check first buffer */ if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) { ehca_gen_err("iova_start/addr mismatch, iova_start=%p " "pbuf->addr=%llx pbuf->size=%llx", iova_start, pbuf->addr, pbuf->size); return -EINVAL; } if (((pbuf->addr + pbuf->size) % PAGE_SIZE) && (num_phys_buf > 1)) { ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx " "pbuf->size=%llx", pbuf->addr, pbuf->size); return -EINVAL; } for (i = 0; i < num_phys_buf; i++) { if ((i > 0) && (pbuf->addr % PAGE_SIZE)) { ehca_gen_err("bad address, i=%x 
pbuf->addr=%llx " "pbuf->size=%llx", i, pbuf->addr, pbuf->size); return -EINVAL; } if (((i > 0) && /* not 1st */ (i < (num_phys_buf - 1)) && /* not last */ (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) { ehca_gen_err("bad size, i=%x pbuf->size=%llx", i, pbuf->size); return -EINVAL; } size_count += pbuf->size; pbuf++; } *size = size_count; return 0; } /* end ehca_mr_chk_buf_and_calc_size() */ /*----------------------------------------------------------------------*/ /* check page list of map FMR verb for validness */ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr, u64 *page_list, int list_len) { u32 i; u64 *page; if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) { ehca_gen_err("bad list_len, list_len=%x " "e_fmr->fmr_max_pages=%x fmr=%p", list_len, e_fmr->fmr_max_pages, e_fmr); return -EINVAL; } /* each page must be aligned */ page = page_list; for (i = 0; i < list_len; i++) { if (*page % e_fmr->fmr_page_size) { ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p " "fmr_page_size=%x", i, *page, page, e_fmr, e_fmr->fmr_page_size); return -EINVAL; } page++; } return 0; } /* end ehca_fmr_check_page_list() */ /*----------------------------------------------------------------------*/ /* PAGE_SIZE >= pginfo->hwpage_size */ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret = 0; struct ib_umem_chunk *prev_chunk; struct ib_umem_chunk *chunk; u64 pgaddr; u32 i = 0; u32 j = 0; int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size; /* loop over desired chunk entries */ chunk = pginfo->u.usr.next_chunk; prev_chunk = pginfo->u.usr.next_chunk; list_for_each_entry_continue( chunk, (&(pginfo->u.usr.region->chunk_list)), list) { for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { pgaddr = page_to_pfn(sg_page(&chunk->page_list[i])) << PAGE_SHIFT ; *kpage = phys_to_abs(pgaddr + (pginfo->next_hwpage * pginfo->hwpage_size)); if ( !(*kpage) ) { ehca_gen_err("pgaddr=%llx " "chunk->page_list[i]=%llx " "i=%x 
next_hwpage=%llx", pgaddr, (u64)sg_dma_address( &chunk->page_list[i]), i, pginfo->next_hwpage); return -EFAULT; } (pginfo->hwpage_cnt)++; (pginfo->next_hwpage)++; kpage++; if (pginfo->next_hwpage % hwpages_per_kpage == 0) { (pginfo->kpage_cnt)++; (pginfo->u.usr.next_nmap)++; pginfo->next_hwpage = 0; i++; } j++; if (j >= number) break; } if ((pginfo->u.usr.next_nmap >= chunk->nmap) && (j >= number)) { pginfo->u.usr.next_nmap = 0; prev_chunk = chunk; break; } else if (pginfo->u.usr.next_nmap >= chunk->nmap) { pginfo->u.usr.next_nmap = 0; prev_chunk = chunk; } else if (j >= number) break; else prev_chunk = chunk; } pginfo->u.usr.next_chunk = list_prepare_entry(prev_chunk, (&(pginfo->u.usr.region->chunk_list)), list); return ret; } /* * check given pages for contiguous layout * last page addr is returned in prev_pgaddr for further check */ static int ehca_check_kpages_per_ate(struct scatterlist *page_list, int start_idx, int end_idx, u64 *prev_pgaddr) { int t; for (t = start_idx; t <= end_idx; t++) { u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; if (ehca_debug_level >= 3) ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr, *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); if (pgaddr - PAGE_SIZE != *prev_pgaddr) { ehca_gen_err("uncontiguous page found pgaddr=%llx " "prev_pgaddr=%llx page_list_i=%x", pgaddr, *prev_pgaddr, t); return -EINVAL; } *prev_pgaddr = pgaddr; } return 0; } /* PAGE_SIZE < pginfo->hwpage_size */ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret = 0; struct ib_umem_chunk *prev_chunk; struct ib_umem_chunk *chunk; u64 pgaddr, prev_pgaddr; u32 i = 0; u32 j = 0; int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE; int nr_kpages = kpages_per_hwpage; /* loop over desired chunk entries */ chunk = pginfo->u.usr.next_chunk; prev_chunk = pginfo->u.usr.next_chunk; list_for_each_entry_continue( chunk, (&(pginfo->u.usr.region->chunk_list)), list) { for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; 
) { if (nr_kpages == kpages_per_hwpage) { pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i])) << PAGE_SHIFT ); *kpage = phys_to_abs(pgaddr); if ( !(*kpage) ) { ehca_gen_err("pgaddr=%llx i=%x", pgaddr, i); ret = -EFAULT; return ret; } /* * The first page in a hwpage must be aligned; * the first MR page is exempt from this rule. */ if (pgaddr & (pginfo->hwpage_size - 1)) { if (pginfo->hwpage_cnt) { ehca_gen_err( "invalid alignment " "pgaddr=%llx i=%x " "mr_pgsize=%llx", pgaddr, i, pginfo->hwpage_size); ret = -EFAULT; return ret; } /* first MR page */ pginfo->kpage_cnt = (pgaddr & (pginfo->hwpage_size - 1)) >> PAGE_SHIFT; nr_kpages -= pginfo->kpage_cnt; *kpage = phys_to_abs( pgaddr & ~(pginfo->hwpage_size - 1)); } if (ehca_debug_level >= 3) { u64 val = *(u64 *)abs_to_virt( phys_to_abs(pgaddr)); ehca_gen_dbg("kpage=%llx chunk_page=%llx " "value=%016llx", *kpage, pgaddr, val); } prev_pgaddr = pgaddr; i++; pginfo->kpage_cnt++; pginfo->u.usr.next_nmap++; nr_kpages--; if (!nr_kpages) goto next_kpage; continue; } if (i + nr_kpages > chunk->nmap) { ret = ehca_check_kpages_per_ate( chunk->page_list, i, chunk->nmap - 1, &prev_pgaddr); if (ret) return ret; pginfo->kpage_cnt += chunk->nmap - i; pginfo->u.usr.next_nmap += chunk->nmap - i; nr_kpages -= chunk->nmap - i; break; } ret = ehca_check_kpages_per_ate(chunk->page_list, i, i + nr_kpages - 1, &prev_pgaddr); if (ret) return ret; i += nr_kpages; pginfo->kpage_cnt += nr_kpages; pginfo->u.usr.next_nmap += nr_kpages; next_kpage: nr_kpages = kpages_per_hwpage; (pginfo->hwpage_cnt)++; kpage++; j++; if (j >= number) break; } if ((pginfo->u.usr.next_nmap >= chunk->nmap) && (j >= number)) { pginfo->u.usr.next_nmap = 0; prev_chunk = chunk; break; } else if (pginfo->u.usr.next_nmap >= chunk->nmap) { pginfo->u.usr.next_nmap = 0; prev_chunk = chunk; } else if (j >= number) break; else prev_chunk = chunk; } pginfo->u.usr.next_chunk = list_prepare_entry(prev_chunk, (&(pginfo->u.usr.region->chunk_list)), list); return ret; } static int 
ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret = 0; struct ib_phys_buf *pbuf; u64 num_hw, offs_hw; u32 i = 0; /* loop over desired phys_buf_array entries */ while (i < number) { pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf; num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) + pbuf->size, pginfo->hwpage_size); offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size; while (pginfo->next_hwpage < offs_hw + num_hw) { /* sanity check */ if ((pginfo->kpage_cnt >= pginfo->num_kpages) || (pginfo->hwpage_cnt >= pginfo->num_hwpages)) { ehca_gen_err("kpage_cnt >= num_kpages, " "kpage_cnt=%llx num_kpages=%llx " "hwpage_cnt=%llx " "num_hwpages=%llx i=%x", pginfo->kpage_cnt, pginfo->num_kpages, pginfo->hwpage_cnt, pginfo->num_hwpages, i); return -EFAULT; } *kpage = phys_to_abs( (pbuf->addr & ~(pginfo->hwpage_size - 1)) + (pginfo->next_hwpage * pginfo->hwpage_size)); if ( !(*kpage) && pbuf->addr ) { ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx " "next_hwpage=%llx", pbuf->addr, pbuf->size, pginfo->next_hwpage); return -EFAULT; } (pginfo->hwpage_cnt)++; (pginfo->next_hwpage)++; if (PAGE_SIZE >= pginfo->hwpage_size) { if (pginfo->next_hwpage % (PAGE_SIZE / pginfo->hwpage_size) == 0) (pginfo->kpage_cnt)++; } else pginfo->kpage_cnt += pginfo->hwpage_size / PAGE_SIZE; kpage++; i++; if (i >= number) break; } if (pginfo->next_hwpage >= offs_hw + num_hw) { (pginfo->u.phy.next_buf)++; pginfo->next_hwpage = 0; } } return ret; } static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret = 0; u64 *fmrlist; u32 i; /* loop over desired page_list entries */ fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; for (i = 0; i < number; i++) { *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) + pginfo->next_hwpage * pginfo->hwpage_size); if ( !(*kpage) ) { ehca_gen_err("*fmrlist=%llx fmrlist=%p " "next_listelem=%llx next_hwpage=%llx", *fmrlist, fmrlist, 
pginfo->u.fmr.next_listelem, pginfo->next_hwpage); return -EFAULT; } (pginfo->hwpage_cnt)++; if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) { if (pginfo->next_hwpage % (pginfo->u.fmr.fmr_pgsize / pginfo->hwpage_size) == 0) { (pginfo->kpage_cnt)++; (pginfo->u.fmr.next_listelem)++; fmrlist++; pginfo->next_hwpage = 0; } else (pginfo->next_hwpage)++; } else { unsigned int cnt_per_hwpage = pginfo->hwpage_size / pginfo->u.fmr.fmr_pgsize; unsigned int j; u64 prev = *kpage; /* check if adrs are contiguous */ for (j = 1; j < cnt_per_hwpage; j++) { u64 p = phys_to_abs(fmrlist[j] & ~(pginfo->hwpage_size - 1)); if (prev + pginfo->u.fmr.fmr_pgsize != p) { ehca_gen_err("uncontiguous fmr pages " "found prev=%llx p=%llx " "idx=%x", prev, p, i + j); return -EINVAL; } prev = p; } pginfo->kpage_cnt += cnt_per_hwpage; pginfo->u.fmr.next_listelem += cnt_per_hwpage; fmrlist += cnt_per_hwpage; } kpage++; } return ret; } /* setup page buffer from page info */ int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret; switch (pginfo->type) { case EHCA_MR_PGI_PHYS: ret = ehca_set_pagebuf_phys(pginfo, number, kpage); break; case EHCA_MR_PGI_USER: ret = PAGE_SIZE >= pginfo->hwpage_size ? ehca_set_pagebuf_user1(pginfo, number, kpage) : ehca_set_pagebuf_user2(pginfo, number, kpage); break; case EHCA_MR_PGI_FMR: ret = ehca_set_pagebuf_fmr(pginfo, number, kpage); break; default: ehca_gen_err("bad pginfo->type=%x", pginfo->type); ret = -EFAULT; break; } return ret; } /* end ehca_set_pagebuf() */ /*----------------------------------------------------------------------*/ /* * check MR if it is a max-MR, i.e. 
uses whole memory * in case it's a max-MR 1 is returned, else 0 */ int ehca_mr_is_maxmr(u64 size, u64 *iova_start) { /* a MR is treated as max-MR only if it fits following: */ if ((size == ehca_mr_len) && (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) { ehca_gen_dbg("this is a max-MR"); return 1; } else return 0; } /* end ehca_mr_is_maxmr() */ /*----------------------------------------------------------------------*/ /* map access control for MR/MW. This routine is used for MR and MW. */ void ehca_mrmw_map_acl(int ib_acl, u32 *hipz_acl) { *hipz_acl = 0; if (ib_acl & IB_ACCESS_REMOTE_READ) *hipz_acl |= HIPZ_ACCESSCTRL_R_READ; if (ib_acl & IB_ACCESS_REMOTE_WRITE) *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE; if (ib_acl & IB_ACCESS_REMOTE_ATOMIC) *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC; if (ib_acl & IB_ACCESS_LOCAL_WRITE) *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE; if (ib_acl & IB_ACCESS_MW_BIND) *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND; } /* end ehca_mrmw_map_acl() */ /*----------------------------------------------------------------------*/ /* sets page size in hipz access control for MR/MW. */ void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/ { *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24); } /* end ehca_mrmw_set_pgsize_hipz_acl() */ /*----------------------------------------------------------------------*/ /* * reverse map access control for MR/MW. * This routine is used for MR and MW. 
*/ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, int *ib_acl) /*OUT*/ { *ib_acl = 0; if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ) *ib_acl |= IB_ACCESS_REMOTE_READ; if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE) *ib_acl |= IB_ACCESS_REMOTE_WRITE; if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC) *ib_acl |= IB_ACCESS_REMOTE_ATOMIC; if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE) *ib_acl |= IB_ACCESS_LOCAL_WRITE; if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND) *ib_acl |= IB_ACCESS_MW_BIND; } /* end ehca_mrmw_reverse_map_acl() */ /*----------------------------------------------------------------------*/ /* * MR destructor and constructor * used in Reregister MR verb, sets all fields in ehca_mr_t to 0, * except struct ib_mr and spinlock */ void ehca_mr_deletenew(struct ehca_mr *mr) { mr->flags = 0; mr->num_kpages = 0; mr->num_hwpages = 0; mr->acl = 0; mr->start = NULL; mr->fmr_page_size = 0; mr->fmr_max_pages = 0; mr->fmr_max_maps = 0; mr->fmr_map_cnt = 0; memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle)); memset(&mr->galpas, 0, sizeof(mr->galpas)); } /* end ehca_mr_deletenew() */ int ehca_init_mrmw_cache(void) { mr_cache = kmem_cache_create("ehca_cache_mr", sizeof(struct ehca_mr), 0, SLAB_HWCACHE_ALIGN, NULL); if (!mr_cache) return -ENOMEM; mw_cache = kmem_cache_create("ehca_cache_mw", sizeof(struct ehca_mw), 0, SLAB_HWCACHE_ALIGN, NULL); if (!mw_cache) { kmem_cache_destroy(mr_cache); mr_cache = NULL; return -ENOMEM; } return 0; } void ehca_cleanup_mrmw_cache(void) { if (mr_cache) kmem_cache_destroy(mr_cache); if (mw_cache) kmem_cache_destroy(mw_cache); } static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap, int dir) { if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) { ehca_top_bmap->dir[dir] = kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL); if (!ehca_top_bmap->dir[dir]) return -ENOMEM; /* Set map block to 0xFF according to EHCA_INVAL_ADDR */ memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE); } return 0; } static inline int ehca_init_bmap(struct ehca_bmap 
*ehca_bmap, int top, int dir) { if (!ehca_bmap_valid(ehca_bmap->top[top])) { ehca_bmap->top[top] = kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL); if (!ehca_bmap->top[top]) return -ENOMEM; /* Set map block to 0xFF according to EHCA_INVAL_ADDR */ memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE); } return ehca_init_top_bmap(ehca_bmap->top[top], dir); } static inline int ehca_calc_index(unsigned long i, unsigned long s) { return (i >> s) & EHCA_INDEX_MASK; } void ehca_destroy_busmap(void) { int top, dir; if (!ehca_bmap) return; for (top = 0; top < EHCA_MAP_ENTRIES; top++) { if (!ehca_bmap_valid(ehca_bmap->top[top])) continue; for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) { if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir])) continue; kfree(ehca_bmap->top[top]->dir[dir]); } kfree(ehca_bmap->top[top]); } kfree(ehca_bmap); ehca_bmap = NULL; } static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages) { unsigned long i, start_section, end_section; int top, dir, idx; if (!nr_pages) return 0; if (!ehca_bmap) { ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL); if (!ehca_bmap) return -ENOMEM; /* Set map block to 0xFF according to EHCA_INVAL_ADDR */ memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE); } start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE; end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE; for (i = start_section; i < end_section; i++) { int ret; top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT); dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT); idx = i & EHCA_INDEX_MASK; ret = ehca_init_bmap(ehca_bmap, top, dir); if (ret) { ehca_destroy_busmap(); return ret; } ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len; ehca_mr_len += EHCA_SECTSIZE; } return 0; } static int ehca_is_hugepage(unsigned long pfn) { int page_order; if (pfn & EHCA_HUGEPAGE_PFN_MASK) return 0; page_order = compound_order(pfn_to_page(pfn)); if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT) return 0; return 1; } static int 
ehca_create_busmap_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg) { int ret; unsigned long pfn, start_pfn, end_pfn, nr_pages; if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE) return ehca_update_busmap(initial_pfn, total_nr_pages); /* Given chunk is >= 16GB -> check for hugepages */ start_pfn = initial_pfn; end_pfn = initial_pfn + total_nr_pages; pfn = start_pfn; while (pfn < end_pfn) { if (ehca_is_hugepage(pfn)) { /* Add mem found in front of the hugepage */ nr_pages = pfn - start_pfn; ret = ehca_update_busmap(start_pfn, nr_pages); if (ret) return ret; /* Skip the hugepage */ pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE); start_pfn = pfn; } else pfn += (EHCA_SECTSIZE / PAGE_SIZE); } /* Add mem found behind the hugepage(s) */ nr_pages = pfn - start_pfn; return ehca_update_busmap(start_pfn, nr_pages); } int ehca_create_busmap(void) { int ret; ehca_mr_len = 0; ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL, ehca_create_busmap_callback); return ret; } static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca, struct ehca_mr *e_mr, struct ehca_mr_pginfo *pginfo) { int top; u64 hret, *kpage; kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL); if (!kpage) { ehca_err(&shca->ib_device, "kpage alloc failed"); return -ENOMEM; } for (top = 0; top < EHCA_MAP_ENTRIES; top++) { if (!ehca_bmap_valid(ehca_bmap->top[top])) continue; hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo); if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS)) break; } ehca_free_fw_ctrlblock(kpage); if (hret == H_SUCCESS) return 0; /* Everything is fine */ else { ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, " "h_ret=%lli e_mr=%p top=%x lkey=%x " "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top, e_mr->ib.ib_mr.lkey, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle); return ehca2ib_return_code(hret); } } static u64 ehca_map_vaddr(void *caddr) { int top, dir, idx; unsigned long abs_addr, offset; u64 entry; if (!ehca_bmap) return 
EHCA_INVAL_ADDR; abs_addr = virt_to_abs(caddr); top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT); if (!ehca_bmap_valid(ehca_bmap->top[top])) return EHCA_INVAL_ADDR; dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT); if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir])) return EHCA_INVAL_ADDR; idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT); entry = ehca_bmap->top[top]->dir[dir]->ent[idx]; if (ehca_bmap_valid(entry)) { offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1); return entry | offset; } else return EHCA_INVAL_ADDR; } static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { return dma_addr == EHCA_INVAL_ADDR; } static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { if (cpu_addr) return ehca_map_vaddr(cpu_addr); else return EHCA_INVAL_ADDR; } static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { /* This is only a stub; nothing to be done here */ } static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { u64 addr; if (offset + size > PAGE_SIZE) return EHCA_INVAL_ADDR; addr = ehca_map_vaddr(page_address(page)); if (!ehca_dma_mapping_error(dev, addr)) addr += offset; return addr; } static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { /* This is only a stub; nothing to be done here */ } static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { struct scatterlist *sg; int i; for_each_sg(sgl, sg, nents, i) { u64 addr; addr = ehca_map_vaddr(sg_virt(sg)); if (ehca_dma_mapping_error(dev, addr)) return 0; sg->dma_address = addr; sg->dma_length = sg->length; } return nents; } static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum 
dma_data_direction direction) { /* This is only a stub; nothing to be done here */ } static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg) { return sg->dma_address; } static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg) { return sg->length; } static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); } static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_device(dev->dma_device, addr, size, dir); } static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size, u64 *dma_handle, gfp_t flag) { struct page *p; void *addr = NULL; u64 dma_addr; p = alloc_pages(flag, get_order(size)); if (p) { addr = page_address(p); dma_addr = ehca_map_vaddr(addr); if (ehca_dma_mapping_error(dev, dma_addr)) { free_pages((unsigned long)addr, get_order(size)); return NULL; } if (dma_handle) *dma_handle = dma_addr; return addr; } return NULL; } static void ehca_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, u64 dma_handle) { if (cpu_addr && size) free_pages((unsigned long)cpu_addr, get_order(size)); } struct ib_dma_mapping_ops ehca_dma_mapping_ops = { .mapping_error = ehca_dma_mapping_error, .map_single = ehca_dma_map_single, .unmap_single = ehca_dma_unmap_single, .map_page = ehca_dma_map_page, .unmap_page = ehca_dma_unmap_page, .map_sg = ehca_dma_map_sg, .unmap_sg = ehca_dma_unmap_sg, .dma_address = ehca_dma_address, .dma_len = ehca_dma_len, .sync_single_for_cpu = ehca_dma_sync_single_for_cpu, .sync_single_for_device = ehca_dma_sync_single_for_device, .alloc_coherent = ehca_dma_alloc_coherent, .free_coherent = ehca_dma_free_coherent, };
gpl-2.0
Entropy512/linux_kernel_sgh-i997r
drivers/pcmcia/m32r_pcc.c
803
16888
/* * drivers/pcmcia/m32r_pcc.c * * Device driver for the PCMCIA functionality of M32R. * * Copyright (c) 2001, 2002, 2003, 2004 * Hiroyuki Kondo, Naoto Sugai, Hayato Fujiwara */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/bitops.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/system.h> #include <asm/addrspace.h> #include <pcmcia/cs_types.h> #include <pcmcia/ss.h> #include <pcmcia/cs.h> /* XXX: should be moved into asm/irq.h */ #define PCC0_IRQ 24 #define PCC1_IRQ 25 #include "m32r_pcc.h" #define CHAOS_PCC_DEBUG #ifdef CHAOS_PCC_DEBUG static volatile u_short dummy_readbuf; #endif #define PCC_DEBUG_DBEX /* Poll status interval -- 0 means default to interrupt */ static int poll_interval = 0; typedef enum pcc_space { as_none = 0, as_comm, as_attr, as_io } pcc_as_t; typedef struct pcc_socket { u_short type, flags; struct pcmcia_socket socket; unsigned int number; unsigned int ioaddr; u_long mapaddr; u_long base; /* PCC register base */ u_char cs_irq, intr; pccard_io_map io_map[MAX_IO_WIN]; pccard_mem_map mem_map[MAX_WIN]; u_char io_win; u_char mem_win; pcc_as_t current_space; u_char last_iodbex; #ifdef CHAOS_PCC_DEBUG u_char last_iosize; #endif #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc; #endif } pcc_socket_t; static int pcc_sockets = 0; static pcc_socket_t socket[M32R_MAX_PCC] = { { 0, }, /* ... 
*/ }; /*====================================================================*/ static unsigned int pcc_get(u_short, unsigned int); static void pcc_set(u_short, unsigned int , unsigned int ); static DEFINE_SPINLOCK(pcc_lock); void pcc_iorw(int sock, unsigned long port, void *buf, size_t size, size_t nmemb, int wr, int flag) { u_long addr; u_long flags; int need_ex; #ifdef PCC_DEBUG_DBEX int _dbex; #endif pcc_socket_t *t = &socket[sock]; #ifdef CHAOS_PCC_DEBUG int map_changed = 0; #endif /* Need lock ? */ spin_lock_irqsave(&pcc_lock, flags); /* * Check if need dbex */ need_ex = (size > 1 && flag == 0) ? PCMOD_DBEX : 0; #ifdef PCC_DEBUG_DBEX _dbex = need_ex; need_ex = 0; #endif /* * calculate access address */ addr = t->mapaddr + port - t->ioaddr + KSEG1; /* XXX */ /* * Check current mapping */ if (t->current_space != as_io || t->last_iodbex != need_ex) { u_long cbsz; /* * Disable first */ pcc_set(sock, PCCR, 0); /* * Set mode and io address */ cbsz = (t->flags & MAP_16BIT) ? 0 : PCMOD_CBSZ; pcc_set(sock, PCMOD, PCMOD_AS_IO | cbsz | need_ex); pcc_set(sock, PCADR, addr & 0x1ff00000); /* * Enable and read it */ pcc_set(sock, PCCR, 1); #ifdef CHAOS_PCC_DEBUG #if 0 map_changed = (t->current_space == as_attr && size == 2); /* XXX */ #else map_changed = 1; #endif #endif t->current_space = as_io; } /* * access to IO space */ if (size == 1) { /* Byte */ unsigned char *bp = (unsigned char *)buf; #ifdef CHAOS_DEBUG if (map_changed) { dummy_readbuf = readb(addr); } #endif if (wr) { /* write Byte */ while (nmemb--) { writeb(*bp++, addr); } } else { /* read Byte */ while (nmemb--) { *bp++ = readb(addr); } } } else { /* Word */ unsigned short *bp = (unsigned short *)buf; #ifdef CHAOS_PCC_DEBUG if (map_changed) { dummy_readbuf = readw(addr); } #endif if (wr) { /* write Word */ while (nmemb--) { #ifdef PCC_DEBUG_DBEX if (_dbex) { unsigned char *cp = (unsigned char *)bp; unsigned short tmp; tmp = cp[1] << 8 | cp[0]; writew(tmp, addr); bp++; } else #endif writew(*bp++, addr); } } else 
{ /* read Word */ while (nmemb--) { #ifdef PCC_DEBUG_DBEX if (_dbex) { unsigned char *cp = (unsigned char *)bp; unsigned short tmp; tmp = readw(addr); cp[0] = tmp & 0xff; cp[1] = (tmp >> 8) & 0xff; bp++; } else #endif *bp++ = readw(addr); } } } #if 1 /* addr is no longer used */ if ((addr = pcc_get(sock, PCIRC)) & PCIRC_BWERR) { printk("m32r_pcc: BWERR detected : port 0x%04lx : iosize %dbit\n", port, size * 8); pcc_set(sock, PCIRC, addr); } #endif /* * save state */ t->last_iosize = size; t->last_iodbex = need_ex; /* Need lock ? */ spin_unlock_irqrestore(&pcc_lock,flags); return; } void pcc_ioread(int sock, unsigned long port, void *buf, size_t size, size_t nmemb, int flag) { pcc_iorw(sock, port, buf, size, nmemb, 0, flag); } void pcc_iowrite(int sock, unsigned long port, void *buf, size_t size, size_t nmemb, int flag) { pcc_iorw(sock, port, buf, size, nmemb, 1, flag); } /*====================================================================*/ #define IS_REGISTERED 0x2000 #define IS_ALIVE 0x8000 typedef struct pcc_t { char *name; u_short flags; } pcc_t; static pcc_t pcc[] = { { "xnux2", 0 }, { "xnux2", 0 }, }; static irqreturn_t pcc_interrupt(int, void *); /*====================================================================*/ static struct timer_list poll_timer; static unsigned int pcc_get(u_short sock, unsigned int reg) { return inl(socket[sock].base + reg); } static void pcc_set(u_short sock, unsigned int reg, unsigned int data) { outl(data, socket[sock].base + reg); } /*====================================================================== See if a card is present, powered up, in IO mode, and already bound to a (non PC Card) Linux driver. We leave these alone. We make an exception for cards that seem to be serial devices. 
======================================================================*/ static int __init is_alive(u_short sock) { unsigned int stat; unsigned int f; stat = pcc_get(sock, PCIRC); f = (stat & (PCIRC_CDIN1 | PCIRC_CDIN2)) >> 16; if(!f){ printk("m32r_pcc: No Card is detected at socket %d : stat = 0x%08x\n",stat,sock); return 0; } if(f!=3) printk("m32r_pcc: Insertion fail (%.8x) at socket %d\n",stat,sock); else printk("m32r_pcc: Card is Inserted at socket %d(%.8x)\n",sock,stat); return 0; } static void add_pcc_socket(ulong base, int irq, ulong mapaddr, unsigned int ioaddr) { pcc_socket_t *t = &socket[pcc_sockets]; /* add sockets */ t->ioaddr = ioaddr; t->mapaddr = mapaddr; t->base = base; #ifdef CHAOS_PCC_DEBUG t->flags = MAP_16BIT; #else t->flags = 0; #endif if (is_alive(pcc_sockets)) t->flags |= IS_ALIVE; /* add pcc */ if (t->base > 0) { request_region(t->base, 0x20, "m32r-pcc"); } printk(KERN_INFO " %s ", pcc[pcc_sockets].name); printk("pcc at 0x%08lx\n", t->base); /* Update socket interrupt information, capabilities */ t->socket.features |= (SS_CAP_PCCARD | SS_CAP_STATIC_MAP); t->socket.map_size = M32R_PCC_MAPSIZE; t->socket.io_offset = ioaddr; /* use for io access offset */ t->socket.irq_mask = 0; t->socket.pci_irq = 2 + pcc_sockets; /* XXX */ request_irq(irq, pcc_interrupt, 0, "m32r-pcc", pcc_interrupt); pcc_sockets++; return; } /*====================================================================*/ static irqreturn_t pcc_interrupt(int irq, void *dev) { int i, j, irc; u_int events, active; int handled = 0; pr_debug("m32r_pcc: pcc_interrupt(%d)\n", irq); for (j = 0; j < 20; j++) { active = 0; for (i = 0; i < pcc_sockets; i++) { if ((socket[i].cs_irq != irq) && (socket[i].socket.pci_irq != irq)) continue; handled = 1; irc = pcc_get(i, PCIRC); irc >>=16; pr_debug("m32r_pcc: interrupt: socket %d pcirc 0x%02x ", i, irc); if (!irc) continue; events = (irc) ? SS_DETECT : 0; events |= (pcc_get(i,PCCR) & PCCR_PCEN) ? 
SS_READY : 0; pr_debug("m32r_pcc: event 0x%02x\n", events); if (events) pcmcia_parse_events(&socket[i].socket, events); active |= events; active = 0; } if (!active) break; } if (j == 20) printk(KERN_NOTICE "m32r-pcc: infinite loop in interrupt handler\n"); pr_debug("m32r_pcc: interrupt done\n"); return IRQ_RETVAL(handled); } /* pcc_interrupt */ static void pcc_interrupt_wrapper(u_long data) { pcc_interrupt(0, NULL); init_timer(&poll_timer); poll_timer.expires = jiffies + poll_interval; add_timer(&poll_timer); } /*====================================================================*/ static int _pcc_get_status(u_short sock, u_int *value) { u_int status; status = pcc_get(sock,PCIRC); *value = ((status & PCIRC_CDIN1) && (status & PCIRC_CDIN2)) ? SS_DETECT : 0; status = pcc_get(sock,PCCR); #if 0 *value |= (status & PCCR_PCEN) ? SS_READY : 0; #else *value |= SS_READY; /* XXX: always */ #endif status = pcc_get(sock,PCCSIGCR); *value |= (status & PCCSIGCR_VEN) ? SS_POWERON : 0; pr_debug("m32r_pcc: GetStatus(%d) = %#4.4x\n", sock, *value); return 0; } /* _get_status */ /*====================================================================*/ static int _pcc_set_socket(u_short sock, socket_state_t *state) { u_long reg = 0; pr_debug("m32r_pcc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " "io_irq %d, csc_mask %#2.2x)", sock, state->flags, state->Vcc, state->Vpp, state->io_irq, state->csc_mask); if (state->Vcc) { /* * 5V only */ if (state->Vcc == 50) { reg |= PCCSIGCR_VEN; } else { return -EINVAL; } } if (state->flags & SS_RESET) { pr_debug("m32r_pcc: :RESET\n"); reg |= PCCSIGCR_CRST; } if (state->flags & SS_OUTPUT_ENA){ pr_debug("m32r_pcc: :OUTPUT_ENA\n"); /* bit clear */ } else { reg |= PCCSIGCR_SEN; } pcc_set(sock,PCCSIGCR,reg); if(state->flags & SS_IOCARD){ pr_debug("m32r_pcc: :IOCARD"); } if (state->flags & SS_PWR_AUTO) { pr_debug("m32r_pcc: :PWR_AUTO"); } if (state->csc_mask & SS_DETECT) pr_debug("m32r_pcc: :csc-SS_DETECT"); if (state->flags & SS_IOCARD) { if 
(state->csc_mask & SS_STSCHG) pr_debug("m32r_pcc: :STSCHG"); } else { if (state->csc_mask & SS_BATDEAD) pr_debug("m32r_pcc: :BATDEAD"); if (state->csc_mask & SS_BATWARN) pr_debug("m32r_pcc: :BATWARN"); if (state->csc_mask & SS_READY) pr_debug("m32r_pcc: :READY"); } pr_debug("m32r_pcc: \n"); return 0; } /* _set_socket */ /*====================================================================*/ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io) { u_char map; pr_debug("m32r_pcc: SetIOMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed, (unsigned long long)io->start, (unsigned long long)io->stop); map = io->map; return 0; } /* _set_io_map */ /*====================================================================*/ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem) { u_char map = mem->map; u_long mode; u_long addr; pcc_socket_t *t = &socket[sock]; #ifdef CHAOS_PCC_DEBUG #if 0 pcc_as_t last = t->current_space; #endif #endif pr_debug("m32r_pcc: SetMemMap(%d, %d, %#2.2x, %d ns, " "%#llx, %#x)\n", sock, map, mem->flags, mem->speed, (unsigned long long)mem->static_start, mem->card_start); /* * sanity check */ if ((map > MAX_WIN) || (mem->card_start > 0x3ffffff)){ return -EINVAL; } /* * de-activate */ if ((mem->flags & MAP_ACTIVE) == 0) { t->current_space = as_none; return 0; } /* * Disable first */ pcc_set(sock, PCCR, 0); /* * Set mode */ if (mem->flags & MAP_ATTRIB) { mode = PCMOD_AS_ATTRIB | PCMOD_CBSZ; t->current_space = as_attr; } else { mode = 0; /* common memory */ t->current_space = as_comm; } pcc_set(sock, PCMOD, mode); /* * Set address */ addr = t->mapaddr + (mem->card_start & M32R_PCC_MAPMASK); pcc_set(sock, PCADR, addr); mem->static_start = addr + mem->card_start; /* * Enable again */ pcc_set(sock, PCCR, 1); #ifdef CHAOS_PCC_DEBUG #if 0 if (last != as_attr) { #else if (1) { #endif dummy_readbuf = *(u_char *)(addr + KSEG1); } #endif return 0; } /* _set_mem_map */ #if 0 /* driver model ordering issue */ 
/*====================================================================== Routines for accessing socket information and register dumps via /proc/bus/pccard/... ======================================================================*/ static ssize_t show_info(struct class_device *class_dev, char *buf) { pcc_socket_t *s = container_of(class_dev, struct pcc_socket, socket.dev); return sprintf(buf, "type: %s\nbase addr: 0x%08lx\n", pcc[s->type].name, s->base); } static ssize_t show_exca(struct class_device *class_dev, char *buf) { /* FIXME */ return 0; } static CLASS_DEVICE_ATTR(info, S_IRUGO, show_info, NULL); static CLASS_DEVICE_ATTR(exca, S_IRUGO, show_exca, NULL); #endif /*====================================================================*/ /* this is horribly ugly... proper locking needs to be done here at * some time... */ #define LOCKED(x) do { \ int retval; \ unsigned long flags; \ spin_lock_irqsave(&pcc_lock, flags); \ retval = x; \ spin_unlock_irqrestore(&pcc_lock, flags); \ return retval; \ } while (0) static int pcc_get_status(struct pcmcia_socket *s, u_int *value) { unsigned int sock = container_of(s, struct pcc_socket, socket)->number; if (socket[sock].flags & IS_ALIVE) { *value = 0; return -EINVAL; } LOCKED(_pcc_get_status(sock, value)); } static int pcc_set_socket(struct pcmcia_socket *s, socket_state_t *state) { unsigned int sock = container_of(s, struct pcc_socket, socket)->number; if (socket[sock].flags & IS_ALIVE) return -EINVAL; LOCKED(_pcc_set_socket(sock, state)); } static int pcc_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io) { unsigned int sock = container_of(s, struct pcc_socket, socket)->number; if (socket[sock].flags & IS_ALIVE) return -EINVAL; LOCKED(_pcc_set_io_map(sock, io)); } static int pcc_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *mem) { unsigned int sock = container_of(s, struct pcc_socket, socket)->number; if (socket[sock].flags & IS_ALIVE) return -EINVAL; LOCKED(_pcc_set_mem_map(sock, mem)); } static 
int pcc_init(struct pcmcia_socket *s) { pr_debug("m32r_pcc: init call\n"); return 0; } static struct pccard_operations pcc_operations = { .init = pcc_init, .get_status = pcc_get_status, .set_socket = pcc_set_socket, .set_io_map = pcc_set_io_map, .set_mem_map = pcc_set_mem_map, }; /*====================================================================*/ static struct platform_driver pcc_driver = { .driver = { .name = "pcc", .owner = THIS_MODULE, }, }; static struct platform_device pcc_device = { .name = "pcc", .id = 0, }; /*====================================================================*/ static int __init init_m32r_pcc(void) { int i, ret; ret = platform_driver_register(&pcc_driver); if (ret) return ret; ret = platform_device_register(&pcc_device); if (ret){ platform_driver_unregister(&pcc_driver); return ret; } printk(KERN_INFO "m32r PCC probe:\n"); pcc_sockets = 0; add_pcc_socket(M32R_PCC0_BASE, PCC0_IRQ, M32R_PCC0_MAPBASE, 0x1000); #ifdef CONFIG_M32RPCC_SLOT2 add_pcc_socket(M32R_PCC1_BASE, PCC1_IRQ, M32R_PCC1_MAPBASE, 0x2000); #endif if (pcc_sockets == 0) { printk("socket is not found.\n"); platform_device_unregister(&pcc_device); platform_driver_unregister(&pcc_driver); return -ENODEV; } /* Set up interrupt handler(s) */ for (i = 0 ; i < pcc_sockets ; i++) { socket[i].socket.dev.parent = &pcc_device.dev; socket[i].socket.ops = &pcc_operations; socket[i].socket.resource_ops = &pccard_static_ops; socket[i].socket.owner = THIS_MODULE; socket[i].number = i; ret = pcmcia_register_socket(&socket[i].socket); if (!ret) socket[i].flags |= IS_REGISTERED; #if 0 /* driver model ordering issue */ class_device_create_file(&socket[i].socket.dev, &class_device_attr_info); class_device_create_file(&socket[i].socket.dev, &class_device_attr_exca); #endif } /* Finally, schedule a polling interrupt */ if (poll_interval != 0) { poll_timer.function = pcc_interrupt_wrapper; poll_timer.data = 0; init_timer(&poll_timer); poll_timer.expires = jiffies + poll_interval; 
add_timer(&poll_timer); } return 0; } /* init_m32r_pcc */ static void __exit exit_m32r_pcc(void) { int i; for (i = 0; i < pcc_sockets; i++) if (socket[i].flags & IS_REGISTERED) pcmcia_unregister_socket(&socket[i].socket); platform_device_unregister(&pcc_device); if (poll_interval != 0) del_timer_sync(&poll_timer); platform_driver_unregister(&pcc_driver); } /* exit_m32r_pcc */ module_init(init_m32r_pcc); module_exit(exit_m32r_pcc); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/
gpl-2.0
maxwen/primou-kernel-HELLBOY
kernel/cred.c
803
22448
/* Task credentials management - see Documentation/security/credentials.txt * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/cred.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/init_task.h> #include <linux/security.h> #include <linux/cn_proc.h> #if 0 #define kdebug(FMT, ...) \ printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__) #else #define kdebug(FMT, ...) \ no_printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__) #endif static struct kmem_cache *cred_jar; /* * The common credentials for the initial task's thread group */ #ifdef CONFIG_KEYS static struct thread_group_cred init_tgcred = { .usage = ATOMIC_INIT(2), .tgid = 0, .lock = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock), }; #endif /* * The initial credentials for the initial task */ struct cred init_cred = { .usage = ATOMIC_INIT(4), #ifdef CONFIG_DEBUG_CREDENTIALS .subscribers = ATOMIC_INIT(2), .magic = CRED_MAGIC, #endif .securebits = SECUREBITS_DEFAULT, .cap_inheritable = CAP_EMPTY_SET, .cap_permitted = CAP_FULL_SET, .cap_effective = CAP_FULL_SET, .cap_bset = CAP_FULL_SET, .user = INIT_USER, .user_ns = &init_user_ns, .group_info = &init_groups, #ifdef CONFIG_KEYS .tgcred = &init_tgcred, #endif }; static inline void set_cred_subscribers(struct cred *cred, int n) { #ifdef CONFIG_DEBUG_CREDENTIALS atomic_set(&cred->subscribers, n); #endif } static inline int read_cred_subscribers(const struct cred *cred) { #ifdef CONFIG_DEBUG_CREDENTIALS return atomic_read(&cred->subscribers); #else return 0; #endif } static inline void alter_cred_subscribers(const 
struct cred *_cred, int n) { #ifdef CONFIG_DEBUG_CREDENTIALS struct cred *cred = (struct cred *) _cred; atomic_add(n, &cred->subscribers); #endif } /* * Dispose of the shared task group credentials */ #ifdef CONFIG_KEYS static void release_tgcred_rcu(struct rcu_head *rcu) { struct thread_group_cred *tgcred = container_of(rcu, struct thread_group_cred, rcu); BUG_ON(atomic_read(&tgcred->usage) != 0); key_put(tgcred->session_keyring); key_put(tgcred->process_keyring); kfree(tgcred); } #endif /* * Release a set of thread group credentials. */ static void release_tgcred(struct cred *cred) { #ifdef CONFIG_KEYS struct thread_group_cred *tgcred = cred->tgcred; if (atomic_dec_and_test(&tgcred->usage)) call_rcu(&tgcred->rcu, release_tgcred_rcu); #endif } /* * The RCU callback to actually dispose of a set of credentials */ static void put_cred_rcu(struct rcu_head *rcu) { struct cred *cred = container_of(rcu, struct cred, rcu); kdebug("put_cred_rcu(%p)", cred); #ifdef CONFIG_DEBUG_CREDENTIALS if (cred->magic != CRED_MAGIC_DEAD || atomic_read(&cred->usage) != 0 || read_cred_subscribers(cred) != 0) panic("CRED: put_cred_rcu() sees %p with" " mag %x, put %p, usage %d, subscr %d\n", cred, cred->magic, cred->put_addr, atomic_read(&cred->usage), read_cred_subscribers(cred)); #else if (atomic_read(&cred->usage) != 0) panic("CRED: put_cred_rcu() sees %p with usage %d\n", cred, atomic_read(&cred->usage)); #endif security_cred_free(cred); key_put(cred->thread_keyring); key_put(cred->request_key_auth); release_tgcred(cred); if (cred->group_info) put_group_info(cred->group_info); free_uid(cred->user); kmem_cache_free(cred_jar, cred); } /** * __put_cred - Destroy a set of credentials * @cred: The record to release * * Destroy a set of credentials on which no references remain. 
*/ void __put_cred(struct cred *cred) { kdebug("__put_cred(%p{%d,%d})", cred, atomic_read(&cred->usage), read_cred_subscribers(cred)); BUG_ON(atomic_read(&cred->usage) != 0); #ifdef CONFIG_DEBUG_CREDENTIALS BUG_ON(read_cred_subscribers(cred) != 0); cred->magic = CRED_MAGIC_DEAD; cred->put_addr = __builtin_return_address(0); #endif BUG_ON(cred == current->cred); BUG_ON(cred == current->real_cred); call_rcu(&cred->rcu, put_cred_rcu); } EXPORT_SYMBOL(__put_cred); /* * Clean up a task's credentials when it exits */ void exit_creds(struct task_struct *tsk) { struct cred *cred; kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred, atomic_read(&tsk->cred->usage), read_cred_subscribers(tsk->cred)); cred = (struct cred *) tsk->real_cred; tsk->real_cred = NULL; validate_creds(cred); alter_cred_subscribers(cred, -1); put_cred(cred); cred = (struct cred *) tsk->cred; tsk->cred = NULL; validate_creds(cred); alter_cred_subscribers(cred, -1); put_cred(cred); cred = (struct cred *) tsk->replacement_session_keyring; if (cred) { tsk->replacement_session_keyring = NULL; validate_creds(cred); put_cred(cred); } } /** * get_task_cred - Get another task's objective credentials * @task: The task to query * * Get the objective credentials of a task, pinning them so that they can't go * away. Accessing a task's credentials directly is not permitted. * * The caller must also make sure task doesn't get deleted, either by holding a * ref on task or by holding tasklist_lock to prevent it from being unlinked. */ const struct cred *get_task_cred(struct task_struct *task) { const struct cred *cred; rcu_read_lock(); do { cred = __task_cred((task)); BUG_ON(!cred); } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage)); rcu_read_unlock(); return cred; } /* * Allocate blank credentials, such that the credentials can be filled in at a * later date without risk of ENOMEM. 
*/ struct cred *cred_alloc_blank(void) { struct cred *new; new = kmem_cache_zalloc(cred_jar, GFP_KERNEL); if (!new) return NULL; #ifdef CONFIG_KEYS new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL); if (!new->tgcred) { kmem_cache_free(cred_jar, new); return NULL; } atomic_set(&new->tgcred->usage, 1); #endif atomic_set(&new->usage, 1); #ifdef CONFIG_DEBUG_CREDENTIALS new->magic = CRED_MAGIC; #endif if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) goto error; return new; error: abort_creds(new); return NULL; } /** * prepare_creds - Prepare a new set of credentials for modification * * Prepare a new set of task credentials for modification. A task's creds * shouldn't generally be modified directly, therefore this function is used to * prepare a new copy, which the caller then modifies and then commits by * calling commit_creds(). * * Preparation involves making a copy of the objective creds for modification. * * Returns a pointer to the new creds-to-be if successful, NULL otherwise. * * Call commit_creds() or abort_creds() to clean up. 
*/ struct cred *prepare_creds(void) { struct task_struct *task = current; const struct cred *old; struct cred *new; validate_process_creds(); new = kmem_cache_alloc(cred_jar, GFP_KERNEL); if (!new) return NULL; kdebug("prepare_creds() alloc %p", new); old = task->cred; memcpy(new, old, sizeof(struct cred)); atomic_set(&new->usage, 1); set_cred_subscribers(new, 0); get_group_info(new->group_info); get_uid(new->user); #ifdef CONFIG_KEYS key_get(new->thread_keyring); key_get(new->request_key_auth); atomic_inc(&new->tgcred->usage); #endif #ifdef CONFIG_SECURITY new->security = NULL; #endif if (security_prepare_creds(new, old, GFP_KERNEL) < 0) goto error; validate_creds(new); return new; error: abort_creds(new); return NULL; } EXPORT_SYMBOL(prepare_creds); /* * Prepare credentials for current to perform an execve() * - The caller must hold ->cred_guard_mutex */ struct cred *prepare_exec_creds(void) { struct thread_group_cred *tgcred = NULL; struct cred *new; #ifdef CONFIG_KEYS tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); if (!tgcred) return NULL; #endif new = prepare_creds(); if (!new) { kfree(tgcred); return new; } #ifdef CONFIG_KEYS /* newly exec'd tasks don't get a thread keyring */ key_put(new->thread_keyring); new->thread_keyring = NULL; /* create a new per-thread-group creds for all this set of threads to * share */ memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred)); atomic_set(&tgcred->usage, 1); spin_lock_init(&tgcred->lock); /* inherit the session keyring; new process keyring */ key_get(tgcred->session_keyring); tgcred->process_keyring = NULL; release_tgcred(new); new->tgcred = tgcred; #endif return new; } /* * Copy credentials for the new process created by fork() * * We share if we can, but under some circumstances we have to generate a new * set. 
* * The new process gets the current process's subjective credentials as its * objective and subjective credentials */ int copy_creds(struct task_struct *p, unsigned long clone_flags) { #ifdef CONFIG_KEYS struct thread_group_cred *tgcred; #endif struct cred *new; int ret; if ( #ifdef CONFIG_KEYS !p->cred->thread_keyring && #endif clone_flags & CLONE_THREAD ) { p->real_cred = get_cred(p->cred); get_cred(p->cred); alter_cred_subscribers(p->cred, 2); kdebug("share_creds(%p{%d,%d})", p->cred, atomic_read(&p->cred->usage), read_cred_subscribers(p->cred)); atomic_inc(&p->cred->user->processes); return 0; } new = prepare_creds(); if (!new) return -ENOMEM; if (clone_flags & CLONE_NEWUSER) { ret = create_user_ns(new); if (ret < 0) goto error_put; } /* cache user_ns in cred. Doesn't need a refcount because it will * stay pinned by cred->user */ new->user_ns = new->user->user_ns; #ifdef CONFIG_KEYS /* new threads get their own thread keyrings if their parent already * had one */ if (new->thread_keyring) { key_put(new->thread_keyring); new->thread_keyring = NULL; if (clone_flags & CLONE_THREAD) install_thread_keyring_to_cred(new); } /* we share the process and session keyrings between all the threads in * a process - this is slightly icky as we violate COW credentials a * bit */ if (!(clone_flags & CLONE_THREAD)) { tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); if (!tgcred) { ret = -ENOMEM; goto error_put; } atomic_set(&tgcred->usage, 1); spin_lock_init(&tgcred->lock); tgcred->process_keyring = NULL; tgcred->session_keyring = key_get(new->tgcred->session_keyring); release_tgcred(new); new->tgcred = tgcred; } #endif atomic_inc(&new->user->processes); p->cred = p->real_cred = get_cred(new); alter_cred_subscribers(new, 2); validate_creds(new); return 0; error_put: put_cred(new); return ret; } /** * commit_creds - Install new credentials upon the current task * @new: The credentials to be assigned * * Install a new set of credentials to the current task, using RCU to replace * the 
old set. Both the objective and the subjective credentials pointers are * updated. This function may not be called if the subjective credentials are * in an overridden state. * * This function eats the caller's reference to the new credentials. * * Always returns 0 thus allowing this function to be tail-called at the end * of, say, sys_setgid(). */ int commit_creds(struct cred *new) { struct task_struct *task = current; const struct cred *old = task->real_cred; kdebug("commit_creds(%p{%d,%d})", new, atomic_read(&new->usage), read_cred_subscribers(new)); BUG_ON(task->cred != old); #ifdef CONFIG_DEBUG_CREDENTIALS BUG_ON(read_cred_subscribers(old) < 2); validate_creds(old); validate_creds(new); #endif BUG_ON(atomic_read(&new->usage) < 1); get_cred(new); /* we will require a ref for the subj creds too */ /* dumpability changes */ if (old->euid != new->euid || old->egid != new->egid || old->fsuid != new->fsuid || old->fsgid != new->fsgid || !cap_issubset(new->cap_permitted, old->cap_permitted)) { if (task->mm) set_dumpable(task->mm, suid_dumpable); task->pdeath_signal = 0; smp_wmb(); } /* alter the thread keyring */ if (new->fsuid != old->fsuid) key_fsuid_changed(task); if (new->fsgid != old->fsgid) key_fsgid_changed(task); /* do it * - What if a process setreuid()'s and this brings the * new uid over his NPROC rlimit? We can check this now * cheaply with the new uid cache, so if it matters * we should be checking for it. 
-DaveM */ alter_cred_subscribers(new, 2); if (new->user != old->user) atomic_inc(&new->user->processes); rcu_assign_pointer(task->real_cred, new); rcu_assign_pointer(task->cred, new); if (new->user != old->user) atomic_dec(&old->user->processes); alter_cred_subscribers(old, -2); /* send notifications */ if (new->uid != old->uid || new->euid != old->euid || new->suid != old->suid || new->fsuid != old->fsuid) proc_id_connector(task, PROC_EVENT_UID); if (new->gid != old->gid || new->egid != old->egid || new->sgid != old->sgid || new->fsgid != old->fsgid) proc_id_connector(task, PROC_EVENT_GID); /* release the old obj and subj refs both */ put_cred(old); put_cred(old); return 0; } EXPORT_SYMBOL(commit_creds); /** * abort_creds - Discard a set of credentials and unlock the current task * @new: The credentials that were going to be applied * * Discard a set of credentials that were under construction and unlock the * current task. */ void abort_creds(struct cred *new) { kdebug("abort_creds(%p{%d,%d})", new, atomic_read(&new->usage), read_cred_subscribers(new)); #ifdef CONFIG_DEBUG_CREDENTIALS BUG_ON(read_cred_subscribers(new) != 0); #endif BUG_ON(atomic_read(&new->usage) < 1); put_cred(new); } EXPORT_SYMBOL(abort_creds); /** * override_creds - Override the current process's subjective credentials * @new: The credentials to be assigned * * Install a set of temporary override subjective credentials on the current * process, returning the old set for later reversion. 
*/ const struct cred *override_creds(const struct cred *new) { const struct cred *old = current->cred; kdebug("override_creds(%p{%d,%d})", new, atomic_read(&new->usage), read_cred_subscribers(new)); validate_creds(old); validate_creds(new); get_cred(new); alter_cred_subscribers(new, 1); rcu_assign_pointer(current->cred, new); alter_cred_subscribers(old, -1); kdebug("override_creds() = %p{%d,%d}", old, atomic_read(&old->usage), read_cred_subscribers(old)); return old; } EXPORT_SYMBOL(override_creds); /** * revert_creds - Revert a temporary subjective credentials override * @old: The credentials to be restored * * Revert a temporary set of override subjective credentials to an old set, * discarding the override set. */ void revert_creds(const struct cred *old) { const struct cred *override = current->cred; kdebug("revert_creds(%p{%d,%d})", old, atomic_read(&old->usage), read_cred_subscribers(old)); validate_creds(old); validate_creds(override); alter_cred_subscribers(old, 1); rcu_assign_pointer(current->cred, old); alter_cred_subscribers(override, -1); put_cred(override); } EXPORT_SYMBOL(revert_creds); /* * initialise the credentials stuff */ void __init cred_init(void) { /* allocate a slab in which we can store credentials */ cred_jar = kmem_cache_create("cred_jar", sizeof(struct cred), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); } /** * prepare_kernel_cred - Prepare a set of credentials for a kernel service * @daemon: A userspace daemon to be used as a reference * * Prepare a set of credentials for a kernel service. This can then be used to * override a task's own credentials so that work can be done on behalf of that * task that requires a different subjective context. * * @daemon is used to provide a base for the security record, but can be NULL. * If @daemon is supplied, then the security data will be derived from that; * otherwise they'll be set to 0 and no groups, full capabilities and no keys. * * The caller may change these controls afterwards if desired. 
* * Returns the new credentials or NULL if out of memory. * * Does not take, and does not return holding current->cred_replace_mutex. */ struct cred *prepare_kernel_cred(struct task_struct *daemon) { const struct cred *old; struct cred *new; new = kmem_cache_alloc(cred_jar, GFP_KERNEL); if (!new) return NULL; kdebug("prepare_kernel_cred() alloc %p", new); if (daemon) old = get_task_cred(daemon); else old = get_cred(&init_cred); validate_creds(old); *new = *old; atomic_set(&new->usage, 1); set_cred_subscribers(new, 0); get_uid(new->user); get_group_info(new->group_info); #ifdef CONFIG_KEYS atomic_inc(&init_tgcred.usage); new->tgcred = &init_tgcred; new->request_key_auth = NULL; new->thread_keyring = NULL; new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; #endif #ifdef CONFIG_SECURITY new->security = NULL; #endif if (security_prepare_creds(new, old, GFP_KERNEL) < 0) goto error; put_cred(old); validate_creds(new); return new; error: put_cred(new); put_cred(old); return NULL; } EXPORT_SYMBOL(prepare_kernel_cred); /** * set_security_override - Set the security ID in a set of credentials * @new: The credentials to alter * @secid: The LSM security ID to set * * Set the LSM security ID in a set of credentials so that the subjective * security is overridden when an alternative set of credentials is used. */ int set_security_override(struct cred *new, u32 secid) { return security_kernel_act_as(new, secid); } EXPORT_SYMBOL(set_security_override); /** * set_security_override_from_ctx - Set the security ID in a set of credentials * @new: The credentials to alter * @secctx: The LSM security context to generate the security ID from. * * Set the LSM security ID in a set of credentials so that the subjective * security is overridden when an alternative set of credentials is used. The * security ID is specified in string form as a security context to be * interpreted by the LSM. 
*/ int set_security_override_from_ctx(struct cred *new, const char *secctx) { u32 secid; int ret; ret = security_secctx_to_secid(secctx, strlen(secctx), &secid); if (ret < 0) return ret; return set_security_override(new, secid); } EXPORT_SYMBOL(set_security_override_from_ctx); /** * set_create_files_as - Set the LSM file create context in a set of credentials * @new: The credentials to alter * @inode: The inode to take the context from * * Change the LSM file creation context in a set of credentials to be the same * as the object context of the specified inode, so that the new inodes have * the same MAC context as that inode. */ int set_create_files_as(struct cred *new, struct inode *inode) { new->fsuid = inode->i_uid; new->fsgid = inode->i_gid; return security_kernel_create_files_as(new, inode); } EXPORT_SYMBOL(set_create_files_as); #ifdef CONFIG_DEBUG_CREDENTIALS bool creds_are_invalid(const struct cred *cred) { if (cred->magic != CRED_MAGIC) return true; #ifdef CONFIG_SECURITY_SELINUX /* * cred->security == NULL if security_cred_alloc_blank() or * security_prepare_creds() returned an error. */ if (selinux_is_enabled() && cred->security) { if ((unsigned long) cred->security < PAGE_SIZE) return true; if ((*(u32 *)cred->security & 0xffffff00) == (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)) return true; } #endif return false; } EXPORT_SYMBOL(creds_are_invalid); /* * dump invalid credentials */ static void dump_invalid_creds(const struct cred *cred, const char *label, const struct task_struct *tsk) { printk(KERN_ERR "CRED: %s credentials: %p %s%s%s\n", label, cred, cred == &init_cred ? "[init]" : "", cred == tsk->real_cred ? "[real]" : "", cred == tsk->cred ? 
"[eff]" : ""); printk(KERN_ERR "CRED: ->magic=%x, put_addr=%p\n", cred->magic, cred->put_addr); printk(KERN_ERR "CRED: ->usage=%d, subscr=%d\n", atomic_read(&cred->usage), read_cred_subscribers(cred)); printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n", cred->uid, cred->euid, cred->suid, cred->fsuid); printk(KERN_ERR "CRED: ->*gid = { %d,%d,%d,%d }\n", cred->gid, cred->egid, cred->sgid, cred->fsgid); #ifdef CONFIG_SECURITY printk(KERN_ERR "CRED: ->security is %p\n", cred->security); if ((unsigned long) cred->security >= PAGE_SIZE && (((unsigned long) cred->security & 0xffffff00) != (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))) printk(KERN_ERR "CRED: ->security {%x, %x}\n", ((u32*)cred->security)[0], ((u32*)cred->security)[1]); #endif } /* * report use of invalid credentials */ void __invalid_creds(const struct cred *cred, const char *file, unsigned line) { printk(KERN_ERR "CRED: Invalid credentials\n"); printk(KERN_ERR "CRED: At %s:%u\n", file, line); dump_invalid_creds(cred, "Specified", current); BUG(); } EXPORT_SYMBOL(__invalid_creds); /* * check the credentials on a process */ void __validate_process_creds(struct task_struct *tsk, const char *file, unsigned line) { if (tsk->cred == tsk->real_cred) { if (unlikely(read_cred_subscribers(tsk->cred) < 2 || creds_are_invalid(tsk->cred))) goto invalid_creds; } else { if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 || read_cred_subscribers(tsk->cred) < 1 || creds_are_invalid(tsk->real_cred) || creds_are_invalid(tsk->cred))) goto invalid_creds; } return; invalid_creds: printk(KERN_ERR "CRED: Invalid process credentials\n"); printk(KERN_ERR "CRED: At %s:%u\n", file, line); dump_invalid_creds(tsk->real_cred, "Real", tsk); if (tsk->cred != tsk->real_cred) dump_invalid_creds(tsk->cred, "Effective", tsk); else printk(KERN_ERR "CRED: Effective creds == Real creds\n"); BUG(); } EXPORT_SYMBOL(__validate_process_creds); /* * check creds for do_exit() */ void validate_creds_for_do_exit(struct task_struct 
*tsk) { kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})", tsk->real_cred, tsk->cred, atomic_read(&tsk->cred->usage), read_cred_subscribers(tsk->cred)); __validate_process_creds(tsk, __FILE__, __LINE__); } #endif /* CONFIG_DEBUG_CREDENTIALS */
gpl-2.0
PennPanda/linux
drivers/media/usb/gspca/stk014.c
1315
11105
/* * Syntek DV4000 (STK014) subdriver * * Copyright (C) 2008 Jean-Francois Moine (http://moinejf.free.fr) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "stk014" #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>"); MODULE_DESCRIPTION("Syntek DV4000 (STK014) USB Camera Driver"); MODULE_LICENSE("GPL"); #define QUALITY 50 /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ u8 jpeg_hdr[JPEG_HDR_SZ]; }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; /* -- read a register -- */ static u8 reg_r(struct gspca_dev *gspca_dev, __u16 index) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return 0; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; return 0; } return gspca_dev->usb_buf[0]; } /* -- write a register -- */ static void reg_w(struct gspca_dev *gspca_dev, __u16 index, __u16 value) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); if (ret < 0) { pr_err("reg_w err %d\n", ret); gspca_dev->usb_err = ret; } } /* -- get a bulk value (4 bytes) -- */ static void rcv_val(struct gspca_dev *gspca_dev, int ads) { struct usb_device *dev = gspca_dev->dev; int alen, ret; reg_w(gspca_dev, 0x634, (ads >> 16) & 0xff); reg_w(gspca_dev, 0x635, (ads >> 8) & 0xff); reg_w(gspca_dev, 0x636, ads & 0xff); reg_w(gspca_dev, 0x637, 0); reg_w(gspca_dev, 0x638, 4); /* len & 0xff */ reg_w(gspca_dev, 0x639, 0); /* len >> 8 */ reg_w(gspca_dev, 0x63a, 0); reg_w(gspca_dev, 0x63b, 0); reg_w(gspca_dev, 0x630, 5); if (gspca_dev->usb_err < 0) return; ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, 0x05), gspca_dev->usb_buf, 4, /* length */ &alen, 500); /* timeout in milliseconds */ if (ret < 0) { pr_err("rcv_val err %d\n", ret); gspca_dev->usb_err = ret; } } /* -- send a 
bulk value -- */ static void snd_val(struct gspca_dev *gspca_dev, int ads, unsigned int val) { struct usb_device *dev = gspca_dev->dev; int alen, ret; __u8 seq = 0; if (ads == 0x003f08) { reg_r(gspca_dev, 0x0704); seq = reg_r(gspca_dev, 0x0705); reg_r(gspca_dev, 0x0650); reg_w(gspca_dev, 0x654, seq); } else { reg_w(gspca_dev, 0x654, (ads >> 16) & 0xff); } reg_w(gspca_dev, 0x655, (ads >> 8) & 0xff); reg_w(gspca_dev, 0x656, ads & 0xff); reg_w(gspca_dev, 0x657, 0); reg_w(gspca_dev, 0x658, 0x04); /* size */ reg_w(gspca_dev, 0x659, 0); reg_w(gspca_dev, 0x65a, 0); reg_w(gspca_dev, 0x65b, 0); reg_w(gspca_dev, 0x650, 5); if (gspca_dev->usb_err < 0) return; gspca_dev->usb_buf[0] = val >> 24; gspca_dev->usb_buf[1] = val >> 16; gspca_dev->usb_buf[2] = val >> 8; gspca_dev->usb_buf[3] = val; ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 6), gspca_dev->usb_buf, 4, &alen, 500); /* timeout in milliseconds */ if (ret < 0) { pr_err("snd_val err %d\n", ret); gspca_dev->usb_err = ret; } else { if (ads == 0x003f08) { seq += 4; seq &= 0x3f; reg_w(gspca_dev, 0x705, seq); } } } /* set a camera parameter */ static void set_par(struct gspca_dev *gspca_dev, int parval) { snd_val(gspca_dev, 0x003f08, parval); } static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { int parval; parval = 0x06000000 /* whiteness */ + (val << 16); set_par(gspca_dev, parval); } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { int parval; parval = 0x07000000 /* contrast */ + (val << 16); set_par(gspca_dev, parval); } static void setcolors(struct gspca_dev *gspca_dev, s32 val) { int parval; parval = 0x08000000 /* saturation */ + (val << 16); set_par(gspca_dev, parval); } static void setlightfreq(struct gspca_dev *gspca_dev, s32 val) { set_par(gspca_dev, val == 1 ? 
0x33640000 /* 50 Hz */ : 0x33780000); /* 60 Hz */ } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { gspca_dev->cam.cam_mode = vga_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { u8 ret; /* check if the device responds */ usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); ret = reg_r(gspca_dev, 0x0740); if (gspca_dev->usb_err >= 0) { if (ret != 0xff) { pr_err("init reg: 0x%02x\n", ret); gspca_dev->usb_err = -EIO; } } return gspca_dev->usb_err; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret, value; /* create the JPEG header */ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); /* work on alternate 1 */ usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); set_par(gspca_dev, 0x10000000); set_par(gspca_dev, 0x00000000); set_par(gspca_dev, 0x8002e001); set_par(gspca_dev, 0x14000000); if (gspca_dev->pixfmt.width > 320) value = 0x8002e001; /* 640x480 */ else value = 0x4001f000; /* 320x240 */ set_par(gspca_dev, value); ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, gspca_dev->alt); if (ret < 0) { pr_err("set intf %d %d failed\n", gspca_dev->iface, gspca_dev->alt); gspca_dev->usb_err = ret; goto out; } reg_r(gspca_dev, 0x0630); rcv_val(gspca_dev, 0x000020); /* << (value ff ff ff ff) */ reg_r(gspca_dev, 0x0650); snd_val(gspca_dev, 0x000020, 0xffffffff); reg_w(gspca_dev, 0x0620, 0); reg_w(gspca_dev, 0x0630, 0); reg_w(gspca_dev, 0x0640, 0); reg_w(gspca_dev, 0x0650, 0); reg_w(gspca_dev, 0x0660, 0); set_par(gspca_dev, 0x09800000); /* Red ? */ set_par(gspca_dev, 0x0a800000); /* Green ? */ set_par(gspca_dev, 0x0b800000); /* Blue ? */ set_par(gspca_dev, 0x0d030000); /* Gamma ? 
*/ /* start the video flow */ set_par(gspca_dev, 0x01000000); set_par(gspca_dev, 0x01000000); if (gspca_dev->usb_err >= 0) PDEBUG(D_STREAM, "camera started alt: 0x%02x", gspca_dev->alt); out: return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct usb_device *dev = gspca_dev->dev; set_par(gspca_dev, 0x02000000); set_par(gspca_dev, 0x02000000); usb_set_interface(dev, gspca_dev->iface, 1); reg_r(gspca_dev, 0x0630); rcv_val(gspca_dev, 0x000020); /* << (value ff ff ff ff) */ reg_r(gspca_dev, 0x0650); snd_val(gspca_dev, 0x000020, 0xffffffff); reg_w(gspca_dev, 0x0620, 0); reg_w(gspca_dev, 0x0630, 0); reg_w(gspca_dev, 0x0640, 0); reg_w(gspca_dev, 0x0650, 0); reg_w(gspca_dev, 0x0660, 0); PDEBUG(D_STREAM, "camera stopped"); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; static unsigned char ffd9[] = {0xff, 0xd9}; /* a frame starts with: * - 0xff 0xfe * - 0x08 0x00 - length (little endian ?!) * - 4 bytes = size of whole frame (BE - including header) * - 0x00 0x0c * - 0xff 0xd8 * - .. 
JPEG image with escape sequences (ff 00) * (without ending - ff d9) */ if (data[0] == 0xff && data[1] == 0xfe) { gspca_frame_add(gspca_dev, LAST_PACKET, ffd9, 2); /* put the JPEG 411 header */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); /* beginning of the frame */ #define STKHDRSZ 12 data += STKHDRSZ; len -= STKHDRSZ; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; case V4L2_CID_SATURATION: setcolors(gspca_dev, ctrl->val); break; case V4L2_CID_POWER_LINE_FREQUENCY: setlightfreq(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 127); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 127); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 127); v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 1, V4L2_CID_POWER_LINE_FREQUENCY_50HZ); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x05e1, 
0x0893)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
mozilla-b2g/rpi-linux
net/sched/cls_api.c
1315
14245
/* * net/sched/cls_api.c Packet classifier API. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Changes: * * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/err.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> /* The list of all installed classifier types */ static struct tcf_proto_ops *tcf_proto_base __read_mostly; /* Protects list of registered TC modules. It is pure SMP lock. */ static DEFINE_RWLOCK(cls_mod_lock); /* Find classifier type by string name */ static const struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind) { const struct tcf_proto_ops *t = NULL; if (kind) { read_lock(&cls_mod_lock); for (t = tcf_proto_base; t; t = t->next) { if (nla_strcmp(kind, t->kind) == 0) { if (!try_module_get(t->owner)) t = NULL; break; } } read_unlock(&cls_mod_lock); } return t; } /* Register(unregister) new classifier type */ int register_tcf_proto_ops(struct tcf_proto_ops *ops) { struct tcf_proto_ops *t, **tp; int rc = -EEXIST; write_lock(&cls_mod_lock); for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next) if (!strcmp(ops->kind, t->kind)) goto out; ops->next = NULL; *tp = ops; rc = 0; out: write_unlock(&cls_mod_lock); return rc; } EXPORT_SYMBOL(register_tcf_proto_ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) { struct tcf_proto_ops *t, **tp; int rc = -ENOENT; write_lock(&cls_mod_lock); for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next) 
if (t == ops) break; if (!t) goto out; *tp = t->next; rc = 0; out: write_unlock(&cls_mod_lock); return rc; } EXPORT_SYMBOL(unregister_tcf_proto_ops); static int tfilter_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, unsigned long fh, int event); /* Select new prio value from the range, managed by kernel. */ static inline u32 tcf_auto_prio(struct tcf_proto *tp) { u32 first = TC_H_MAKE(0xC0000000U, 0U); if (tp) first = tp->prio - 1; return first; } /* Add/change/delete/get a filter node */ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; spinlock_t *root_lock; struct tcmsg *t; u32 protocol; u32 prio; u32 nprio; u32 parent; struct net_device *dev; struct Qdisc *q; struct tcf_proto **back, **chain; struct tcf_proto *tp; const struct tcf_proto_ops *tp_ops; const struct Qdisc_class_ops *cops; unsigned long cl; unsigned long fh; int err; int tp_created = 0; if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; replay: err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL); if (err < 0) return err; t = nlmsg_data(n); protocol = TC_H_MIN(t->tcm_info); prio = TC_H_MAJ(t->tcm_info); nprio = prio; parent = t->tcm_parent; cl = 0; if (prio == 0) { /* If no priority is given, user wants we allocated it. */ if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE)) return -ENOENT; prio = TC_H_MAKE(0x80000000U, 0U); } /* Find head of filter chain. */ /* Find link */ dev = __dev_get_by_index(net, t->tcm_ifindex); if (dev == NULL) return -ENODEV; /* Find qdisc */ if (!parent) { q = dev->qdisc; parent = q->handle; } else { q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); if (q == NULL) return -EINVAL; } /* Is it classful? */ cops = q->ops->cl_ops; if (!cops) return -EINVAL; if (cops->tcf_chain == NULL) return -EOPNOTSUPP; /* Do we search for filter, attached to class? 
*/ if (TC_H_MIN(parent)) { cl = cops->get(q, parent); if (cl == 0) return -ENOENT; } /* And the last stroke */ chain = cops->tcf_chain(q, cl); err = -EINVAL; if (chain == NULL) goto errout; /* Check the chain for existence of proto-tcf with this priority */ for (back = chain; (tp = *back) != NULL; back = &tp->next) { if (tp->prio >= prio) { if (tp->prio == prio) { if (!nprio || (tp->protocol != protocol && protocol)) goto errout; } else tp = NULL; break; } } root_lock = qdisc_root_sleeping_lock(q); if (tp == NULL) { /* Proto-tcf does not exist, create new one */ if (tca[TCA_KIND] == NULL || !protocol) goto errout; err = -ENOENT; if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE)) goto errout; /* Create new proto tcf */ err = -ENOBUFS; tp = kzalloc(sizeof(*tp), GFP_KERNEL); if (tp == NULL) goto errout; err = -ENOENT; tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]); if (tp_ops == NULL) { #ifdef CONFIG_MODULES struct nlattr *kind = tca[TCA_KIND]; char name[IFNAMSIZ]; if (kind != NULL && nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { rtnl_unlock(); request_module("cls_%s", name); rtnl_lock(); tp_ops = tcf_proto_lookup_ops(kind); /* We dropped the RTNL semaphore in order to * perform the module load. So, even if we * succeeded in loading the module we have to * replay the request. We indicate this using * -EAGAIN. */ if (tp_ops != NULL) { module_put(tp_ops->owner); err = -EAGAIN; } } #endif kfree(tp); goto errout; } tp->ops = tp_ops; tp->protocol = protocol; tp->prio = nprio ? 
: TC_H_MAJ(tcf_auto_prio(*back)); tp->q = q; tp->classify = tp_ops->classify; tp->classid = parent; err = tp_ops->init(tp); if (err != 0) { module_put(tp_ops->owner); kfree(tp); goto errout; } tp_created = 1; } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) goto errout; fh = tp->ops->get(tp, t->tcm_handle); if (fh == 0) { if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { spin_lock_bh(root_lock); *back = tp->next; spin_unlock_bh(root_lock); tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); tcf_destroy(tp); err = 0; goto errout; } err = -ENOENT; if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE)) goto errout; } else { switch (n->nlmsg_type) { case RTM_NEWTFILTER: err = -EEXIST; if (n->nlmsg_flags & NLM_F_EXCL) { if (tp_created) tcf_destroy(tp); goto errout; } break; case RTM_DELTFILTER: err = tp->ops->delete(tp, fh); if (err == 0) tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); goto errout; case RTM_GETTFILTER: err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); goto errout; default: err = -EINVAL; goto errout; } } err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh); if (err == 0) { if (tp_created) { spin_lock_bh(root_lock); tp->next = *back; *back = tp; spin_unlock_bh(root_lock); } tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); } else { if (tp_created) tcf_destroy(tp); } errout: if (cl) cops->put(q, cl); if (err == -EAGAIN) /* Replay the request. 
*/ goto replay; return err; } static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh, u32 portid, u32 seq, u16 flags, int event) { struct tcmsg *tcm; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); if (!nlh) goto out_nlmsg_trim; tcm = nlmsg_data(nlh); tcm->tcm_family = AF_UNSPEC; tcm->tcm__pad1 = 0; tcm->tcm__pad2 = 0; tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; tcm->tcm_parent = tp->classid; tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) goto nla_put_failure; tcm->tcm_handle = fh; if (RTM_DELTFILTER != event) { tcm->tcm_handle = 0; if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0) goto nla_put_failure; } nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; out_nlmsg_trim: nla_put_failure: nlmsg_trim(skb, b); return -1; } static int tfilter_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, unsigned long fh, int event) { struct sk_buff *skb; u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (tcf_fill_node(skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) { kfree_skb(skb); return -EINVAL; } return rtnetlink_send(skb, net, portid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); } struct tcf_dump_args { struct tcf_walker w; struct sk_buff *skb; struct netlink_callback *cb; }; static int tcf_node_dump(struct tcf_proto *tp, unsigned long n, struct tcf_walker *arg) { struct tcf_dump_args *a = (void *)arg; return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).portid, a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER); } /* called with RTNL */ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int t; int s_t; struct net_device *dev; struct Qdisc *q; struct tcf_proto *tp, **chain; struct tcmsg *tcm = nlmsg_data(cb->nlh); unsigned long cl = 0; const struct Qdisc_class_ops *cops; struct tcf_dump_args arg; if (nlmsg_len(cb->nlh) < sizeof(*tcm)) return skb->len; dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return skb->len; if (!tcm->tcm_parent) q = dev->qdisc; else q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); if (!q) goto out; cops = q->ops->cl_ops; if (!cops) goto errout; if (cops->tcf_chain == NULL) goto errout; if (TC_H_MIN(tcm->tcm_parent)) { cl = cops->get(q, tcm->tcm_parent); if (cl == 0) goto errout; } chain = cops->tcf_chain(q, cl); if (chain == NULL) goto errout; s_t = cb->args[0]; for (tp = *chain, t = 0; tp; tp = tp->next, t++) { if (t < s_t) continue; if (TC_H_MAJ(tcm->tcm_info) && TC_H_MAJ(tcm->tcm_info) != tp->prio) continue; if (TC_H_MIN(tcm->tcm_info) && TC_H_MIN(tcm->tcm_info) != tp->protocol) continue; if (t > s_t) memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); if (cb->args[1] == 0) { if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER) <= 0) break; cb->args[1] = 1; } if (tp->ops->walk == 
NULL) continue; arg.w.fn = tcf_node_dump; arg.skb = skb; arg.cb = cb; arg.w.stop = 0; arg.w.skip = cb->args[1] - 1; arg.w.count = 0; tp->ops->walk(tp, &arg.w); cb->args[1] = arg.w.count + 1; if (arg.w.stop) break; } cb->args[0] = t; errout: if (cl) cops->put(q, cl); out: return skb->len; } void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT if (exts->action) { tcf_action_destroy(exts->action, TCA_ACT_UNBIND); exts->action = NULL; } #endif } EXPORT_SYMBOL(tcf_exts_destroy); int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, struct tcf_exts *exts, const struct tcf_ext_map *map) { memset(exts, 0, sizeof(*exts)); #ifdef CONFIG_NET_CLS_ACT { struct tc_action *act; if (map->police && tb[map->police]) { act = tcf_action_init_1(net, tb[map->police], rate_tlv, "police", TCA_ACT_NOREPLACE, TCA_ACT_BIND); if (IS_ERR(act)) return PTR_ERR(act); act->type = TCA_OLD_COMPAT; exts->action = act; } else if (map->action && tb[map->action]) { act = tcf_action_init(net, tb[map->action], rate_tlv, NULL, TCA_ACT_NOREPLACE, TCA_ACT_BIND); if (IS_ERR(act)) return PTR_ERR(act); exts->action = act; } } #else if ((map->action && tb[map->action]) || (map->police && tb[map->police])) return -EOPNOTSUPP; #endif return 0; } EXPORT_SYMBOL(tcf_exts_validate); void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, struct tcf_exts *src) { #ifdef CONFIG_NET_CLS_ACT if (src->action) { struct tc_action *act; tcf_tree_lock(tp); act = dst->action; dst->action = src->action; tcf_tree_unlock(tp); if (act) tcf_action_destroy(act, TCA_ACT_UNBIND); } #endif } EXPORT_SYMBOL(tcf_exts_change); int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts, const struct tcf_ext_map *map) { #ifdef CONFIG_NET_CLS_ACT if (map->action && exts->action) { /* * again for backward compatible mode - we want * to work with both old and new modes of entering * tc data even if iproute2 was newer - jhs */ struct nlattr 
*nest; if (exts->action->type != TCA_OLD_COMPAT) { nest = nla_nest_start(skb, map->action); if (nest == NULL) goto nla_put_failure; if (tcf_action_dump(skb, exts->action, 0, 0) < 0) goto nla_put_failure; nla_nest_end(skb, nest); } else if (map->police) { nest = nla_nest_start(skb, map->police); if (nest == NULL) goto nla_put_failure; if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0) goto nla_put_failure; nla_nest_end(skb, nest); } } #endif return 0; nla_put_failure: __attribute__ ((unused)) return -1; } EXPORT_SYMBOL(tcf_exts_dump); int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts, const struct tcf_ext_map *map) { #ifdef CONFIG_NET_CLS_ACT if (exts->action) if (tcf_action_copy_stats(skb, exts->action, 1) < 0) goto nla_put_failure; #endif return 0; nla_put_failure: __attribute__ ((unused)) return -1; } EXPORT_SYMBOL(tcf_exts_dump_stats); static int __init tc_filter_init(void) { rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, tc_dump_tfilter, NULL); return 0; } subsys_initcall(tc_filter_init);
gpl-2.0
HelpMeRuth/ruthless
drivers/iommu/amd_iommu_init.c
2083
54638
/* * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. * Author: Joerg Roedel <joerg.roedel@amd.com> * Leo Duran <leo.duran@amd.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/pci.h> #include <linux/acpi.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/syscore_ops.h> #include <linux/interrupt.h> #include <linux/msi.h> #include <linux/amd-iommu.h> #include <linux/export.h> #include <acpi/acpi.h> #include <asm/pci-direct.h> #include <asm/iommu.h> #include <asm/gart.h> #include <asm/x86_init.h> #include <asm/iommu_table.h> #include <asm/io_apic.h> #include <asm/irq_remapping.h> #include "amd_iommu_proto.h" #include "amd_iommu_types.h" #include "irq_remapping.h" /* * definitions for the ACPI scanning code */ #define IVRS_HEADER_LENGTH 48 #define ACPI_IVHD_TYPE 0x10 #define ACPI_IVMD_TYPE_ALL 0x20 #define ACPI_IVMD_TYPE 0x21 #define ACPI_IVMD_TYPE_RANGE 0x22 #define IVHD_DEV_ALL 0x01 #define IVHD_DEV_SELECT 0x02 #define IVHD_DEV_SELECT_RANGE_START 0x03 #define IVHD_DEV_RANGE_END 0x04 #define IVHD_DEV_ALIAS 0x42 #define IVHD_DEV_ALIAS_RANGE 0x43 #define IVHD_DEV_EXT_SELECT 0x46 #define IVHD_DEV_EXT_SELECT_RANGE 0x47 #define IVHD_DEV_SPECIAL 0x48 #define IVHD_SPECIAL_IOAPIC 1 #define IVHD_SPECIAL_HPET 2 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 #define IVHD_FLAG_PASSPW_EN_MASK 0x02 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 #define IVHD_FLAG_ISOC_EN_MASK 
0x08 #define IVMD_FLAG_EXCL_RANGE 0x08 #define IVMD_FLAG_UNITY_MAP 0x01 #define ACPI_DEVFLAG_INITPASS 0x01 #define ACPI_DEVFLAG_EXTINT 0x02 #define ACPI_DEVFLAG_NMI 0x04 #define ACPI_DEVFLAG_SYSMGT1 0x10 #define ACPI_DEVFLAG_SYSMGT2 0x20 #define ACPI_DEVFLAG_LINT0 0x40 #define ACPI_DEVFLAG_LINT1 0x80 #define ACPI_DEVFLAG_ATSDIS 0x10000000 /* * ACPI table definitions * * These data structures are laid over the table to parse the important values * out of it. */ /* * structure describing one IOMMU in the ACPI table. Typically followed by one * or more ivhd_entrys. */ struct ivhd_header { u8 type; u8 flags; u16 length; u16 devid; u16 cap_ptr; u64 mmio_phys; u16 pci_seg; u16 info; u32 reserved; } __attribute__((packed)); /* * A device entry describing which devices a specific IOMMU translates and * which requestor ids they use. */ struct ivhd_entry { u8 type; u16 devid; u8 flags; u32 ext; } __attribute__((packed)); /* * An AMD IOMMU memory definition structure. It defines things like exclusion * ranges for devices and regions that should be unity mapped. */ struct ivmd_header { u8 type; u8 flags; u16 length; u16 devid; u16 aux; u64 resv; u64 range_start; u64 range_length; } __attribute__((packed)); bool amd_iommu_dump; bool amd_iommu_irq_remap __read_mostly; static bool amd_iommu_detected; static bool __initdata amd_iommu_disabled; u16 amd_iommu_last_bdf; /* largest PCI device id we have to handle */ LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings we find in ACPI */ u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */ /* Array to assign indices to IOMMUs*/ struct amd_iommu *amd_iommus[MAX_IOMMUS]; int amd_iommus_present; /* IOMMUs have a non-present cache? 
*/ bool amd_iommu_np_cache __read_mostly; bool amd_iommu_iotlb_sup __read_mostly = true; u32 amd_iommu_max_pasids __read_mostly = ~0; bool amd_iommu_v2_present __read_mostly; bool amd_iommu_force_isolation __read_mostly; /* * List of protection domains - used during resume */ LIST_HEAD(amd_iommu_pd_list); spinlock_t amd_iommu_pd_lock; /* * Pointer to the device table which is shared by all AMD IOMMUs * it is indexed by the PCI device id or the HT unit id and contains * information about the domain the device belongs to as well as the * page table root pointer. */ struct dev_table_entry *amd_iommu_dev_table; /* * The alias table is a driver specific data structure which contains the * mappings of the PCI device ids to the actual requestor ids on the IOMMU. * More than one device can share the same requestor id. */ u16 *amd_iommu_alias_table; /* * The rlookup table is used to find the IOMMU which is responsible * for a specific device. It is also indexed by the PCI device id. */ struct amd_iommu **amd_iommu_rlookup_table; /* * This table is used to find the irq remapping table for a given device id * quickly. */ struct irq_remap_table **irq_lookup_table; /* * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap * to know which ones are already in use. 
*/ unsigned long *amd_iommu_pd_alloc_bitmap; static u32 dev_table_size; /* size of the device table */ static u32 alias_table_size; /* size of the alias table */ static u32 rlookup_table_size; /* size if the rlookup table */ enum iommu_init_state { IOMMU_START_STATE, IOMMU_IVRS_DETECTED, IOMMU_ACPI_FINISHED, IOMMU_ENABLED, IOMMU_PCI_INIT, IOMMU_INTERRUPTS_EN, IOMMU_DMA_OPS, IOMMU_INITIALIZED, IOMMU_NOT_FOUND, IOMMU_INIT_ERROR, }; /* Early ioapic and hpet maps from kernel command line */ #define EARLY_MAP_SIZE 4 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; static int __initdata early_ioapic_map_size; static int __initdata early_hpet_map_size; static bool __initdata cmdline_maps; static enum iommu_init_state init_state = IOMMU_START_STATE; static int amd_iommu_enable_interrupts(void); static int __init iommu_go_to_state(enum iommu_init_state state); static inline void update_last_devid(u16 devid) { if (devid > amd_iommu_last_bdf) amd_iommu_last_bdf = devid; } static inline unsigned long tbl_size(int entry_size) { unsigned shift = PAGE_SHIFT + get_order(((int)amd_iommu_last_bdf + 1) * entry_size); return 1UL << shift; } /* Access to l1 and l2 indexed register spaces */ static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) { u32 val; pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); pci_read_config_dword(iommu->dev, 0xfc, &val); return val; } static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) { pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); pci_write_config_dword(iommu->dev, 0xfc, val); pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); } static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) { u32 val; pci_write_config_dword(iommu->dev, 0xf0, address); pci_read_config_dword(iommu->dev, 0xf4, &val); return val; } static void iommu_write_l2(struct amd_iommu *iommu, u8 
address, u32 val) { pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); pci_write_config_dword(iommu->dev, 0xf4, val); } /**************************************************************************** * * AMD IOMMU MMIO register space handling functions * * These functions are used to program the IOMMU device registers in * MMIO space required for that driver. * ****************************************************************************/ /* * This function set the exclusion range in the IOMMU. DMA accesses to the * exclusion range are passed through untranslated */ static void iommu_set_exclusion_range(struct amd_iommu *iommu) { u64 start = iommu->exclusion_start & PAGE_MASK; u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; u64 entry; if (!iommu->exclusion_start) return; entry = start | MMIO_EXCL_ENABLE_MASK; memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, &entry, sizeof(entry)); entry = limit; memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, &entry, sizeof(entry)); } /* Programs the physical address of the device table into the IOMMU hardware */ static void iommu_set_device_table(struct amd_iommu *iommu) { u64 entry; BUG_ON(iommu->mmio_base == NULL); entry = virt_to_phys(amd_iommu_dev_table); entry |= (dev_table_size >> 12) - 1; memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, &entry, sizeof(entry)); } /* Generic functions to enable/disable certain features of the IOMMU. 
*/ static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) { u32 ctrl; ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); ctrl |= (1 << bit); writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) { u32 ctrl; ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); ctrl &= ~(1 << bit); writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) { u32 ctrl; ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); ctrl &= ~CTRL_INV_TO_MASK; ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } /* Function to enable the hardware */ static void iommu_enable(struct amd_iommu *iommu) { iommu_feature_enable(iommu, CONTROL_IOMMU_EN); } static void iommu_disable(struct amd_iommu *iommu) { /* Disable command buffer */ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); /* Disable event logging and event interrupts */ iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); /* Disable IOMMU hardware itself */ iommu_feature_disable(iommu, CONTROL_IOMMU_EN); } /* * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in * the system has one. */ static u8 __iomem * __init iommu_map_mmio_space(u64 address) { if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) { pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n", address); pr_err("AMD-Vi: This is a BIOS bug. 
Please contact your hardware vendor\n"); return NULL; } return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH); } static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) { if (iommu->mmio_base) iounmap(iommu->mmio_base); release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH); } /**************************************************************************** * * The functions below belong to the first pass of AMD IOMMU ACPI table * parsing. In this pass we try to find out the highest device id this * code has to handle. Upon this information the size of the shared data * structures is determined later. * ****************************************************************************/ /* * This function calculates the length of a given IVHD entry */ static inline int ivhd_entry_length(u8 *ivhd) { return 0x04 << (*ivhd >> 6); } /* * This function reads the last device id the IOMMU has to handle from the PCI * capability header for this IOMMU */ static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr) { u32 cap; cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); update_last_devid(PCI_DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap))); return 0; } /* * After reading the highest device id from the IOMMU PCI capability header * this function looks if there is a higher device id defined in the ACPI table */ static int __init find_last_devid_from_ivhd(struct ivhd_header *h) { u8 *p = (void *)h, *end = (void *)h; struct ivhd_entry *dev; p += sizeof(*h); end += h->length; find_last_devid_on_pci(PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), PCI_FUNC(h->devid), h->cap_ptr); while (p < end) { dev = (struct ivhd_entry *)p; switch (dev->type) { case IVHD_DEV_SELECT: case IVHD_DEV_RANGE_END: case IVHD_DEV_ALIAS: case IVHD_DEV_EXT_SELECT: /* all the above subfield types refer to device ids */ update_last_devid(dev->devid); break; default: break; } p += ivhd_entry_length(p); } WARN_ON(p != end); return 0; } /* * Iterate over all IVHD 
entries in the ACPI table and find the highest device * id which we need to handle. This is the first of three functions which parse * the ACPI table. So we check the checksum here. */ static int __init find_last_devid_acpi(struct acpi_table_header *table) { int i; u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table; struct ivhd_header *h; /* * Validate checksum here so we don't need to do it when * we actually parse the table */ for (i = 0; i < table->length; ++i) checksum += p[i]; if (checksum != 0) /* ACPI table corrupt */ return -ENODEV; p += IVRS_HEADER_LENGTH; end += table->length; while (p < end) { h = (struct ivhd_header *)p; switch (h->type) { case ACPI_IVHD_TYPE: find_last_devid_from_ivhd(h); break; default: break; } p += h->length; } WARN_ON(p != end); return 0; } /**************************************************************************** * * The following functions belong to the code path which parses the ACPI table * the second time. In this ACPI parsing iteration we allocate IOMMU specific * data structures, initialize the device/alias/rlookup table and also * basically initialize the hardware. * ****************************************************************************/ /* * Allocates the command buffer. This buffer is per AMD IOMMU. We can * write commands to that buffer later and the IOMMU will execute them * asynchronously */ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) { u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(CMD_BUFFER_SIZE)); if (cmd_buf == NULL) return NULL; iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; return cmd_buf; } /* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. 
*/ void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) { iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); } /* * This function writes the command buffer address to the hardware and * enables it. */ static void iommu_enable_command_buffer(struct amd_iommu *iommu) { u64 entry; BUG_ON(iommu->cmd_buf == NULL); entry = (u64)virt_to_phys(iommu->cmd_buf); entry |= MMIO_CMD_SIZE_512; memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, &entry, sizeof(entry)); amd_iommu_reset_cmd_buffer(iommu); iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); } static void __init free_command_buffer(struct amd_iommu *iommu) { free_pages((unsigned long)iommu->cmd_buf, get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); } /* allocates the memory where the IOMMU will log its events to */ static u8 * __init alloc_event_buffer(struct amd_iommu *iommu) { iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(EVT_BUFFER_SIZE)); if (iommu->evt_buf == NULL) return NULL; iommu->evt_buf_size = EVT_BUFFER_SIZE; return iommu->evt_buf; } static void iommu_enable_event_buffer(struct amd_iommu *iommu) { u64 entry; BUG_ON(iommu->evt_buf == NULL); entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, &entry, sizeof(entry)); /* set head and tail to zero manually */ writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); } static void __init free_event_buffer(struct amd_iommu *iommu) { free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); } /* allocates the memory where the IOMMU will log its events to */ static u8 * __init alloc_ppr_log(struct amd_iommu *iommu) { iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 
get_order(PPR_LOG_SIZE)); if (iommu->ppr_log == NULL) return NULL; return iommu->ppr_log; } static void iommu_enable_ppr_log(struct amd_iommu *iommu) { u64 entry; if (iommu->ppr_log == NULL) return; entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, &entry, sizeof(entry)); /* set head and tail to zero manually */ writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); iommu_feature_enable(iommu, CONTROL_PPFLOG_EN); iommu_feature_enable(iommu, CONTROL_PPR_EN); } static void __init free_ppr_log(struct amd_iommu *iommu) { if (iommu->ppr_log == NULL) return; free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); } static void iommu_enable_gt(struct amd_iommu *iommu) { if (!iommu_feature(iommu, FEATURE_GT)) return; iommu_feature_enable(iommu, CONTROL_GT_EN); } /* sets a specific bit in the device table entry. */ static void set_dev_entry_bit(u16 devid, u8 bit) { int i = (bit >> 6) & 0x03; int _bit = bit & 0x3f; amd_iommu_dev_table[devid].data[i] |= (1UL << _bit); } static int get_dev_entry_bit(u16 devid, u8 bit) { int i = (bit >> 6) & 0x03; int _bit = bit & 0x3f; return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit; } void amd_iommu_apply_erratum_63(u16 devid) { int sysmgt; sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); if (sysmgt == 0x01) set_dev_entry_bit(devid, DEV_ENTRY_IW); } /* Writes the specific IOMMU for a device into the rlookup table */ static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) { amd_iommu_rlookup_table[devid] = iommu; } /* * This function takes the device specific flags read from the ACPI * table and sets up the device table entry with that information */ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, u16 devid, u32 flags, u32 ext_flags) { if (flags & ACPI_DEVFLAG_INITPASS) set_dev_entry_bit(devid, 
DEV_ENTRY_INIT_PASS); if (flags & ACPI_DEVFLAG_EXTINT) set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); if (flags & ACPI_DEVFLAG_NMI) set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); if (flags & ACPI_DEVFLAG_SYSMGT1) set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); if (flags & ACPI_DEVFLAG_SYSMGT2) set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); if (flags & ACPI_DEVFLAG_LINT0) set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); if (flags & ACPI_DEVFLAG_LINT1) set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); amd_iommu_apply_erratum_63(devid); set_iommu_for_device(iommu, devid); } static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line) { struct devid_map *entry; struct list_head *list; if (type == IVHD_SPECIAL_IOAPIC) list = &ioapic_map; else if (type == IVHD_SPECIAL_HPET) list = &hpet_map; else return -EINVAL; list_for_each_entry(entry, list, list) { if (!(entry->id == id && entry->cmd_line)) continue; pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n", type == IVHD_SPECIAL_IOAPIC ? 
"IOAPIC" : "HPET", id); return 0; } entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; entry->id = id; entry->devid = devid; entry->cmd_line = cmd_line; list_add_tail(&entry->list, list); return 0; } static int __init add_early_maps(void) { int i, ret; for (i = 0; i < early_ioapic_map_size; ++i) { ret = add_special_device(IVHD_SPECIAL_IOAPIC, early_ioapic_map[i].id, early_ioapic_map[i].devid, early_ioapic_map[i].cmd_line); if (ret) return ret; } for (i = 0; i < early_hpet_map_size; ++i) { ret = add_special_device(IVHD_SPECIAL_HPET, early_hpet_map[i].id, early_hpet_map[i].devid, early_hpet_map[i].cmd_line); if (ret) return ret; } return 0; } /* * Reads the device exclusion range from ACPI and initializes the IOMMU with * it */ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) { struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) return; if (iommu) { /* * We only can configure exclusion ranges per IOMMU, not * per device. But we can enable the exclusion range per * device. This is done here */ set_dev_entry_bit(m->devid, DEV_ENTRY_EX); iommu->exclusion_start = m->range_start; iommu->exclusion_length = m->range_length; } } /* * Takes a pointer to an AMD IOMMU entry in the ACPI table and * initializes the hardware and our data structures with it. */ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, struct ivhd_header *h) { u8 *p = (u8 *)h; u8 *end = p, flags = 0; u16 devid = 0, devid_start = 0, devid_to = 0; u32 dev_i, ext_flags = 0; bool alias = false; struct ivhd_entry *e; int ret; ret = add_early_maps(); if (ret) return ret; /* * First save the recommended feature enable bits from ACPI */ iommu->acpi_flags = h->flags; /* * Done. 
Now parse the device entries */ p += sizeof(struct ivhd_header); end += h->length; while (p < end) { e = (struct ivhd_entry *)p; switch (e->type) { case IVHD_DEV_ALL: DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x" " last device %02x:%02x.%x flags: %02x\n", PCI_BUS_NUM(iommu->first_device), PCI_SLOT(iommu->first_device), PCI_FUNC(iommu->first_device), PCI_BUS_NUM(iommu->last_device), PCI_SLOT(iommu->last_device), PCI_FUNC(iommu->last_device), e->flags); for (dev_i = iommu->first_device; dev_i <= iommu->last_device; ++dev_i) set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); break; case IVHD_DEV_SELECT: DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " "flags: %02x\n", PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags); devid = e->devid; set_dev_entry_from_acpi(iommu, devid, e->flags, 0); break; case IVHD_DEV_SELECT_RANGE_START: DUMP_printk(" DEV_SELECT_RANGE_START\t " "devid: %02x:%02x.%x flags: %02x\n", PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags); devid_start = e->devid; flags = e->flags; ext_flags = 0; alias = false; break; case IVHD_DEV_ALIAS: DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " "flags: %02x devid_to: %02x:%02x.%x\n", PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags, PCI_BUS_NUM(e->ext >> 8), PCI_SLOT(e->ext >> 8), PCI_FUNC(e->ext >> 8)); devid = e->devid; devid_to = e->ext >> 8; set_dev_entry_from_acpi(iommu, devid , e->flags, 0); set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); amd_iommu_alias_table[devid] = devid_to; break; case IVHD_DEV_ALIAS_RANGE: DUMP_printk(" DEV_ALIAS_RANGE\t\t " "devid: %02x:%02x.%x flags: %02x " "devid_to: %02x:%02x.%x\n", PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags, PCI_BUS_NUM(e->ext >> 8), PCI_SLOT(e->ext >> 8), PCI_FUNC(e->ext >> 8)); devid_start = e->devid; flags = e->flags; devid_to = e->ext >> 8; ext_flags = 0; alias = true; break; case IVHD_DEV_EXT_SELECT: DUMP_printk(" DEV_EXT_SELECT\t\t 
devid: %02x:%02x.%x " "flags: %02x ext: %08x\n", PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags, e->ext); devid = e->devid; set_dev_entry_from_acpi(iommu, devid, e->flags, e->ext); break; case IVHD_DEV_EXT_SELECT_RANGE: DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " "%02x:%02x.%x flags: %02x ext: %08x\n", PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags, e->ext); devid_start = e->devid; flags = e->flags; ext_flags = e->ext; alias = false; break; case IVHD_DEV_RANGE_END: DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid)); devid = e->devid; for (dev_i = devid_start; dev_i <= devid; ++dev_i) { if (alias) { amd_iommu_alias_table[dev_i] = devid_to; set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); } set_dev_entry_from_acpi(iommu, dev_i, flags, ext_flags); } break; case IVHD_DEV_SPECIAL: { u8 handle, type; const char *var; u16 devid; int ret; handle = e->ext & 0xff; devid = (e->ext >> 8) & 0xffff; type = (e->ext >> 24) & 0xff; if (type == IVHD_SPECIAL_IOAPIC) var = "IOAPIC"; else if (type == IVHD_SPECIAL_HPET) var = "HPET"; else var = "UNKNOWN"; DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n", var, (int)handle, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid)); set_dev_entry_from_acpi(iommu, devid, e->flags, 0); ret = add_special_device(type, handle, devid, false); if (ret) return ret; break; } default: break; } p += ivhd_entry_length(p); } return 0; } /* Initializes the device->iommu mapping for the driver */ static int __init init_iommu_devices(struct amd_iommu *iommu) { u32 i; for (i = iommu->first_device; i <= iommu->last_device; ++i) set_iommu_for_device(iommu, i); return 0; } static void __init free_iommu_one(struct amd_iommu *iommu) { free_command_buffer(iommu); free_event_buffer(iommu); free_ppr_log(iommu); iommu_unmap_mmio_space(iommu); } static void __init free_iommu_all(void) { struct amd_iommu *iommu, *next; 
for_each_iommu_safe(iommu, next) { list_del(&iommu->list); free_iommu_one(iommu); kfree(iommu); } } /* * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) * Workaround: * BIOS should disable L2B micellaneous clock gating by setting * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b */ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) { u32 value; if ((boot_cpu_data.x86 != 0x15) || (boot_cpu_data.x86_model < 0x10) || (boot_cpu_data.x86_model > 0x1f)) return; pci_write_config_dword(iommu->dev, 0xf0, 0x90); pci_read_config_dword(iommu->dev, 0xf4, &value); if (value & BIT(2)) return; /* Select NB indirect register 0x90 and enable writing */ pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n", dev_name(&iommu->dev->dev)); /* Clear the enable writing bit */ pci_write_config_dword(iommu->dev, 0xf0, 0x90); } /* * This function clues the initialization function for one IOMMU * together and also allocates the command buffer and programs the * hardware. It does NOT enable the IOMMU. This is done afterwards. 
*/ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) { int ret; spin_lock_init(&iommu->lock); /* Add IOMMU to internal data structures */ list_add_tail(&iommu->list, &amd_iommu_list); iommu->index = amd_iommus_present++; if (unlikely(iommu->index >= MAX_IOMMUS)) { WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); return -ENOSYS; } /* Index is fine - add IOMMU to the array */ amd_iommus[iommu->index] = iommu; /* * Copy data from ACPI table entry to the iommu struct */ iommu->devid = h->devid; iommu->cap_ptr = h->cap_ptr; iommu->pci_seg = h->pci_seg; iommu->mmio_phys = h->mmio_phys; iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys); if (!iommu->mmio_base) return -ENOMEM; iommu->cmd_buf = alloc_command_buffer(iommu); if (!iommu->cmd_buf) return -ENOMEM; iommu->evt_buf = alloc_event_buffer(iommu); if (!iommu->evt_buf) return -ENOMEM; iommu->int_enabled = false; ret = init_iommu_from_acpi(iommu, h); if (ret) return ret; /* * Make sure IOMMU is not considered to translate itself. The IVRS * table tells us so, but this is a lie! 
*/ amd_iommu_rlookup_table[iommu->devid] = NULL; init_iommu_devices(iommu); return 0; } /* * Iterates over all IOMMU entries in the ACPI table, allocates the * IOMMU structure and initializes it with init_iommu_one() */ static int __init init_iommu_all(struct acpi_table_header *table) { u8 *p = (u8 *)table, *end = (u8 *)table; struct ivhd_header *h; struct amd_iommu *iommu; int ret; end += table->length; p += IVRS_HEADER_LENGTH; while (p < end) { h = (struct ivhd_header *)p; switch (*p) { case ACPI_IVHD_TYPE: DUMP_printk("device: %02x:%02x.%01x cap: %04x " "seg: %d flags: %01x info %04x\n", PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), PCI_FUNC(h->devid), h->cap_ptr, h->pci_seg, h->flags, h->info); DUMP_printk(" mmio-addr: %016llx\n", h->mmio_phys); iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); if (iommu == NULL) return -ENOMEM; ret = init_iommu_one(iommu, h); if (ret) return ret; break; default: break; } p += h->length; } WARN_ON(p != end); return 0; } static int iommu_init_pci(struct amd_iommu *iommu) { int cap_ptr = iommu->cap_ptr; u32 range, misc, low, high; iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid), iommu->devid & 0xff); if (!iommu->dev) return -ENODEV; pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, &iommu->cap); pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, &range); pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, &misc); iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range), MMIO_GET_FD(range)); iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range)); if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) amd_iommu_iotlb_sup = false; /* read extended feature bits */ low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); iommu->features = ((u64)high << 32) | low; if (iommu_feature(iommu, FEATURE_GT)) { int glxval; u32 pasids; u64 shift; shift = iommu->features & FEATURE_PASID_MASK; shift >>= FEATURE_PASID_SHIFT; pasids = (1 << shift); 
amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids); glxval = iommu->features & FEATURE_GLXVAL_MASK; glxval >>= FEATURE_GLXVAL_SHIFT; if (amd_iommu_max_glx_val == -1) amd_iommu_max_glx_val = glxval; else amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); } if (iommu_feature(iommu, FEATURE_GT) && iommu_feature(iommu, FEATURE_PPR)) { iommu->is_iommu_v2 = true; amd_iommu_v2_present = true; } if (iommu_feature(iommu, FEATURE_PPR)) { iommu->ppr_log = alloc_ppr_log(iommu); if (!iommu->ppr_log) return -ENOMEM; } if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) amd_iommu_np_cache = true; if (is_rd890_iommu(iommu->dev)) { int i, j; iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); /* * Some rd890 systems may not be fully reconfigured by the * BIOS, so it's necessary for us to store this information so * it can be reprogrammed on resume */ pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, &iommu->stored_addr_lo); pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, &iommu->stored_addr_hi); /* Low bit locks writes to configuration space */ iommu->stored_addr_lo &= ~1; for (i = 0; i < 6; i++) for (j = 0; j < 0x12; j++) iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); for (i = 0; i < 0x83; i++) iommu->stored_l2[i] = iommu_read_l2(iommu, i); } amd_iommu_erratum_746_workaround(iommu); return pci_enable_device(iommu->dev); } static void print_iommu_info(void) { static const char * const feat_str[] = { "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", "IA", "GA", "HE", "PC" }; struct amd_iommu *iommu; for_each_iommu(iommu) { int i; pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n", dev_name(&iommu->dev->dev), iommu->cap_ptr); if (iommu->cap & (1 << IOMMU_CAP_EFR)) { pr_info("AMD-Vi: Extended features: "); for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { if (iommu_feature(iommu, (1ULL << i))) pr_cont(" %s", feat_str[i]); } pr_cont("\n"); } } if (irq_remapping_enabled) pr_info("AMD-Vi: Interrupt remapping enabled\n"); } static int __init 
amd_iommu_init_pci(void) { struct amd_iommu *iommu; int ret = 0; for_each_iommu(iommu) { ret = iommu_init_pci(iommu); if (ret) break; } ret = amd_iommu_init_devices(); print_iommu_info(); return ret; } /**************************************************************************** * * The following functions initialize the MSI interrupts for all IOMMUs * in the system. It's a bit challenging because there could be multiple * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per * pci_dev. * ****************************************************************************/ static int iommu_setup_msi(struct amd_iommu *iommu) { int r; r = pci_enable_msi(iommu->dev); if (r) return r; r = request_threaded_irq(iommu->dev->irq, amd_iommu_int_handler, amd_iommu_int_thread, 0, "AMD-Vi", iommu); if (r) { pci_disable_msi(iommu->dev); return r; } iommu->int_enabled = true; return 0; } static int iommu_init_msi(struct amd_iommu *iommu) { int ret; if (iommu->int_enabled) goto enable_faults; if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI)) ret = iommu_setup_msi(iommu); else ret = -ENODEV; if (ret) return ret; enable_faults: iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); if (iommu->ppr_log != NULL) iommu_feature_enable(iommu, CONTROL_PPFINT_EN); return 0; } /**************************************************************************** * * The next functions belong to the third pass of parsing the ACPI * table. In this last pass the memory mapping requirements are * gathered (like exclusion and unity mapping ranges). 
* ****************************************************************************/ static void __init free_unity_maps(void) { struct unity_map_entry *entry, *next; list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { list_del(&entry->list); kfree(entry); } } /* called when we find an exclusion range definition in ACPI */ static int __init init_exclusion_range(struct ivmd_header *m) { int i; switch (m->type) { case ACPI_IVMD_TYPE: set_device_exclusion_range(m->devid, m); break; case ACPI_IVMD_TYPE_ALL: for (i = 0; i <= amd_iommu_last_bdf; ++i) set_device_exclusion_range(i, m); break; case ACPI_IVMD_TYPE_RANGE: for (i = m->devid; i <= m->aux; ++i) set_device_exclusion_range(i, m); break; default: break; } return 0; } /* called for unity map ACPI definition */ static int __init init_unity_map_range(struct ivmd_header *m) { struct unity_map_entry *e = NULL; char *s; e = kzalloc(sizeof(*e), GFP_KERNEL); if (e == NULL) return -ENOMEM; switch (m->type) { default: kfree(e); return 0; case ACPI_IVMD_TYPE: s = "IVMD_TYPEi\t\t\t"; e->devid_start = e->devid_end = m->devid; break; case ACPI_IVMD_TYPE_ALL: s = "IVMD_TYPE_ALL\t\t"; e->devid_start = 0; e->devid_end = amd_iommu_last_bdf; break; case ACPI_IVMD_TYPE_RANGE: s = "IVMD_TYPE_RANGE\t\t"; e->devid_start = m->devid; e->devid_end = m->aux; break; } e->address_start = PAGE_ALIGN(m->range_start); e->address_end = e->address_start + PAGE_ALIGN(m->range_length); e->prot = m->flags >> 1; DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" " range_start: %016llx range_end: %016llx flags: %x\n", s, PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end), PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), e->address_start, e->address_end, m->flags); list_add_tail(&e->list, &amd_iommu_unity_map); return 0; } /* iterates over all memory definitions we find in the ACPI table */ static int __init init_memory_definitions(struct acpi_table_header *table) { u8 *p = 
(u8 *)table, *end = (u8 *)table; struct ivmd_header *m; end += table->length; p += IVRS_HEADER_LENGTH; while (p < end) { m = (struct ivmd_header *)p; if (m->flags & IVMD_FLAG_EXCL_RANGE) init_exclusion_range(m); else if (m->flags & IVMD_FLAG_UNITY_MAP) init_unity_map_range(m); p += m->length; } return 0; } /* * Init the device table to not allow DMA access for devices and * suppress all page faults */ static void init_device_table_dma(void) { u32 devid; for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { set_dev_entry_bit(devid, DEV_ENTRY_VALID); set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); } } static void __init uninit_device_table_dma(void) { u32 devid; for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { amd_iommu_dev_table[devid].data[0] = 0ULL; amd_iommu_dev_table[devid].data[1] = 0ULL; } } static void init_device_table(void) { u32 devid; if (!amd_iommu_irq_remap) return; for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); } static void iommu_init_flags(struct amd_iommu *iommu) { iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : iommu_feature_disable(iommu, CONTROL_PASSPW_EN); iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 
iommu_feature_enable(iommu, CONTROL_ISOC_EN) : iommu_feature_disable(iommu, CONTROL_ISOC_EN); /* * make IOMMU memory accesses cache coherent */ iommu_feature_enable(iommu, CONTROL_COHERENT_EN); /* Set IOTLB invalidation timeout to 1s */ iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); } static void iommu_apply_resume_quirks(struct amd_iommu *iommu) { int i, j; u32 ioc_feature_control; struct pci_dev *pdev = iommu->root_pdev; /* RD890 BIOSes may not have completely reconfigured the iommu */ if (!is_rd890_iommu(iommu->dev) || !pdev) return; /* * First, we need to ensure that the iommu is enabled. This is * controlled by a register in the northbridge */ /* Select Northbridge indirect register 0x75 and enable writing */ pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); pci_read_config_dword(pdev, 0x64, &ioc_feature_control); /* Enable the iommu */ if (!(ioc_feature_control & 0x1)) pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); /* Restore the iommu BAR */ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, iommu->stored_addr_lo); pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, iommu->stored_addr_hi); /* Restore the l1 indirect regs for each of the 6 l1s */ for (i = 0; i < 6; i++) for (j = 0; j < 0x12; j++) iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); /* Restore the l2 indirect regs */ for (i = 0; i < 0x83; i++) iommu_write_l2(iommu, i, iommu->stored_l2[i]); /* Lock PCI setup registers */ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, iommu->stored_addr_lo | 1); } /* * This function finally enables all IOMMUs found in the system after * they have been initialized */ static void early_enable_iommus(void) { struct amd_iommu *iommu; for_each_iommu(iommu) { iommu_disable(iommu); iommu_init_flags(iommu); iommu_set_device_table(iommu); iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_set_exclusion_range(iommu); iommu_enable(iommu); iommu_flush_all_caches(iommu); } } static void enable_iommus_v2(void) { 
struct amd_iommu *iommu; for_each_iommu(iommu) { iommu_enable_ppr_log(iommu); iommu_enable_gt(iommu); } } static void enable_iommus(void) { early_enable_iommus(); enable_iommus_v2(); } static void disable_iommus(void) { struct amd_iommu *iommu; for_each_iommu(iommu) iommu_disable(iommu); } /* * Suspend/Resume support * disable suspend until real resume implemented */ static void amd_iommu_resume(void) { struct amd_iommu *iommu; for_each_iommu(iommu) iommu_apply_resume_quirks(iommu); /* re-load the hardware */ enable_iommus(); amd_iommu_enable_interrupts(); } static int amd_iommu_suspend(void) { /* disable IOMMUs to go out of the way for BIOS */ disable_iommus(); return 0; } static struct syscore_ops amd_iommu_syscore_ops = { .suspend = amd_iommu_suspend, .resume = amd_iommu_resume, }; static void __init free_on_init_error(void) { free_pages((unsigned long)irq_lookup_table, get_order(rlookup_table_size)); if (amd_iommu_irq_cache) { kmem_cache_destroy(amd_iommu_irq_cache); amd_iommu_irq_cache = NULL; } free_pages((unsigned long)amd_iommu_rlookup_table, get_order(rlookup_table_size)); free_pages((unsigned long)amd_iommu_alias_table, get_order(alias_table_size)); free_pages((unsigned long)amd_iommu_dev_table, get_order(dev_table_size)); free_iommu_all(); #ifdef CONFIG_GART_IOMMU /* * We failed to initialize the AMD IOMMU - try fallback to GART * if possible. 
*/ gart_iommu_init(); #endif } /* SB IOAPIC is always on this device in AMD systems */ #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) static bool __init check_ioapic_information(void) { const char *fw_bug = FW_BUG; bool ret, has_sb_ioapic; int idx; has_sb_ioapic = false; ret = false; /* * If we have map overrides on the kernel command line the * messages in this function might not describe firmware bugs * anymore - so be careful */ if (cmdline_maps) fw_bug = ""; for (idx = 0; idx < nr_ioapics; idx++) { int devid, id = mpc_ioapic_id(idx); devid = get_ioapic_devid(id); if (devid < 0) { pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n", fw_bug, id); ret = false; } else if (devid == IOAPIC_SB_DEVID) { has_sb_ioapic = true; ret = true; } } if (!has_sb_ioapic) { /* * We expect the SB IOAPIC to be listed in the IVRS * table. The system timer is connected to the SB IOAPIC * and if we don't have it in the list the system will * panic at boot time. This situation usually happens * when the BIOS is buggy and provides us the wrong * device id for the IOAPIC in the system. */ pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug); } if (!ret) pr_err("AMD-Vi: Disabling interrupt remapping\n"); return ret; } static void __init free_dma_resources(void) { amd_iommu_uninit_devices(); free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, get_order(MAX_DOMAIN_ID/8)); free_unity_maps(); } /* * This is the hardware init function for AMD IOMMU in the system. * This function is called either from amd_iommu_init or from the interrupt * remapping setup code. * * This function basically parses the ACPI table for AMD IOMMU (IVRS) * three times: * * 1 pass) Find the highest PCI device id the driver has to handle. * Upon this information the size of the data structures is * determined that needs to be allocated. * * 2 pass) Initialize the data structures just allocated with the * information in the ACPI table about available AMD IOMMUs * in the system. 
It also maps the PCI devices in the * system to specific IOMMUs * * 3 pass) After the basic data structures are allocated and * initialized we update them with information about memory * remapping requirements parsed out of the ACPI table in * this last pass. * * After everything is set up the IOMMUs are enabled and the necessary * hotplug and suspend notifiers are registered. */ static int __init early_amd_iommu_init(void) { struct acpi_table_header *ivrs_base; acpi_size ivrs_size; acpi_status status; int i, ret = 0; if (!amd_iommu_detected) return -ENODEV; status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size); if (status == AE_NOT_FOUND) return -ENODEV; else if (ACPI_FAILURE(status)) { const char *err = acpi_format_exception(status); pr_err("AMD-Vi: IVRS table error: %s\n", err); return -EINVAL; } /* * First parse ACPI tables to find the largest Bus/Dev/Func * we need to handle. Upon this information the shared data * structures for the IOMMUs in the system will be allocated */ ret = find_last_devid_acpi(ivrs_base); if (ret) goto out; dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); /* Device table - directly used by all IOMMUs */ ret = -ENOMEM; amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(dev_table_size)); if (amd_iommu_dev_table == NULL) goto out; /* * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the * IOMMU see for that device */ amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, get_order(alias_table_size)); if (amd_iommu_alias_table == NULL) goto out; /* IOMMU rlookup table - find the IOMMU for a specific device */ amd_iommu_rlookup_table = (void *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, get_order(rlookup_table_size)); if (amd_iommu_rlookup_table == NULL) goto out; amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, get_order(MAX_DOMAIN_ID/8)); if 
(amd_iommu_pd_alloc_bitmap == NULL) goto out; /* * let all alias entries point to itself */ for (i = 0; i <= amd_iommu_last_bdf; ++i) amd_iommu_alias_table[i] = i; /* * never allocate domain 0 because its used as the non-allocated and * error value placeholder */ amd_iommu_pd_alloc_bitmap[0] = 1; spin_lock_init(&amd_iommu_pd_lock); /* * now the data structures are allocated and basically initialized * start the real acpi table scan */ ret = init_iommu_all(ivrs_base); if (ret) goto out; if (amd_iommu_irq_remap) amd_iommu_irq_remap = check_ioapic_information(); if (amd_iommu_irq_remap) { /* * Interrupt remapping enabled, create kmem_cache for the * remapping tables. */ ret = -ENOMEM; amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", MAX_IRQS_PER_TABLE * sizeof(u32), IRQ_TABLE_ALIGNMENT, 0, NULL); if (!amd_iommu_irq_cache) goto out; irq_lookup_table = (void *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, get_order(rlookup_table_size)); if (!irq_lookup_table) goto out; } ret = init_memory_definitions(ivrs_base); if (ret) goto out; /* init the device table */ init_device_table(); out: /* Don't leak any ACPI memory */ early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); ivrs_base = NULL; return ret; } static int amd_iommu_enable_interrupts(void) { struct amd_iommu *iommu; int ret = 0; for_each_iommu(iommu) { ret = iommu_init_msi(iommu); if (ret) goto out; } out: return ret; } static bool detect_ivrs(void) { struct acpi_table_header *ivrs_base; acpi_size ivrs_size; acpi_status status; status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size); if (status == AE_NOT_FOUND) return false; else if (ACPI_FAILURE(status)) { const char *err = acpi_format_exception(status); pr_err("AMD-Vi: IVRS table error: %s\n", err); return false; } early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); /* Make sure ACS will be enabled during PCI probe */ pci_request_acs(); if (!disable_irq_remap) amd_iommu_irq_remap = true; return true; } static int 
amd_iommu_init_dma(void) { struct amd_iommu *iommu; int ret; if (iommu_pass_through) ret = amd_iommu_init_passthrough(); else ret = amd_iommu_init_dma_ops(); if (ret) return ret; init_device_table_dma(); for_each_iommu(iommu) iommu_flush_all_caches(iommu); amd_iommu_init_api(); amd_iommu_init_notifier(); return 0; } /**************************************************************************** * * AMD IOMMU Initialization State Machine * ****************************************************************************/ static int __init state_next(void) { int ret = 0; switch (init_state) { case IOMMU_START_STATE: if (!detect_ivrs()) { init_state = IOMMU_NOT_FOUND; ret = -ENODEV; } else { init_state = IOMMU_IVRS_DETECTED; } break; case IOMMU_IVRS_DETECTED: ret = early_amd_iommu_init(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; break; case IOMMU_ACPI_FINISHED: early_enable_iommus(); register_syscore_ops(&amd_iommu_syscore_ops); x86_platform.iommu_shutdown = disable_iommus; init_state = IOMMU_ENABLED; break; case IOMMU_ENABLED: ret = amd_iommu_init_pci(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; enable_iommus_v2(); break; case IOMMU_PCI_INIT: ret = amd_iommu_enable_interrupts(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; break; case IOMMU_INTERRUPTS_EN: ret = amd_iommu_init_dma(); init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_DMA_OPS; break; case IOMMU_DMA_OPS: init_state = IOMMU_INITIALIZED; break; case IOMMU_INITIALIZED: /* Nothing to do */ break; case IOMMU_NOT_FOUND: case IOMMU_INIT_ERROR: /* Error states => do nothing */ ret = -EINVAL; break; default: /* Unknown state */ BUG(); } return ret; } static int __init iommu_go_to_state(enum iommu_init_state state) { int ret = 0; while (init_state != state) { ret = state_next(); if (init_state == IOMMU_NOT_FOUND || init_state == IOMMU_INIT_ERROR) break; } return ret; } #ifdef CONFIG_IRQ_REMAP int __init amd_iommu_prepare(void) { return iommu_go_to_state(IOMMU_ACPI_FINISHED); } int __init amd_iommu_supported(void) { return amd_iommu_irq_remap ? 1 : 0; } int __init amd_iommu_enable(void) { int ret; ret = iommu_go_to_state(IOMMU_ENABLED); if (ret) return ret; irq_remapping_enabled = 1; return 0; } void amd_iommu_disable(void) { amd_iommu_suspend(); } int amd_iommu_reenable(int mode) { amd_iommu_resume(); return 0; } int __init amd_iommu_enable_faulting(void) { /* We enable MSI later when PCI is initialized */ return 0; } #endif /* * This is the core init function for AMD IOMMU hardware in the system. * This function is called from the generic x86 DMA layer initialization * code. */ static int __init amd_iommu_init(void) { int ret; ret = iommu_go_to_state(IOMMU_INITIALIZED); if (ret) { free_dma_resources(); if (!irq_remapping_enabled) { disable_iommus(); free_on_init_error(); } else { struct amd_iommu *iommu; uninit_device_table_dma(); for_each_iommu(iommu) iommu_flush_all_caches(iommu); } } return ret; } /**************************************************************************** * * Early detect code. This code runs at IOMMU detection time in the DMA * layer. 
It just looks if there is an IVRS ACPI table to detect AMD * IOMMUs * ****************************************************************************/ int __init amd_iommu_detect(void) { int ret; if (no_iommu || (iommu_detected && !gart_iommu_aperture)) return -ENODEV; if (amd_iommu_disabled) return -ENODEV; ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); if (ret) return ret; amd_iommu_detected = true; iommu_detected = 1; x86_init.iommu.iommu_init = amd_iommu_init; return 0; } /**************************************************************************** * * Parsing functions for the AMD IOMMU specific kernel command line * options. * ****************************************************************************/ static int __init parse_amd_iommu_dump(char *str) { amd_iommu_dump = true; return 1; } static int __init parse_amd_iommu_options(char *str) { for (; *str; ++str) { if (strncmp(str, "fullflush", 9) == 0) amd_iommu_unmap_flush = true; if (strncmp(str, "off", 3) == 0) amd_iommu_disabled = true; if (strncmp(str, "force_isolation", 15) == 0) amd_iommu_force_isolation = true; } return 1; } static int __init parse_ivrs_ioapic(char *str) { unsigned int bus, dev, fn; int ret, id, i; u16 devid; ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); if (ret != 4) { pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str); return 1; } if (early_ioapic_map_size == EARLY_MAP_SIZE) { pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", str); return 1; } devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); cmdline_maps = true; i = early_ioapic_map_size++; early_ioapic_map[i].id = id; early_ioapic_map[i].devid = devid; early_ioapic_map[i].cmd_line = true; return 1; } static int __init parse_ivrs_hpet(char *str) { unsigned int bus, dev, fn; int ret, id, i; u16 devid; ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); if (ret != 4) { pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str); return 1; } if (early_hpet_map_size == 
EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	/* Pack bus/dev/fn into a 16-bit PCI device id */
	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	/* Remember that the user overrode the firmware mapping */
	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}

/* Kernel command line hooks for the AMD IOMMU options above */
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

/*
 * Returns true if at least one IOMMU in the system supports the v2
 * feature set (GT + PPR), as detected in iommu_init_pci().
 */
bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
gpl-2.0
RenderBroken/dory_render_kernel
arch/arm/mach-omap2/omap-wakeupgen.c
2083
11252
/* * OMAP WakeupGen Source file * * OMAP WakeupGen is the interrupt controller extension used along * with ARM GIC to wake the CPU out from low power states on * external interrupts. It is responsible for generating wakeup * event from the incoming interrupts and enable bits. It is * implemented in MPU always ON power domain. During normal operation, * WakeupGen delivers external interrupts directly to the GIC. * * Copyright (C) 2011 Texas Instruments, Inc. * Santosh Shilimkar <santosh.shilimkar@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/cpu_pm.h> #include <linux/irqchip/arm-gic.h> #include "omap-wakeupgen.h" #include "omap-secure.h" #include "soc.h" #include "omap4-sar-layout.h" #include "common.h" #define MAX_NR_REG_BANKS 5 #define MAX_IRQS 160 #define WKG_MASK_ALL 0x00000000 #define WKG_UNMASK_ALL 0xffffffff #define CPU_ENA_OFFSET 0x400 #define CPU0_ID 0x0 #define CPU1_ID 0x1 #define OMAP4_NR_BANKS 4 #define OMAP4_NR_IRQS 128 static void __iomem *wakeupgen_base; static void __iomem *sar_base; static DEFINE_RAW_SPINLOCK(wakeupgen_lock); static unsigned int irq_target_cpu[MAX_IRQS]; static unsigned int irq_banks = MAX_NR_REG_BANKS; static unsigned int max_irqs = MAX_IRQS; static unsigned int omap_secure_apis; /* * Static helper functions. 
*/ static inline u32 wakeupgen_readl(u8 idx, u32 cpu) { return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 + (cpu * CPU_ENA_OFFSET) + (idx * 4)); } static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu) { __raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 + (cpu * CPU_ENA_OFFSET) + (idx * 4)); } static inline void sar_writel(u32 val, u32 offset, u8 idx) { __raw_writel(val, sar_base + offset + (idx * 4)); } static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index) { unsigned int spi_irq; /* * PPIs and SGIs are not supported. */ if (irq < OMAP44XX_IRQ_GIC_START) return -EINVAL; /* * Subtract the GIC offset. */ spi_irq = irq - OMAP44XX_IRQ_GIC_START; if (spi_irq > MAX_IRQS) { pr_err("omap wakeupGen: Invalid IRQ%d\n", irq); return -EINVAL; } /* * Each WakeupGen register controls 32 interrupt. * i.e. 1 bit per SPI IRQ */ *reg_index = spi_irq >> 5; *bit_posn = spi_irq %= 32; return 0; } static void _wakeupgen_clear(unsigned int irq, unsigned int cpu) { u32 val, bit_number; u8 i; if (_wakeupgen_get_irq_info(irq, &bit_number, &i)) return; val = wakeupgen_readl(i, cpu); val &= ~BIT(bit_number); wakeupgen_writel(val, i, cpu); } static void _wakeupgen_set(unsigned int irq, unsigned int cpu) { u32 val, bit_number; u8 i; if (_wakeupgen_get_irq_info(irq, &bit_number, &i)) return; val = wakeupgen_readl(i, cpu); val |= BIT(bit_number); wakeupgen_writel(val, i, cpu); } /* * Architecture specific Mask extension */ static void wakeupgen_mask(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&wakeupgen_lock, flags); _wakeupgen_clear(d->irq, irq_target_cpu[d->irq]); raw_spin_unlock_irqrestore(&wakeupgen_lock, flags); } /* * Architecture specific Unmask extension */ static void wakeupgen_unmask(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&wakeupgen_lock, flags); _wakeupgen_set(d->irq, irq_target_cpu[d->irq]); raw_spin_unlock_irqrestore(&wakeupgen_lock, flags); } #ifdef CONFIG_HOTPLUG_CPU static DEFINE_PER_CPU(u32 
[MAX_NR_REG_BANKS], irqmasks); static void _wakeupgen_save_masks(unsigned int cpu) { u8 i; for (i = 0; i < irq_banks; i++) per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu); } static void _wakeupgen_restore_masks(unsigned int cpu) { u8 i; for (i = 0; i < irq_banks; i++) wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu); } static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg) { u8 i; for (i = 0; i < irq_banks; i++) wakeupgen_writel(reg, i, cpu); } /* * Mask or unmask all interrupts on given CPU. * 0 = Mask all interrupts on the 'cpu' * 1 = Unmask all interrupts on the 'cpu' * Ensure that the initial mask is maintained. This is faster than * iterating through GIC registers to arrive at the correct masks. */ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set) { unsigned long flags; raw_spin_lock_irqsave(&wakeupgen_lock, flags); if (set) { _wakeupgen_save_masks(cpu); _wakeupgen_set_all(cpu, WKG_MASK_ALL); } else { _wakeupgen_set_all(cpu, WKG_UNMASK_ALL); _wakeupgen_restore_masks(cpu); } raw_spin_unlock_irqrestore(&wakeupgen_lock, flags); } #endif #ifdef CONFIG_CPU_PM static inline void omap4_irq_save_context(void) { u32 i, val; if (omap_rev() == OMAP4430_REV_ES1_0) return; for (i = 0; i < irq_banks; i++) { /* Save the CPUx interrupt mask for IRQ 0 to 127 */ val = wakeupgen_readl(i, 0); sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i); val = wakeupgen_readl(i, 1); sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i); /* * Disable the secure interrupts for CPUx. The restore * code blindly restores secure and non-secure interrupt * masks from SAR RAM. Secure interrupts are not suppose * to be enabled from HLOS. So overwrite the SAR location * so that the secure interrupt remains disabled. 
*/ sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i); sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i); } /* Save AuxBoot* registers */ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); __raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET); val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_1); __raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET); /* Save SyncReq generation logic */ val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK); __raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET); val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN); __raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET); /* Set the Backup Bit Mask status */ val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET); val |= SAR_BACKUP_STATUS_WAKEUPGEN; __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET); } static inline void omap5_irq_save_context(void) { u32 i, val; for (i = 0; i < irq_banks; i++) { /* Save the CPUx interrupt mask for IRQ 0 to 159 */ val = wakeupgen_readl(i, 0); sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i); val = wakeupgen_readl(i, 1); sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i); sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i); sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i); } /* Save AuxBoot* registers */ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); __raw_writel(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET); val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); __raw_writel(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET); /* Set the Backup Bit Mask status */ val = __raw_readl(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET); val |= SAR_BACKUP_STATUS_WAKEUPGEN; __raw_writel(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET); } /* * Save WakeupGen interrupt context in SAR BANK3. Restore is done by * ROM code. WakeupGen IP is integrated along with GIC to manage the * interrupt wakeups from CPU low power states. It manages * masking/unmasking of Shared peripheral interrupts(SPI). 
So the * interrupt enable/disable control should be in sync and consistent * at WakeupGen and GIC so that interrupts are not lost. */ static void irq_save_context(void) { if (!sar_base) sar_base = omap4_get_sar_ram_base(); if (soc_is_omap54xx()) omap5_irq_save_context(); else omap4_irq_save_context(); } /* * Clear WakeupGen SAR backup status. */ static void irq_sar_clear(void) { u32 val; u32 offset = SAR_BACKUP_STATUS_OFFSET; if (soc_is_omap54xx()) offset = OMAP5_SAR_BACKUP_STATUS_OFFSET; val = __raw_readl(sar_base + offset); val &= ~SAR_BACKUP_STATUS_WAKEUPGEN; __raw_writel(val, sar_base + offset); } /* * Save GIC and Wakeupgen interrupt context using secure API * for HS/EMU devices. */ static void irq_save_secure_context(void) { u32 ret; ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX, FLAG_START_CRITICAL, 0, 0, 0, 0, 0); if (ret != API_HAL_RET_VALUE_OK) pr_err("GIC and Wakeupgen context save failed\n"); } #endif #ifdef CONFIG_HOTPLUG_CPU static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)hcpu; switch (action) { case CPU_ONLINE: wakeupgen_irqmask_all(cpu, 0); break; case CPU_DEAD: wakeupgen_irqmask_all(cpu, 1); break; } return NOTIFY_OK; } static struct notifier_block __refdata irq_hotplug_notifier = { .notifier_call = irq_cpu_hotplug_notify, }; static void __init irq_hotplug_init(void) { register_hotcpu_notifier(&irq_hotplug_notifier); } #else static void __init irq_hotplug_init(void) {} #endif #ifdef CONFIG_CPU_PM static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v) { switch (cmd) { case CPU_CLUSTER_PM_ENTER: if (omap_type() == OMAP2_DEVICE_TYPE_GP) irq_save_context(); else irq_save_secure_context(); break; case CPU_CLUSTER_PM_EXIT: if (omap_type() == OMAP2_DEVICE_TYPE_GP) irq_sar_clear(); break; } return NOTIFY_OK; } static struct notifier_block irq_notifier_block = { .notifier_call = irq_notifier, }; static void __init irq_pm_init(void) 
{ /* FIXME: Remove this when MPU OSWR support is added */ if (!soc_is_omap54xx()) cpu_pm_register_notifier(&irq_notifier_block); } #else static void __init irq_pm_init(void) {} #endif void __iomem *omap_get_wakeupgen_base(void) { return wakeupgen_base; } int omap_secure_apis_support(void) { return omap_secure_apis; } /* * Initialise the wakeupgen module. */ int __init omap_wakeupgen_init(void) { int i; unsigned int boot_cpu = smp_processor_id(); /* Not supported on OMAP4 ES1.0 silicon */ if (omap_rev() == OMAP4430_REV_ES1_0) { WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n"); return -EPERM; } /* Static mapping, never released */ wakeupgen_base = ioremap(OMAP_WKUPGEN_BASE, SZ_4K); if (WARN_ON(!wakeupgen_base)) return -ENOMEM; if (cpu_is_omap44xx()) { irq_banks = OMAP4_NR_BANKS; max_irqs = OMAP4_NR_IRQS; omap_secure_apis = 1; } /* Clear all IRQ bitmasks at wakeupGen level */ for (i = 0; i < irq_banks; i++) { wakeupgen_writel(0, i, CPU0_ID); wakeupgen_writel(0, i, CPU1_ID); } /* * Override GIC architecture specific functions to add * OMAP WakeupGen interrupt controller along with GIC */ gic_arch_extn.irq_mask = wakeupgen_mask; gic_arch_extn.irq_unmask = wakeupgen_unmask; gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; /* * FIXME: Add support to set_smp_affinity() once the core * GIC code has necessary hooks in place. */ /* Associate all the IRQs to boot CPU like GIC init does. */ for (i = 0; i < max_irqs; i++) irq_target_cpu[i] = boot_cpu; irq_hotplug_init(); irq_pm_init(); return 0; }
gpl-2.0
sebirdman/android_kernel_motorola_msm8992
fs/xfs/xfs_bmap.c
2083
182113
/* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_dir2.h" #include "xfs_mount.h" #include "xfs_da_btree.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_mount.h" #include "xfs_itable.h" #include "xfs_inode_item.h" #include "xfs_extfree_item.h" #include "xfs_alloc.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_attr_leaf.h" #include "xfs_quota.h" #include "xfs_trans_space.h" #include "xfs_buf_item.h" #include "xfs_filestream.h" #include "xfs_vnodeops.h" #include "xfs_trace.h" #include "xfs_symlink.h" kmem_zone_t *xfs_bmap_free_item_zone; /* * Miscellaneous helper functions */ /* * Compute and fill in the value of the maximum depth of a bmap btree * in this filesystem. Done once, during mount. 
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	/* ceil-divide leaf entries into leaf blocks, then walk up levels */
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) /
				minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Position the bmap btree cursor exactly at record [off, bno, len].
 */
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Position the bmap btree cursor at the first record >= [off, bno, len].
 */
STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
*/ STATIC xfs_filblks_t xfs_bmap_worst_indlen( xfs_inode_t *ip, /* incore inode pointer */ xfs_filblks_t len) /* delayed extent length */ { int level; /* btree level number */ int maxrecs; /* maximum record count at this level */ xfs_mount_t *mp; /* mount structure */ xfs_filblks_t rval; /* return value */ mp = ip->i_mount; maxrecs = mp->m_bmap_dmxr[0]; for (level = 0, rval = 0; level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); level++) { len += maxrecs - 1; do_div(len, maxrecs); rval += len; if (len == 1) return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - level - 1; if (level == 0) maxrecs = mp->m_bmap_dmxr[1]; } return rval; } /* * Calculate the default attribute fork offset for newly created inodes. */ uint xfs_default_attroffset( struct xfs_inode *ip) { struct xfs_mount *mp = ip->i_mount; uint offset; if (mp->m_sb.sb_inodesize == 256) { offset = XFS_LITINO(mp, ip->i_d.di_version) - XFS_BMDR_SPACE_CALC(MINABTPTRS); } else { offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS); } ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version)); return offset; } /* * Helper routine to reset inode di_forkoff field when switching * attribute fork from local to extent format - we reset it where * possible to make space available for inline data fork extents. */ STATIC void xfs_bmap_forkoff_reset( xfs_mount_t *mp, xfs_inode_t *ip, int whichfork) { if (whichfork == XFS_ATTR_FORK && ip->i_d.di_format != XFS_DINODE_FMT_DEV && ip->i_d.di_format != XFS_DINODE_FMT_UUID && ip->i_d.di_format != XFS_DINODE_FMT_BTREE) { uint dfl_forkoff = xfs_default_attroffset(ip) >> 3; if (dfl_forkoff > ip->i_d.di_forkoff) ip->i_d.di_forkoff = dfl_forkoff; } } /* * Extent tree block counting routines. */ /* * Count leaf blocks given a range of extent records. 
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int		b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	/* on-disk btree records are 1-based */
	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks is use.
 */
STATIC int				/* error */
xfs_bmap_count_tree(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fsblock_t	blockno,	/* file system block number */
	int		levelin,	/* level in btree */
	int		*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
		/* extent format: count straight from the incore extent list */
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	return 0;
}

/*
 * Debug/sanity checking code
 */

/*
 * Basic structural checks on a bmap btree block buffer: known magic,
 * expected level, and a record count in the legal range.
 * Returns 1 if the block looks sane, 0 otherwise.
 */
STATIC int
xfs_bmap_sanity_check(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	int			level)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	if (block->bb_magic != cpu_to_be32(XFS_BMAP_CRC_MAGIC) &&
	    block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC))
		return 0;

	if (be16_to_cpu(block->bb_level) != level ||
	    be16_to_cpu(block->bb_numrecs) == 0 ||
	    be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
		return 0;

	return 1;
}

#ifdef DEBUG
/*
 * Find the buffer for the given disk block, first in the cursor's
 * buffer array, then among the transaction's logged buf items.
 * Returns NULL if it is not held anywhere.
 */
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

/*
 * Verify an interior bmap btree block: keys strictly increasing and
 * no duplicate child pointers.  Panics on failure (DEBUG only).
 */
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		XFS_WANT_CORRUPTED_GOTO(
			xfs_bmap_sanity_check(mp, bp, level),
			error0);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */
		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}
	if (bp_release) {
		bp_release = 0;
		xfs_trans_brelse(NULL, bp);
	}
	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
	return;
}

/*
 * Add bmap trace insert entries for all the contents of the extent records.
 */
void
xfs_bmap_trace_exlist(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	cnt,		/* count of entries in the list */
	int		whichfork,	/* data or attr fork */
	unsigned long	caller_ip)
{
	xfs_extnum_t	idx;		/* extent record index */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		state = 0;

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
	for (idx = 0; idx < cnt; idx++)
		trace_xfs_extlist(ip, idx, whichfork, caller_ip);
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the callers original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extent beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		/* mappings must be contiguous in file offset order */
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
xfs_bmap_add_free(
	xfs_fsblock_t		bno,		/* fs block number of extent */
	xfs_filblks_t		len,		/* length of extent */
	xfs_bmap_free_t		*flist,		/* list of extents */
	xfs_mount_t		*mp)		/* mount point structure */
{
	xfs_bmap_free_item_t	*cur;		/* current (next) element */
	xfs_bmap_free_item_t	*new;		/* new element */
	xfs_bmap_free_item_t	*prev;		/* previous element */
#ifdef DEBUG
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);
	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xbfi_startblock = bno;
	new->xbfi_blockcount = (xfs_extlen_t)len;
	/* walk the sorted singly-linked list to find the insertion point */
	for (prev = NULL, cur = flist->xbf_first;
	     cur != NULL;
	     prev = cur, cur = cur->xbfi_next) {
		if (cur->xbfi_startblock >= bno)
			break;
	}
	if (prev)
		prev->xbfi_next = new;
	else
		flist->xbf_first = new;
	new->xbfi_next = cur;
	flist->xbf_count++;
}

/*
 * Remove the entry "free" from the free item list.  Prev points to the
 * previous entry, unless "free" is the head of the list.
 */
STATIC void
xfs_bmap_del_free(
	xfs_bmap_free_t		*flist,	/* free item list header */
	xfs_bmap_free_item_t	*prev,	/* previous item on list, if any */
	xfs_bmap_free_item_t	*free)	/* list item to be freed */
{
	if (prev)
		prev->xbfi_next = free->xbfi_next;
	else
		flist->xbf_first = free->xbfi_next;
	flist->xbf_count--;
	kmem_zone_free(xfs_bmap_free_item_zone, free);
}

/*
 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
 * caller.  Frees all the extents that need freeing, which must be done
 * last due to locking considerations.  We never free any extents in
 * the first transaction.
 *
 * Return 1 if the given transaction was committed and a new one
 * started, and 0 otherwise in the committed parameter.
 */
int						/* error */
xfs_bmap_finish(
	xfs_trans_t		**tp,		/* transaction pointer addr */
	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
	int			*committed)	/* xact committed or not */
{
	xfs_efd_log_item_t	*efd;		/* extent free data */
	xfs_efi_log_item_t	*efi;		/* extent free intention */
	int			error;		/* error return value */
	xfs_bmap_free_item_t	*free;		/* free extent item */
	unsigned int		logres;		/* new log reservation */
	unsigned int		logcount;	/* new log count */
	xfs_mount_t		*mp;		/* filesystem mount structure */
	xfs_bmap_free_item_t	*next;		/* next item on free list */
	xfs_trans_t		*ntp;		/* new transaction pointer */

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	if (flist->xbf_count == 0) {
		*committed = 0;
		return 0;
	}
	/* log an EFI covering every extent on the list, then roll the trans */
	ntp = *tp;
	efi = xfs_trans_get_efi(ntp, flist->xbf_count);
	for (free = flist->xbf_first; free; free = free->xbfi_next)
		xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
			free->xbfi_blockcount);
	logres = ntp->t_log_res;
	logcount = ntp->t_log_count;
	ntp = xfs_trans_dup(*tp);
	error = xfs_trans_commit(*tp, 0);
	*tp = ntp;
	*committed = 1;
	/*
	 * We have a new transaction, so we should return committed=1,
	 * even though we're returning an error.
	 */
	if (error)
		return error;

	/*
	 * transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(ntp->t_ticket);

	if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
			logcount)))
		return error;
	/* free each extent, logging a matching EFD entry for each one */
	efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
	for (free = flist->xbf_first; free != NULL; free = next) {
		next = free->xbfi_next;
		if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
				free->xbfi_blockcount))) {
			/*
			 * The bmap free list will be cleaned up at a
			 * higher level.  The EFI will be canceled when
			 * this transaction is aborted.
			 * Need to force shutdown here to make sure it
			 * happens, since this transaction may not be
			 * dirty yet.
			 */
			mp = ntp->t_mountp;
			if (!XFS_FORCED_SHUTDOWN(mp))
				xfs_force_shutdown(mp,
						   (error == EFSCORRUPTED) ?
						   SHUTDOWN_CORRUPT_INCORE :
						   SHUTDOWN_META_IO_ERROR);
			return error;
		}
		xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
			free->xbfi_blockcount);
		xfs_bmap_del_free(flist, NULL, free);
	}
	return 0;
}

/*
 * Free up any items left in the list.
 */
void
xfs_bmap_cancel(
	xfs_bmap_free_t		*flist)	/* list of bmap_free_items */
{
	xfs_bmap_free_item_t	*free;	/* free list item */
	xfs_bmap_free_item_t	*next;

	if (flist->xbf_count == 0)
		return;
	ASSERT(flist->xbf_first != NULL);
	for (free = flist->xbf_first; free; free = next) {
		next = free->xbfi_next;
		xfs_bmap_del_free(flist, NULL, free);
	}
	ASSERT(flist->xbf_count == 0);
}

/*
 * Inode fork format manipulation functions
 */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	/* queue the now-unused leaf block for freeing at transaction end */
	xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	xfs_bmap_free_t		*flist,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
	else
		xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);

	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.flist = flist;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;

	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	args.firstblock = *firstblock;
	if (*firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (flist->xbf_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
	       (flist->xbf_low &&
		args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
	else
		xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	/* copy real (non-delayed) extents from the incore list to the leaf */
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (cnt = i = 0; i < nextents; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
			arp->l0 = cpu_to_be64(ep->l0);
			arp->l1 = cpu_to_be64(ep->l1);
			arp++; cnt++;
		}
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error;		/* error return value */
	int		flags;		/* logging flags returned */
	xfs_ifork_t	*ifp;		/* inode fork pointer */

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	flags = 0;
	error = 0;
	if (ifp->if_bytes) {
		xfs_alloc_arg_t	args;	/* allocation arguments */
		xfs_buf_t	*bp;	/* buffer for extent block */
		xfs_bmbt_rec_host_t *ep;/* extent record pointer */

		ASSERT((ifp->if_flags &
			(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
		memset(&args, 0, sizeof(args));
		args.tp = tp;
		args.mp = ip->i_mount;
		args.firstblock = *firstblock;
		/*
		 * Allocate a block.  We know we need only one, since the
		 * file currently fits in an inode.
		 */
		if (*firstblock == NULLFSBLOCK) {
			args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
			args.type = XFS_ALLOCTYPE_START_BNO;
		} else {
			args.fsbno = *firstblock;
			args.type = XFS_ALLOCTYPE_NEAR_BNO;
		}
		args.total = total;
		args.minlen = args.maxlen = args.prod = 1;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto done;

		/* Can't fail, the space was reserved. */
		ASSERT(args.fsbno != NULLFSBLOCK);
		ASSERT(args.len == 1);
		*firstblock = args.fsbno;
		bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

		/* initialise the block and copy the data */
		init_fn(tp, bp, ip, ifp);

		/* account for the change in fork size and log everything */
		xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
		xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
		xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
		xfs_iext_add(ifp, 0, 1);
		ep = xfs_iext_get_ext(ifp, 0);
		xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
		trace_xfs_bmap_post_update(ip, 0,
				whichfork == XFS_ATTR_FORK ?
BMAP_ATTRFORK : 0, _THIS_IP_); XFS_IFORK_NEXT_SET(ip, whichfork, 1); ip->i_d.di_nblocks = 1; xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); flags |= xfs_ilog_fext(whichfork); } else { ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork); } ifp->if_flags &= ~XFS_IFINLINE; ifp->if_flags |= XFS_IFEXTENTS; XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); flags |= XFS_ILOG_CORE; done: *logflagsp = flags; return error; } /* * Called from xfs_bmap_add_attrfork to handle btree format files. */ STATIC int /* error */ xfs_bmap_add_attrfork_btree( xfs_trans_t *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode pointer */ xfs_fsblock_t *firstblock, /* first block allocated */ xfs_bmap_free_t *flist, /* blocks to free at commit */ int *flags) /* inode logging flags */ { xfs_btree_cur_t *cur; /* btree cursor */ int error; /* error return value */ xfs_mount_t *mp; /* file system mount struct */ int stat; /* newroot status */ mp = ip->i_mount; if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip)) *flags |= XFS_ILOG_DBROOT; else { cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); cur->bc_private.b.flist = flist; cur->bc_private.b.firstblock = *firstblock; if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) goto error0; /* must be at least one entry */ XFS_WANT_CORRUPTED_GOTO(stat == 1, error0); if ((error = xfs_btree_new_iroot(cur, flags, &stat))) goto error0; if (stat == 0) { xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); return XFS_ERROR(ENOSPC); } *firstblock = cur->bc_private.b.firstblock; cur->bc_private.b.allocated = 0; xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); } return 0; error0: xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return error; } /* * Called from xfs_bmap_add_attrfork to handle extents format files. 
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	/*
	 * If the extent records still fit inline after shrinking the data
	 * fork area there is nothing to do; otherwise convert the data fork
	 * to btree format.
	 */
	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
		flags, XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}

/*
 * Block initialisation function for local to extent format conversion.
 *
 * This shouldn't actually be called by anyone, so make sure debug kernels cause
 * a noticable failure.
 */
STATIC void
xfs_bmap_local_to_extents_init_fn(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	struct xfs_inode	*ip,
	struct xfs_ifork	*ifp)
{
	ASSERT(0);
	bp->b_ops = &xfs_bmbt_buf_ops;
	memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formating, others (directories) are so specialised they
 * handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_da_args_t		dargs;		/* args for dir/attr code */

	/* Still fits inline after shrinking the data area - nothing to do. */
	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	/* Directories handle the whole conversion themselves. */
	if (S_ISDIR(ip->i_d.di_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.dp = ip;
		dargs.firstblock = firstblock;
		dargs.flist = flist;
		dargs.total = ip->i_mount->m_dirblkfsbs;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(ip->i_d.di_mode))
		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
						 flags, XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* Should never be reached; init_fn asserts on debug kernels. */
	return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
					 XFS_DATA_FORK,
					 xfs_bmap_local_to_extents_init_fn);
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	xfs_bmap_free_t		flist;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			committed;	/* xaction was committed */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
	tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
	blks = XFS_ADDAFORK_SPACE_RES(mp);
	if (rsvd)
		tp->t_flags |= XFS_TRANS_RESERVE;
	if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
		goto error0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
		return error;
	}
	/* Someone may have raced us and added the attr fork already. */
	if (XFS_IFORK_Q(ip))
		goto error1;
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}
	ASSERT(ip->i_d.di_anextents == 0);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/*
	 * Pick the attr fork offset (di_forkoff, in 8-byte units) based on
	 * the current data fork format.
	 */
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_UUID:
		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = XFS_ERROR(EINVAL);
		goto error1;
	}

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_bmap_init(&flist, &firstblock);
	/*
	 * The data fork may need converting to make room for the new
	 * attr fork; dispatch on its current format.
	 */
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&flist, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto error2;
	/*
	 * Update the superblock feature bits if this is the first attr
	 * (or first attr2) usage on this filesystem.
	 */
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		__int64_t sbfields = 0;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			sbfields |= XFS_SB_VERSIONNUM;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
		}
		if (sbfields) {
			spin_unlock(&mp->m_sb_lock);
			xfs_mod_sb(tp, sbfields);
		} else
			spin_unlock(&mp->m_sb_lock);
	}

	error = xfs_bmap_finish(&tp, &flist, &committed);
	if (error)
		goto error2;
	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
error2:
	xfs_bmap_cancel(&flist);
error1:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
error0:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
	return error;
}

/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int					/* error */
xfs_bmap_read_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_exntfmt_t		exntf;	/* XFS_EXTFMT_NOSTATE, if checking */
	xfs_extnum_t		i, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	/* REFERENCED */
	xfs_extnum_t		room;	/* number of entries there's room for */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
					XFS_EXTFMT_INODE(ip);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
		XFS_WANT_CORRUPTED_GOTO(
			xfs_bmap_sanity_check(mp, bp, level),
			error0);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
		xfs_trans_brelse(tp, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	i = 0;
	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;
		xfs_extnum_t	start;

		num_recs = xfs_btree_get_numrecs(block);
		/* More records on disk than the fork claims: corruption. */
		if (unlikely(i + num_recs > room)) {
			ASSERT(i + num_recs <= room);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
			goto error0;
		}
		XFS_WANT_CORRUPTED_GOTO(
			xfs_bmap_sanity_check(mp, bp, 0),
			error0);
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		start = i;
		for (j = 0; j < num_recs; j++, i++, frp++) {
			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
			trp->l0 = be64_to_cpu(frp->l0);
			trp->l1 = be64_to_cpu(frp->l1);
		}
		if (exntf == XFS_EXTFMT_NOSTATE) {
			/*
			 * Check all attribute bmap btree records and
			 * any "older" data bmap btree records for a
			 * set bit in the "extent flag" position.
			 */
			if (unlikely(xfs_check_nostate_extents(ifp,
					start, num_recs))) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW,
						 ip->i_mount);
				goto error0;
			}
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
	ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
	return 0;
error0:
	xfs_trans_brelse(tp, bp);
	return XFS_ERROR(EFSCORRUPTED);
}

/*
 * Search the extent records for the entry containing block bno.
 * If bno lies in a hole, point to the next entry.  If bno lies
 * past eof, *eofp will be set, and *prevp will contain the last
 * entry (null if none).  Else, *lastxp will be set to the index
 * of the found entry; *gotp will contain the entry.
 */
STATIC xfs_bmbt_rec_host_t *		/* pointer to found extent entry */
xfs_bmap_search_multi_extents(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number searched for */
	int		*eofp,		/* out: end of file found */
	xfs_extnum_t	*lastxp,	/* out: last extent index */
	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
	xfs_bmbt_irec_t	*prevp)		/* out: previous extent entry found */
{
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
	xfs_extnum_t	lastx;		/* last extent index */

	/*
	 * Initialize the extent entry structure to catch access to
	 * uninitialized br_startblock field.
	 */
	gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
	gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
	gotp->br_state = XFS_EXT_INVALID;
#if XFS_BIG_BLKNOS
	gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
#else
	gotp->br_startblock = 0xffffa5a5;
#endif
	prevp->br_startoff = NULLFILEOFF;

	ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
	if (lastx > 0) {
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
	}
	if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
		xfs_bmbt_get_all(ep, gotp);
		*eofp = 0;
	} else {
		/* Past the last extent: report the previous entry, if any. */
		if (lastx > 0) {
			*gotp = *prevp;
		}
		*eofp = 1;
		ep = NULL;
	}
	*lastxp = lastx;
	return ep;
}

/*
 * Search the extents list for the inode, for the extent containing bno.
* If bno lies in a hole, point to the next entry. If bno lies past eof, * *eofp will be set, and *prevp will contain the last entry (null if none). * Else, *lastxp will be set to the index of the found * entry; *gotp will contain the entry. */ STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */ xfs_bmap_search_extents( xfs_inode_t *ip, /* incore inode pointer */ xfs_fileoff_t bno, /* block number searched for */ int fork, /* data or attr fork */ int *eofp, /* out: end of file found */ xfs_extnum_t *lastxp, /* out: last extent index */ xfs_bmbt_irec_t *gotp, /* out: extent entry found */ xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ { xfs_ifork_t *ifp; /* inode fork pointer */ xfs_bmbt_rec_host_t *ep; /* extent record pointer */ XFS_STATS_INC(xs_look_exlist); ifp = XFS_IFORK_PTR(ip, fork); ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp); if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) && !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) { xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, "Access to block zero in inode %llu " "start_block: %llx start_off: %llx " "blkcnt: %llx extent-state: %x lastx: %x\n", (unsigned long long)ip->i_ino, (unsigned long long)gotp->br_startblock, (unsigned long long)gotp->br_startoff, (unsigned long long)gotp->br_blockcount, gotp->br_state, *lastxp); *lastxp = NULLEXTNUM; *eofp = 1; return NULL; } return ep; } /* * Returns the file-relative block number of the first unused block(s) * in the file with at least "len" logically contiguous blocks free. * This is the lowest-address hole if the file has holes, else the first block * past the end of file. * Return 0 if the file is currently local (in-inode). 
*/ int /* error */ xfs_bmap_first_unused( xfs_trans_t *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode */ xfs_extlen_t len, /* size of hole to find */ xfs_fileoff_t *first_unused, /* unused block */ int whichfork) /* data or attr fork */ { int error; /* error return value */ int idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t lastaddr; /* last block number seen */ xfs_fileoff_t lowest; /* lowest useful block */ xfs_fileoff_t max; /* starting useful block */ xfs_fileoff_t off; /* offset for this block */ xfs_extnum_t nextents; /* number of extent entries */ ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE || XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS || XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { *first_unused = 0; return 0; } ifp = XFS_IFORK_PTR(ip, whichfork); if (!(ifp->if_flags & XFS_IFEXTENTS) && (error = xfs_iread_extents(tp, ip, whichfork))) return error; lowest = *first_unused; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) { xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx); off = xfs_bmbt_get_startoff(ep); /* * See if the hole before this extent will work. */ if (off >= lowest + len && off - max >= len) { *first_unused = max; return 0; } lastaddr = off + xfs_bmbt_get_blockcount(ep); max = XFS_FILEOFF_MAX(lastaddr, lowest); } *first_unused = max; return 0; } /* * Returns the file-relative block number of the last block + 1 before * last_block (input value) in the file. * This is not based on i_size, it is based on the extent records. * Returns 0 for local files, as they do not have extent records. 
*/ int /* error */ xfs_bmap_last_before( xfs_trans_t *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode */ xfs_fileoff_t *last_block, /* last block */ int whichfork) /* data or attr fork */ { xfs_fileoff_t bno; /* input file offset */ int eof; /* hit end of file */ xfs_bmbt_rec_host_t *ep; /* pointer to last extent */ int error; /* error return value */ xfs_bmbt_irec_t got; /* current extent value */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t lastx; /* last extent used */ xfs_bmbt_irec_t prev; /* previous extent value */ if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) return XFS_ERROR(EIO); if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { *last_block = 0; return 0; } ifp = XFS_IFORK_PTR(ip, whichfork); if (!(ifp->if_flags & XFS_IFEXTENTS) && (error = xfs_iread_extents(tp, ip, whichfork))) return error; bno = *last_block - 1; ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev); if (eof || xfs_bmbt_get_startoff(ep) > bno) { if (prev.br_startoff == NULLFILEOFF) *last_block = 0; else *last_block = prev.br_startoff + prev.br_blockcount; } /* * Otherwise *last_block is already the right answer. */ return 0; } STATIC int xfs_bmap_last_extent( struct xfs_trans *tp, struct xfs_inode *ip, int whichfork, struct xfs_bmbt_irec *rec, int *is_empty) { struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); int error; int nextents; if (!(ifp->if_flags & XFS_IFEXTENTS)) { error = xfs_iread_extents(tp, ip, whichfork); if (error) return error; } nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); if (nextents == 0) { *is_empty = 1; return 0; } xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec); *is_empty = 0; return 0; } /* * Check the last inode extent to determine whether this allocation will result * in blocks being allocated at the end of the file. 
When we allocate new data * blocks at the end of the file which do not start at the previous data block, * we will try to align the new blocks at stripe unit boundaries. * * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be * at, or past the EOF. */ STATIC int xfs_bmap_isaeof( struct xfs_bmalloca *bma, int whichfork) { struct xfs_bmbt_irec rec; int is_empty; int error; bma->aeof = 0; error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, &is_empty); if (error || is_empty) return error; /* * Check if we are allocation or past the last extent, or at least into * the last delayed allocated extent. */ bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount || (bma->offset >= rec.br_startoff && isnullstartblock(rec.br_startblock)); return 0; } /* * Check if the endoff is outside the last extent. If so the caller will grow * the allocation to a stripe unit boundary. All offsets are considered outside * the end of file for an empty fork, so 1 is returned in *eof in that case. */ int xfs_bmap_eof( struct xfs_inode *ip, xfs_fileoff_t endoff, int whichfork, int *eof) { struct xfs_bmbt_irec rec; int error; error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof); if (error || *eof) return error; *eof = endoff >= rec.br_startoff + rec.br_blockcount; return 0; } /* * Returns the file-relative block number of the first block past eof in * the file. This is not based on i_size, it is based on the extent records. * Returns 0 for local files, as they do not have extent records. 
*/ int xfs_bmap_last_offset( struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t *last_block, int whichfork) { struct xfs_bmbt_irec rec; int is_empty; int error; *last_block = 0; if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) return 0; if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) return XFS_ERROR(EIO); error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); if (error || is_empty) return error; *last_block = rec.br_startoff + rec.br_blockcount; return 0; } /* * Returns whether the selected fork of the inode has exactly one * block or not. For the data fork we check this matches di_size, * implying the file's range is 0..bsize-1. */ int /* 1=>1 block, 0=>otherwise */ xfs_bmap_one_block( xfs_inode_t *ip, /* incore inode */ int whichfork) /* data or attr fork */ { xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */ xfs_ifork_t *ifp; /* inode fork pointer */ int rval; /* return value */ xfs_bmbt_irec_t s; /* internal version of extent */ #ifndef DEBUG if (whichfork == XFS_DATA_FORK) return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize; #endif /* !DEBUG */ if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1) return 0; if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) return 0; ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(ifp->if_flags & XFS_IFEXTENTS); ep = xfs_iext_get_ext(ifp, 0); xfs_bmbt_get_all(ep, &s); rval = s.br_startoff == 0 && s.br_blockcount == 1; if (rval && whichfork == XFS_DATA_FORK) ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize); return rval; } /* * Extent tree manipulation functions used during allocation. */ /* * Convert a delayed allocation to a real allocation. 
*/ STATIC int /* error */ xfs_bmap_add_extent_delay_real( struct xfs_bmalloca *bma) { struct xfs_bmbt_irec *new = &bma->got; int diff; /* temp value */ xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ int error; /* error return value */ int i; /* temp state */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ /* left is 0, right is 1, prev is 2 */ int rval=0; /* return value (logging flags) */ int state = 0;/* state bits, accessed thru macros */ xfs_filblks_t da_new; /* new count del alloc blocks used */ xfs_filblks_t da_old; /* old count del alloc blocks used */ xfs_filblks_t temp=0; /* value for da_new calculations */ xfs_filblks_t temp2=0;/* value for da_new calculations */ int tmp_rval; /* partial logging flags */ ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK); ASSERT(bma->idx >= 0); ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); ASSERT(!isnullstartblock(new->br_startblock)); ASSERT(!bma->cur || (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); XFS_STATS_INC(xs_add_exlist); #define LEFT r[0] #define RIGHT r[1] #define PREV r[2] /* * Set up a bunch of variables to make the tests simpler. */ ep = xfs_iext_get_ext(ifp, bma->idx); xfs_bmbt_get_all(ep, &PREV); new_endoff = new->br_startoff + new->br_blockcount; ASSERT(PREV.br_startoff <= new->br_startoff); ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); da_old = startblockval(PREV.br_startblock); da_new = 0; /* * Set flags determining what part of the previous delayed allocation * extent is being replaced by a real allocation. */ if (PREV.br_startoff == new->br_startoff) state |= BMAP_LEFT_FILLING; if (PREV.br_startoff + PREV.br_blockcount == new_endoff) state |= BMAP_RIGHT_FILLING; /* * Check and set flags if this segment has a left neighbor. * Don't set contiguous if the combined extent would be too large. 
*/ if (bma->idx > 0) { state |= BMAP_LEFT_VALID; xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT); if (isnullstartblock(LEFT.br_startblock)) state |= BMAP_LEFT_DELAY; } if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && LEFT.br_state == new->br_state && LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) state |= BMAP_LEFT_CONTIG; /* * Check and set flags if this segment has a right neighbor. * Don't set contiguous if the combined extent would be too large. * Also check for all-three-contiguous being too large. */ if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { state |= BMAP_RIGHT_VALID; xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT); if (isnullstartblock(RIGHT.br_startblock)) state |= BMAP_RIGHT_DELAY; } if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && new_endoff == RIGHT.br_startoff && new->br_startblock + new->br_blockcount == RIGHT.br_startblock && new->br_state == RIGHT.br_state && new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) != (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING) || LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN)) state |= BMAP_RIGHT_CONTIG; error = 0; /* * Switch out based on the FILLING and CONTIG state bits. */ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Filling in all of a previously delayed allocation extent. * The left and right neighbors are both contiguous with new. 
*/ bma->idx--; trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_iext_remove(bma->ip, bma->idx + 1, 2, state); bma->ip->i_d.di_nextents--; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, RIGHT.br_startblock, RIGHT.br_blockcount, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); error = xfs_btree_delete(bma->cur, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); error = xfs_btree_decrement(bma->cur, 0, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount, LEFT.br_state); if (error) goto done; } break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: /* * Filling in all of a previously delayed allocation extent. * The left neighbor is contiguous, the right is not. */ bma->idx--; trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx), LEFT.br_blockcount + PREV.br_blockcount); trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); if (bma->cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + PREV.br_blockcount, LEFT.br_state); if (error) goto done; } break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Filling in all of a previously delayed allocation extent. 
* The right neighbor is contiguous, the left is not. */ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmbt_set_startblock(ep, new->br_startblock); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount + RIGHT.br_blockcount); trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); if (bma->cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, RIGHT.br_startblock, RIGHT.br_blockcount, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); error = xfs_bmbt_update(bma->cur, PREV.br_startoff, new->br_startblock, PREV.br_blockcount + RIGHT.br_blockcount, PREV.br_state); if (error) goto done; } break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: /* * Filling in all of a previously delayed allocation extent. * Neither the left nor right neighbors are contiguous with * the new one. */ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmbt_set_startblock(ep, new->br_startblock); trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); bma->ip->i_d.di_nextents++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, new->br_startblock, new->br_blockcount, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; error = xfs_btree_insert(bma->cur, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } break; case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: /* * Filling in the first part of a previous delayed allocation. * The left neighbor is contiguous. 
*/ trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1), LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_); temp = PREV.br_blockcount - new->br_blockcount; trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); if (bma->cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); error = xfs_bmbt_update(bma->cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + new->br_blockcount, LEFT.br_state); if (error) goto done; } da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), startblockval(PREV.br_startblock)); xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); bma->idx--; break; case BMAP_LEFT_FILLING: /* * Filling in the first part of a previous delayed allocation. * The left neighbor is not contiguous. 
*/ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmbt_set_startoff(ep, new_endoff); temp = PREV.br_blockcount - new->br_blockcount; xfs_bmbt_set_blockcount(ep, temp); xfs_iext_insert(bma->ip, bma->idx, 1, new, state); bma->ip->i_d.di_nextents++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, new->br_startblock, new->br_blockcount, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; error = xfs_btree_insert(bma->cur, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, bma->firstblock, bma->flist, &bma->cur, 1, &tmp_rval, XFS_DATA_FORK); rval |= tmp_rval; if (error) goto done; } da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), startblockval(PREV.br_startblock) - (bma->cur ? bma->cur->bc_private.b.allocated : 0)); ep = xfs_iext_get_ext(ifp, bma->idx + 1); xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); break; case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Filling in the last part of a previous delayed allocation. * The right neighbor is contiguous with the new allocation. 
*/ temp = PREV.br_blockcount - new->br_blockcount; trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1), new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, RIGHT.br_state); trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); if (bma->cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff, RIGHT.br_startblock, RIGHT.br_blockcount, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); error = xfs_bmbt_update(bma->cur, new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, RIGHT.br_state); if (error) goto done; } da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), startblockval(PREV.br_startblock)); trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); bma->idx++; break; case BMAP_RIGHT_FILLING: /* * Filling in the last part of a previous delayed allocation. * The right neighbor is not contiguous. 
*/ temp = PREV.br_blockcount - new->br_blockcount; trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state); bma->ip->i_d.di_nextents++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, new->br_startblock, new->br_blockcount, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; error = xfs_btree_insert(bma->cur, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, bma->firstblock, bma->flist, &bma->cur, 1, &tmp_rval, XFS_DATA_FORK); rval |= tmp_rval; if (error) goto done; } da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), startblockval(PREV.br_startblock) - (bma->cur ? bma->cur->bc_private.b.allocated : 0)); ep = xfs_iext_get_ext(ifp, bma->idx); xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); bma->idx++; break; case 0: /* * Filling in the middle part of a previous delayed allocation. * Contiguity is impossible here. * This case is avoided almost all the time. 
* * We start with a delayed allocation: * * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ * PREV @ idx * * and we are allocating: * +rrrrrrrrrrrrrrrrr+ * new * * and we set it up for insertion as: * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ * new * PREV @ idx LEFT RIGHT * inserted at idx + 1 */ temp = new->br_startoff - PREV.br_startoff; temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ LEFT = *new; RIGHT.br_state = PREV.br_state; RIGHT.br_startblock = nullstartblock( (int)xfs_bmap_worst_indlen(bma->ip, temp2)); RIGHT.br_startoff = new_endoff; RIGHT.br_blockcount = temp2; /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state); bma->ip->i_d.di_nextents++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff, new->br_startblock, new->br_blockcount, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); bma->cur->bc_rec.b.br_state = XFS_EXT_NORM; error = xfs_btree_insert(bma->cur, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, bma->firstblock, bma->flist, &bma->cur, 1, &tmp_rval, XFS_DATA_FORK); rval |= tmp_rval; if (error) goto done; } temp = xfs_bmap_worst_indlen(bma->ip, temp); temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - (bma->cur ? 
bma->cur->bc_private.b.allocated : 0)); if (diff > 0) { error = xfs_icsb_modify_counters(bma->ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), 0); ASSERT(!error); if (error) goto done; } ep = xfs_iext_get_ext(ifp, bma->idx); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_); xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2), nullstartblock((int)temp2)); trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_); bma->idx++; da_new = temp + temp2; break; case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_LEFT_CONTIG: case BMAP_RIGHT_CONTIG: /* * These cases are all impossible. */ ASSERT(0); } /* convert to a btree if necessary */ if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { int tmp_logflags; /* partial log flag return val */ ASSERT(bma->cur == NULL); error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, bma->firstblock, bma->flist, &bma->cur, da_old > 0, &tmp_logflags, XFS_DATA_FORK); bma->logflags |= tmp_logflags; if (error) goto done; } /* adjust for changes in reserved delayed indirect blocks */ if (da_old || da_new) { temp = da_new; if (bma->cur) temp += bma->cur->bc_private.b.allocated; ASSERT(temp <= da_old); if (temp < da_old) xfs_icsb_modify_counters(bma->ip->i_mount, XFS_SBS_FDBLOCKS, (int64_t)(da_old - temp), 0); } /* clear out the allocated field, done with it now in any case. */ if (bma->cur) bma->cur->bc_private.b.allocated = 0; xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK); done: bma->logflags |= rval; return error; #undef LEFT #undef RIGHT #undef PREV } /* * Convert an unwritten allocation to a real allocation or vice versa. 
*/ STATIC int /* error */ xfs_bmap_add_extent_unwritten_real( struct xfs_trans *tp, xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t *idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ xfs_bmbt_irec_t *new, /* new data to add to file extents */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp) /* inode logging flags */ { xfs_btree_cur_t *cur; /* btree cursor */ xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ int error; /* error return value */ int i; /* temp state */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_exntst_t newext; /* new extent state */ xfs_exntst_t oldext; /* old extent state */ xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ /* left is 0, right is 1, prev is 2 */ int rval=0; /* return value (logging flags) */ int state = 0;/* state bits, accessed thru macros */ *logflagsp = 0; cur = *curp; ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); ASSERT(*idx >= 0); ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); ASSERT(!isnullstartblock(new->br_startblock)); XFS_STATS_INC(xs_add_exlist); #define LEFT r[0] #define RIGHT r[1] #define PREV r[2] /* * Set up a bunch of variables to make the tests simpler. */ error = 0; ep = xfs_iext_get_ext(ifp, *idx); xfs_bmbt_get_all(ep, &PREV); newext = new->br_state; oldext = (newext == XFS_EXT_UNWRITTEN) ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; ASSERT(PREV.br_state == oldext); new_endoff = new->br_startoff + new->br_blockcount; ASSERT(PREV.br_startoff <= new->br_startoff); ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); /* * Set flags determining what part of the previous oldext allocation * extent is being replaced by a newext allocation. 
*/ if (PREV.br_startoff == new->br_startoff) state |= BMAP_LEFT_FILLING; if (PREV.br_startoff + PREV.br_blockcount == new_endoff) state |= BMAP_RIGHT_FILLING; /* * Check and set flags if this segment has a left neighbor. * Don't set contiguous if the combined extent would be too large. */ if (*idx > 0) { state |= BMAP_LEFT_VALID; xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT); if (isnullstartblock(LEFT.br_startblock)) state |= BMAP_LEFT_DELAY; } if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && LEFT.br_state == newext && LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) state |= BMAP_LEFT_CONTIG; /* * Check and set flags if this segment has a right neighbor. * Don't set contiguous if the combined extent would be too large. * Also check for all-three-contiguous being too large. */ if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { state |= BMAP_RIGHT_VALID; xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT); if (isnullstartblock(RIGHT.br_startblock)) state |= BMAP_RIGHT_DELAY; } if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && new_endoff == RIGHT.br_startoff && new->br_startblock + new->br_blockcount == RIGHT.br_startblock && newext == RIGHT.br_state && new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) != (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING) || LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN)) state |= BMAP_RIGHT_CONTIG; /* * Switch out based on the FILLING and CONTIG state bits. */ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Setting all of a previous oldext extent to newext. 
* The left and right neighbors are both contiguous with new. */ --*idx; trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_iext_remove(ip, *idx + 1, 2, state); ip->i_d.di_nextents -= 2; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, RIGHT.br_startblock, RIGHT.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount, LEFT.br_state))) goto done; } break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The left neighbor is contiguous, the right is not. 
*/ --*idx; trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), LEFT.br_blockcount + PREV.br_blockcount); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_iext_remove(ip, *idx + 1, 1, state); ip->i_d.di_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + PREV.br_blockcount, LEFT.br_state))) goto done; } break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The right neighbor is contiguous, the left is not. */ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount + RIGHT.br_blockcount); xfs_bmbt_set_state(ep, newext); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_iext_remove(ip, *idx + 1, 1, state); ip->i_d.di_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, RIGHT.br_startblock, RIGHT.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, newext))) goto done; } break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: /* * Setting all of a previous oldext extent to newext. 
* Neither the left nor right neighbors are contiguous with * the new one. */ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_state(ep, newext); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); if (cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new->br_startblock, new->br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, new->br_startoff, new->br_startblock, new->br_blockcount, newext))) goto done; } break; case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: /* * Setting the first part of a previous oldext extent to newext. * The left neighbor is contiguous. */ trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1), LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_); trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_startblock(ep, new->br_startblock + new->br_blockcount); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount - new->br_blockcount); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); --*idx; if (cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, PREV.br_startoff + new->br_blockcount, PREV.br_startblock + new->br_blockcount, PREV.br_blockcount - new->br_blockcount, oldext))) goto done; if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; error = xfs_bmbt_update(cur, LEFT.br_startoff, LEFT.br_startblock, LEFT.br_blockcount + new->br_blockcount, LEFT.br_state); if (error) goto done; } break; case BMAP_LEFT_FILLING: /* * Setting the first part of a previous oldext extent to newext. * The left neighbor is not contiguous. 
*/ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); xfs_bmbt_set_startoff(ep, new_endoff); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount - new->br_blockcount); xfs_bmbt_set_startblock(ep, new->br_startblock + new->br_blockcount); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_iext_insert(ip, *idx, 1, new, state); ip->i_d.di_nextents++; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, PREV.br_startoff + new->br_blockcount, PREV.br_startblock + new->br_blockcount, PREV.br_blockcount - new->br_blockcount, oldext))) goto done; cur->bc_rec.b = *new; if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } break; case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: /* * Setting the last part of a previous oldext extent to newext. * The right neighbor is contiguous with the new allocation. 
*/ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount - new->br_blockcount); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); ++*idx; trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, newext); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); if (cur == NULL) rval = XFS_ILOG_DEXT; else { rval = 0; if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount - new->br_blockcount, oldext))) goto done; if ((error = xfs_btree_increment(cur, 0, &i))) goto done; if ((error = xfs_bmbt_update(cur, new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, newext))) goto done; } break; case BMAP_RIGHT_FILLING: /* * Setting the last part of a previous oldext extent to newext. * The right neighbor is not contiguous. 
*/ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount - new->br_blockcount); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); ++*idx; xfs_iext_insert(ip, *idx, 1, new, state); ip->i_d.di_nextents++; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount - new->br_blockcount, oldext))) goto done; if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new->br_startblock, new->br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); cur->bc_rec.b.br_state = XFS_EXT_NORM; if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } break; case 0: /* * Setting the middle part of a previous oldext extent to * newext. Contiguity is impossible here. * One extent becomes three extents. 
*/ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, new->br_startoff - PREV.br_startoff); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); r[0] = *new; r[1].br_startoff = new_endoff; r[1].br_blockcount = PREV.br_startoff + PREV.br_blockcount - new_endoff; r[1].br_startblock = new->br_startblock + new->br_blockcount; r[1].br_state = oldext; ++*idx; xfs_iext_insert(ip, *idx, 2, &r[0], state); ip->i_d.di_nextents += 2; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { rval = XFS_ILOG_CORE; if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, PREV.br_startblock, PREV.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); /* new right extent - oldext */ if ((error = xfs_bmbt_update(cur, r[1].br_startoff, r[1].br_startblock, r[1].br_blockcount, r[1].br_state))) goto done; /* new left extent - oldext */ cur->bc_rec.b = PREV; cur->bc_rec.b.br_blockcount = new->br_startoff - PREV.br_startoff; if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); /* * Reset the cursor to the position of the new extent * we are about to insert as we can't trust it after * the previous insert. */ if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new->br_startblock, new->br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); /* new middle extent - newext */ cur->bc_rec.b.br_state = new->br_state; if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } break; case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: case BMAP_LEFT_CONTIG: case BMAP_RIGHT_CONTIG: /* * These cases are all impossible. 
		 */
		ASSERT(0);
	}

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
				0, &tmp_logflags, XFS_DATA_FORK);
		*logflagsp |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_private.b.allocated = 0;
		*curp = cur;
	}

	xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
done:
	*logflagsp |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}

/*
 * xfs_bmap_add_extent_hole_delay - convert a hole to a delayed allocation.
 *
 * @ip:		incore inode pointer
 * @idx:	in/out: extent index at which the hole sits; on return it
 *		points at the record containing the new range
 * @new:	the delayed range to add; must carry a null startblock
 *		(asserted below), i.e. no real disk blocks yet
 *
 * Only the incore extent list is touched -- there is no btree work here,
 * and nothing can fail, hence the void return.  A delayed extent encodes
 * its worst-case indirect-block reservation in the startblock field via
 * nullstartblock()/startblockval(); when merging with delayed neighbors
 * the reservations are recomputed and any surplus is returned to the
 * free-block counter at the bottom of the function.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen=0;	/* new indirect size */
	xfs_filblks_t		oldlen=0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			state;  /* state bits, accessed thru macros */
	xfs_filblks_t		temp=0;	/* temp for indirect calculations */

	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	state = 0;
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);

		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);

		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 * Note: only DELAYED neighbors can merge with a delayed extent.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= MAXEXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		/* re-derive the indirect reservation for the merged length */
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff,
			nullstartblock((int)newlen), temp, right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, *idx, 1, new, state);
		break;
	}
	/*
	 * Merging can only shrink the combined worst-case reservation;
	 * give the surplus blocks back to the free-space counter.
	 */
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
			(int64_t)(oldlen - newlen), 0);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
	}
}

/*
 * Convert a hole to a real allocation.
 */
/*
 * xfs_bmap_add_extent_hole_real - insert a real allocation (bma->got) into
 * a hole in the extent list of the given fork, merging it with contiguous
 * real neighbors where possible.
 *
 * @bma:	bmap allocation context; uses bma->ip (inode), bma->idx
 *		(insertion index, adjusted on merge-left), bma->got (the new
 *		extent), bma->cur (btree cursor, NULL for extent-list
 *		format), and bma->tp/firstblock/flist for a possible
 *		extents-to-btree format conversion
 * @whichfork:	XFS_DATA_FORK or XFS_ATTR_FORK
 *
 * Returns 0 or an error from the btree operations.  Inode logging flags
 * are accumulated into bma->logflags (via rval at the done label).  The
 * new extent must have real disk blocks, and when a cursor exists it must
 * not be in delayed-allocation mode (both asserted below).
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval=0;	/* return value (logging flags) */
	int			state;	/* state bits, accessed thru macros */

	ifp = XFS_IFORK_PTR(bma->ip, whichfork);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(xs_add_exlist);

	state = 0;
	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 * A merge requires offset AND block contiguity, a matching state,
	 * and a non-delayed neighbor.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
		right.br_blockcount <= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		--bma->idx;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			left.br_blockcount + new->br_blockcount +
			right.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);

		/* two records (left, right) become one */
		XFS_IFORK_NEXT_SET(bma->ip, whichfork,
			XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
		if (bma->cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			/* delete right's record, then widen left over both */
			error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
					right.br_startblock, right.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount +
						right.br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--bma->idx;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			left.br_blockcount + new->br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		if (bma->cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
					left.br_startblock, left.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + right.br_blockcount,
			right.br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		if (bma->cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur,
					right.br_startoff,
					right.br_startblock,
					right.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
						right.br_blockcount,
					right.br_state);
			if (error)
				goto done;
		}
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
		XFS_IFORK_NEXT_SET(bma->ip, whichfork,
			XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
		if (bma->cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			/* must not already exist in the btree (i == 0) */
			error = xfs_bmbt_lookup_eq(bma->cur,
					new->br_startoff,
					new->br_startblock,
					new->br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			bma->cur->bc_rec.b.br_state = new->br_state;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;
	}

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->flist, &bma->cur,
				0, &tmp_logflags, whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case.
*/ if (bma->cur) bma->cur->bc_private.b.allocated = 0; xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); done: bma->logflags |= rval; return error; } /* * Functions used in the extent read, allocate and remove paths */ /* * Adjust the size of the new extent based on di_extsize and rt extsize. */ STATIC int xfs_bmap_extsize_align( xfs_mount_t *mp, xfs_bmbt_irec_t *gotp, /* next extent pointer */ xfs_bmbt_irec_t *prevp, /* previous extent pointer */ xfs_extlen_t extsz, /* align to this extent size */ int rt, /* is this a realtime inode? */ int eof, /* is extent at end-of-file? */ int delay, /* creating delalloc extent? */ int convert, /* overwriting unwritten extent? */ xfs_fileoff_t *offp, /* in/out: aligned offset */ xfs_extlen_t *lenp) /* in/out: aligned length */ { xfs_fileoff_t orig_off; /* original offset */ xfs_extlen_t orig_alen; /* original length */ xfs_fileoff_t orig_end; /* original off+len */ xfs_fileoff_t nexto; /* next file offset */ xfs_fileoff_t prevo; /* previous file offset */ xfs_fileoff_t align_off; /* temp for offset */ xfs_extlen_t align_alen; /* temp for length */ xfs_extlen_t temp; /* temp for calculations */ if (convert) return 0; orig_off = align_off = *offp; orig_alen = align_alen = *lenp; orig_end = orig_off + orig_alen; /* * If this request overlaps an existing extent, then don't * attempt to perform any additional alignment. */ if (!delay && !eof && (orig_off >= gotp->br_startoff) && (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { return 0; } /* * If the file offset is unaligned vs. the extent size * we need to align it. This will be possible unless * the file was previously written with a kernel that didn't * perform this alignment, or if a truncate shot us in the * foot. */ temp = do_mod(orig_off, extsz); if (temp) { align_alen += temp; align_off -= temp; } /* * Same adjustment for the end of the requested area. 
*/ if ((temp = (align_alen % extsz))) { align_alen += extsz - temp; } /* * If the previous block overlaps with this proposed allocation * then move the start forward without adjusting the length. */ if (prevp->br_startoff != NULLFILEOFF) { if (prevp->br_startblock == HOLESTARTBLOCK) prevo = prevp->br_startoff; else prevo = prevp->br_startoff + prevp->br_blockcount; } else prevo = 0; if (align_off != orig_off && align_off < prevo) align_off = prevo; /* * If the next block overlaps with this proposed allocation * then move the start back without adjusting the length, * but not before offset 0. * This may of course make the start overlap previous block, * and if we hit the offset 0 limit then the next block * can still overlap too. */ if (!eof && gotp->br_startoff != NULLFILEOFF) { if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) nexto = gotp->br_startoff + gotp->br_blockcount; else nexto = gotp->br_startoff; } else nexto = NULLFILEOFF; if (!eof && align_off + align_alen != orig_end && align_off + align_alen > nexto) align_off = nexto > align_alen ? nexto - align_alen : 0; /* * If we're now overlapping the next or previous extent that * means we can't fit an extsz piece in this hole. Just move * the start forward to the first valid spot and set * the length so we hit the end. */ if (align_off != orig_off && align_off < prevo) align_off = prevo; if (align_off + align_alen != orig_end && align_off + align_alen > nexto && nexto != NULLFILEOFF) { ASSERT(nexto > prevo); align_alen = nexto - align_off; } /* * If realtime, and the result isn't a multiple of the realtime * extent size we need to remove blocks until it is. */ if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { /* * We're not covering the original request, or * we won't be able to once we fix the length. 
*/ if (orig_off < align_off || orig_end > align_off + align_alen || align_alen - temp < orig_alen) return XFS_ERROR(EINVAL); /* * Try to fix it by moving the start up. */ if (align_off + temp <= orig_off) { align_alen -= temp; align_off += temp; } /* * Try to fix it by moving the end in. */ else if (align_off + align_alen - temp >= orig_end) align_alen -= temp; /* * Set the start to the minimum then trim the length. */ else { align_alen -= orig_off - align_off; align_off = orig_off; align_alen -= align_alen % mp->m_sb.sb_rextsize; } /* * Result doesn't cover the request, fail it. */ if (orig_off < align_off || orig_end > align_off + align_alen) return XFS_ERROR(EINVAL); } else { ASSERT(orig_off >= align_off); ASSERT(orig_end <= align_off + align_alen); } #ifdef DEBUG if (!eof && gotp->br_startoff != NULLFILEOFF) ASSERT(align_off + align_alen <= gotp->br_startoff); if (prevp->br_startoff != NULLFILEOFF) ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); #endif *lenp = align_alen; *offp = align_off; return 0; } #define XFS_ALLOC_GAP_UNITS 4 STATIC void xfs_bmap_adjacent( xfs_bmalloca_t *ap) /* bmap alloc argument struct */ { xfs_fsblock_t adjust; /* adjustment to block numbers */ xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ xfs_mount_t *mp; /* mount point structure */ int nullfb; /* true if ap->firstblock isn't set */ int rt; /* true if inode is realtime */ #define ISVALID(x,y) \ (rt ? \ (x) < mp->m_sb.sb_rblocks : \ XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) mp = ap->ip->i_mount; nullfb = *ap->firstblock == NULLFSBLOCK; rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); /* * If allocating at eof, and there's a previous real block, * try to use its last block as our starting point. 
*/ if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && !isnullstartblock(ap->prev.br_startblock) && ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, ap->prev.br_startblock)) { ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; /* * Adjust for the gap between prevp and us. */ adjust = ap->offset - (ap->prev.br_startoff + ap->prev.br_blockcount); if (adjust && ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) ap->blkno += adjust; } /* * If not at eof, then compare the two neighbor blocks. * Figure out whether either one gives us a good starting point, * and pick the better one. */ else if (!ap->eof) { xfs_fsblock_t gotbno; /* right side block number */ xfs_fsblock_t gotdiff=0; /* right side difference */ xfs_fsblock_t prevbno; /* left side block number */ xfs_fsblock_t prevdiff=0; /* left side difference */ /* * If there's a previous (left) block, select a requested * start block based on it. */ if (ap->prev.br_startoff != NULLFILEOFF && !isnullstartblock(ap->prev.br_startblock) && (prevbno = ap->prev.br_startblock + ap->prev.br_blockcount) && ISVALID(prevbno, ap->prev.br_startblock)) { /* * Calculate gap to end of previous block. */ adjust = prevdiff = ap->offset - (ap->prev.br_startoff + ap->prev.br_blockcount); /* * Figure the startblock based on the previous block's * end and the gap size. * Heuristic! * If the gap is large relative to the piece we're * allocating, or using it gives us an invalid block * number, then just use the end of the previous block. */ if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && ISVALID(prevbno + prevdiff, ap->prev.br_startblock)) prevbno += adjust; else prevdiff += adjust; /* * If the firstblock forbids it, can't use it, * must use default. */ if (!rt && !nullfb && XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) prevbno = NULLFSBLOCK; } /* * No previous block or can't follow it, just default. */ else prevbno = NULLFSBLOCK; /* * If there's a following (right) block, select a requested * start block based on it. 
*/ if (!isnullstartblock(ap->got.br_startblock)) { /* * Calculate gap to start of next block. */ adjust = gotdiff = ap->got.br_startoff - ap->offset; /* * Figure the startblock based on the next block's * start and the gap size. */ gotbno = ap->got.br_startblock; /* * Heuristic! * If the gap is large relative to the piece we're * allocating, or using it gives us an invalid block * number, then just use the start of the next block * offset by our length. */ if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && ISVALID(gotbno - gotdiff, gotbno)) gotbno -= adjust; else if (ISVALID(gotbno - ap->length, gotbno)) { gotbno -= ap->length; gotdiff += adjust - ap->length; } else gotdiff += adjust; /* * If the firstblock forbids it, can't use it, * must use default. */ if (!rt && !nullfb && XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) gotbno = NULLFSBLOCK; } /* * No next block, just default. */ else gotbno = NULLFSBLOCK; /* * If both valid, pick the better one, else the only good * one, else ap->blkno is already set (to 0 or the inode block). */ if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno; else if (prevbno != NULLFSBLOCK) ap->blkno = prevbno; else if (gotbno != NULLFSBLOCK) ap->blkno = gotbno; } #undef ISVALID } STATIC int xfs_bmap_rtalloc( xfs_bmalloca_t *ap) /* bmap alloc argument struct */ { xfs_alloctype_t atype = 0; /* type for allocation routines */ int error; /* error return value */ xfs_mount_t *mp; /* mount point structure */ xfs_extlen_t prod = 0; /* product factor for allocators */ xfs_extlen_t ralen = 0; /* realtime allocation length */ xfs_extlen_t align; /* minimum allocation alignment */ xfs_rtblock_t rtb; mp = ap->ip->i_mount; align = xfs_get_extsz_hint(ap->ip); prod = align / mp->m_sb.sb_rextsize; error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 1, ap->eof, 0, ap->conv, &ap->offset, &ap->length); if (error) return error; ASSERT(ap->length); ASSERT(ap->length % mp->m_sb.sb_rextsize == 0); /* * If the offset & length are not perfectly aligned * then kill prod, it will just get us in trouble. */ if (do_mod(ap->offset, align) || ap->length % align) prod = 1; /* * Set ralen to be the actual requested length in rtextents. */ ralen = ap->length / mp->m_sb.sb_rextsize; /* * If the old value was close enough to MAXEXTLEN that * we rounded up to it, cut it back so it's valid again. * Note that if it's a really large request (bigger than * MAXEXTLEN), we don't hear about that number, and can't * adjust the starting point to match it. */ if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; /* * Lock out other modifications to the RT bitmap inode. */ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL); /* * If it's an allocation to an empty file at offset 0, * pick an extent that will space things out in the rt area. 
*/ if (ap->eof && ap->offset == 0) { xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */ error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); if (error) return error; ap->blkno = rtx * mp->m_sb.sb_rextsize; } else { ap->blkno = 0; } xfs_bmap_adjacent(ap); /* * Realtime allocation, done through xfs_rtallocate_extent. */ atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; do_div(ap->blkno, mp->m_sb.sb_rextsize); rtb = ap->blkno; ap->length = ralen; if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length, &ralen, atype, ap->wasdel, prod, &rtb))) return error; if (rtb == NULLFSBLOCK && prod > 1 && (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length, &ralen, atype, ap->wasdel, 1, &rtb))) return error; ap->blkno = rtb; if (ap->blkno != NULLFSBLOCK) { ap->blkno *= mp->m_sb.sb_rextsize; ralen *= mp->m_sb.sb_rextsize; ap->length = ralen; ap->ip->i_d.di_nblocks += ralen; xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); if (ap->wasdel) ap->ip->i_delayed_blks -= ralen; /* * Adjust the disk quota also. This was reserved * earlier. */ xfs_trans_mod_dquot_byino(ap->tp, ap->ip, ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_RTBCOUNT, (long) ralen); } else { ap->length = 0; } return 0; } STATIC int xfs_bmap_btalloc_nullfb( struct xfs_bmalloca *ap, struct xfs_alloc_arg *args, xfs_extlen_t *blen) { struct xfs_mount *mp = ap->ip->i_mount; struct xfs_perag *pag; xfs_agnumber_t ag, startag; int notinit = 0; int error; if (ap->userdata && xfs_inode_is_filestream(ap->ip)) args->type = XFS_ALLOCTYPE_NEAR_BNO; else args->type = XFS_ALLOCTYPE_START_BNO; args->total = ap->total; /* * Search for an allocation group with a single extent large enough * for the request. If one isn't found, then adjust the minimum * allocation size to the largest space found. 
*/ startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); if (startag == NULLAGNUMBER) startag = ag = 0; pag = xfs_perag_get(mp, ag); while (*blen < args->maxlen) { if (!pag->pagf_init) { error = xfs_alloc_pagf_init(mp, args->tp, ag, XFS_ALLOC_FLAG_TRYLOCK); if (error) { xfs_perag_put(pag); return error; } } /* * See xfs_alloc_fix_freelist... */ if (pag->pagf_init) { xfs_extlen_t longest; longest = xfs_alloc_longest_free_extent(mp, pag); if (*blen < longest) *blen = longest; } else notinit = 1; if (xfs_inode_is_filestream(ap->ip)) { if (*blen >= args->maxlen) break; if (ap->userdata) { /* * If startag is an invalid AG, we've * come here once before and * xfs_filestream_new_ag picked the * best currently available. * * Don't continue looping, since we * could loop forever. */ if (startag == NULLAGNUMBER) break; error = xfs_filestream_new_ag(ap, &ag); xfs_perag_put(pag); if (error) return error; /* loop again to set 'blen'*/ startag = NULLAGNUMBER; pag = xfs_perag_get(mp, ag); continue; } } if (++ag == mp->m_sb.sb_agcount) ag = 0; if (ag == startag) break; xfs_perag_put(pag); pag = xfs_perag_get(mp, ag); } xfs_perag_put(pag); /* * Since the above loop did a BUF_TRYLOCK, it is * possible that there is space for this request. */ if (notinit || *blen < ap->minlen) args->minlen = ap->minlen; /* * If the best seen length is less than the request * length, use the best as the minimum. */ else if (*blen < args->maxlen) args->minlen = *blen; /* * Otherwise we've seen an extent as big as maxlen, * use that as the minimum. */ else args->minlen = args->maxlen; /* * set the failure fallback case to look in the selected * AG as the stream may have moved. 
*/ if (xfs_inode_is_filestream(ap->ip)) ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); return 0; } STATIC int xfs_bmap_btalloc( xfs_bmalloca_t *ap) /* bmap alloc argument struct */ { xfs_mount_t *mp; /* mount point structure */ xfs_alloctype_t atype = 0; /* type for allocation routines */ xfs_extlen_t align; /* minimum allocation alignment */ xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ xfs_agnumber_t ag; xfs_alloc_arg_t args; xfs_extlen_t blen; xfs_extlen_t nextminlen = 0; int nullfb; /* true if ap->firstblock isn't set */ int isaligned; int tryagain; int error; ASSERT(ap->length); mp = ap->ip->i_mount; align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; if (unlikely(align)) { error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0, ap->eof, 0, ap->conv, &ap->offset, &ap->length); ASSERT(!error); ASSERT(ap->length); } nullfb = *ap->firstblock == NULLFSBLOCK; fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); if (nullfb) { if (ap->userdata && xfs_inode_is_filestream(ap->ip)) { ag = xfs_filestream_lookup_ag(ap->ip); ag = (ag != NULLAGNUMBER) ? ag : 0; ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); } else { ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); } } else ap->blkno = *ap->firstblock; xfs_bmap_adjacent(ap); /* * If allowed, use ap->blkno; otherwise must use firstblock since * it's in the right allocation group. */ if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) ; else ap->blkno = *ap->firstblock; /* * Normal allocation, done through xfs_alloc_vextent. */ tryagain = isaligned = 0; memset(&args, 0, sizeof(args)); args.tp = ap->tp; args.mp = mp; args.fsbno = ap->blkno; /* Trim the allocation back to the maximum an AG can fit. 
*/ args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp)); args.firstblock = *ap->firstblock; blen = 0; if (nullfb) { error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); if (error) return error; } else if (ap->flist->xbf_low) { if (xfs_inode_is_filestream(ap->ip)) args.type = XFS_ALLOCTYPE_FIRST_AG; else args.type = XFS_ALLOCTYPE_START_BNO; args.total = args.minlen = ap->minlen; } else { args.type = XFS_ALLOCTYPE_NEAR_BNO; args.total = ap->total; args.minlen = ap->minlen; } /* apply extent size hints if obtained earlier */ if (unlikely(align)) { args.prod = align; if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) args.mod = (xfs_extlen_t)(args.prod - args.mod); } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) { args.prod = 1; args.mod = 0; } else { args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog; if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) args.mod = (xfs_extlen_t)(args.prod - args.mod); } /* * If we are not low on available data blocks, and the * underlying logical volume manager is a stripe, and * the file offset is zero then try to allocate data * blocks on stripe unit boundary. * NOTE: ap->aeof is only set if the allocation length * is >= the stripe unit and the allocation offset is * at the end of file. */ if (!ap->flist->xbf_low && ap->aeof) { if (!ap->offset) { args.alignment = mp->m_dalign; atype = args.type; isaligned = 1; /* * Adjust for alignment */ if (blen > args.alignment && blen <= args.maxlen) args.minlen = blen - args.alignment; args.minalignslop = 0; } else { /* * First try an exact bno allocation. * If it fails then do a near or start bno * allocation with alignment turned on. */ atype = args.type; tryagain = 1; args.type = XFS_ALLOCTYPE_THIS_BNO; args.alignment = 1; /* * Compute the minlen+alignment for the * next case. Set slop so that the value * of minlen+alignment+slop doesn't go up * between the calls. 
*/ if (blen > mp->m_dalign && blen <= args.maxlen) nextminlen = blen - mp->m_dalign; else nextminlen = args.minlen; if (nextminlen + mp->m_dalign > args.minlen + 1) args.minalignslop = nextminlen + mp->m_dalign - args.minlen - 1; else args.minalignslop = 0; } } else { args.alignment = 1; args.minalignslop = 0; } args.minleft = ap->minleft; args.wasdel = ap->wasdel; args.isfl = 0; args.userdata = ap->userdata; if ((error = xfs_alloc_vextent(&args))) return error; if (tryagain && args.fsbno == NULLFSBLOCK) { /* * Exact allocation failed. Now try with alignment * turned on. */ args.type = atype; args.fsbno = ap->blkno; args.alignment = mp->m_dalign; args.minlen = nextminlen; args.minalignslop = 0; isaligned = 1; if ((error = xfs_alloc_vextent(&args))) return error; } if (isaligned && args.fsbno == NULLFSBLOCK) { /* * allocation failed, so turn off alignment and * try again. */ args.type = atype; args.fsbno = ap->blkno; args.alignment = 0; if ((error = xfs_alloc_vextent(&args))) return error; } if (args.fsbno == NULLFSBLOCK && nullfb && args.minlen > ap->minlen) { args.minlen = ap->minlen; args.type = XFS_ALLOCTYPE_START_BNO; args.fsbno = ap->blkno; if ((error = xfs_alloc_vextent(&args))) return error; } if (args.fsbno == NULLFSBLOCK && nullfb) { args.fsbno = 0; args.type = XFS_ALLOCTYPE_FIRST_AG; args.total = ap->minlen; args.minleft = 0; if ((error = xfs_alloc_vextent(&args))) return error; ap->flist->xbf_low = 1; } if (args.fsbno != NULLFSBLOCK) { /* * check the allocation happened at the same or higher AG than * the first block that was allocated. 
*/ ASSERT(*ap->firstblock == NULLFSBLOCK || XFS_FSB_TO_AGNO(mp, *ap->firstblock) == XFS_FSB_TO_AGNO(mp, args.fsbno) || (ap->flist->xbf_low && XFS_FSB_TO_AGNO(mp, *ap->firstblock) < XFS_FSB_TO_AGNO(mp, args.fsbno))); ap->blkno = args.fsbno; if (*ap->firstblock == NULLFSBLOCK) *ap->firstblock = args.fsbno; ASSERT(nullfb || fb_agno == args.agno || (ap->flist->xbf_low && fb_agno < args.agno)); ap->length = args.len; ap->ip->i_d.di_nblocks += args.len; xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); if (ap->wasdel) ap->ip->i_delayed_blks -= args.len; /* * Adjust the disk quota also. This was reserved * earlier. */ xfs_trans_mod_dquot_byino(ap->tp, ap->ip, ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, (long) args.len); } else { ap->blkno = NULLFSBLOCK; ap->length = 0; } return 0; } /* * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. * It figures out where to ask the underlying allocator to put the new extent. */ STATIC int xfs_bmap_alloc( xfs_bmalloca_t *ap) /* bmap alloc argument struct */ { if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata) return xfs_bmap_rtalloc(ap); return xfs_bmap_btalloc(ap); } /* * Trim the returned map to the required bounds */ STATIC void xfs_bmapi_trim_map( struct xfs_bmbt_irec *mval, struct xfs_bmbt_irec *got, xfs_fileoff_t *bno, xfs_filblks_t len, xfs_fileoff_t obno, xfs_fileoff_t end, int n, int flags) { if ((flags & XFS_BMAPI_ENTIRE) || got->br_startoff + got->br_blockcount <= obno) { *mval = *got; if (isnullstartblock(got->br_startblock)) mval->br_startblock = DELAYSTARTBLOCK; return; } if (obno > *bno) *bno = obno; ASSERT((*bno >= obno) || (n == 0)); ASSERT(*bno < end); mval->br_startoff = *bno; if (isnullstartblock(got->br_startblock)) mval->br_startblock = DELAYSTARTBLOCK; else mval->br_startblock = got->br_startblock + (*bno - got->br_startoff); /* * Return the minimum of what we got and what we asked for for * the length. 
We can use the len variable here because it is * modified below and we could have been there before coming * here if the first part of the allocation didn't overlap what * was asked for. */ mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, got->br_blockcount - (*bno - got->br_startoff)); mval->br_state = got->br_state; ASSERT(mval->br_blockcount <= len); return; } /* * Update and validate the extent map to return */ STATIC void xfs_bmapi_update_map( struct xfs_bmbt_irec **map, xfs_fileoff_t *bno, xfs_filblks_t *len, xfs_fileoff_t obno, xfs_fileoff_t end, int *n, int flags) { xfs_bmbt_irec_t *mval = *map; ASSERT((flags & XFS_BMAPI_ENTIRE) || ((mval->br_startoff + mval->br_blockcount) <= end)); ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || (mval->br_startoff < obno)); *bno = mval->br_startoff + mval->br_blockcount; *len = end - *bno; if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { /* update previous map with new information */ ASSERT(mval->br_startblock == mval[-1].br_startblock); ASSERT(mval->br_blockcount > mval[-1].br_blockcount); ASSERT(mval->br_state == mval[-1].br_state); mval[-1].br_blockcount = mval->br_blockcount; mval[-1].br_state = mval->br_state; } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && mval[-1].br_startblock != DELAYSTARTBLOCK && mval[-1].br_startblock != HOLESTARTBLOCK && mval->br_startblock == mval[-1].br_startblock + mval[-1].br_blockcount && ((flags & XFS_BMAPI_IGSTATE) || mval[-1].br_state == mval->br_state)) { ASSERT(mval->br_startoff == mval[-1].br_startoff + mval[-1].br_blockcount); mval[-1].br_blockcount += mval->br_blockcount; } else if (*n > 0 && mval->br_startblock == DELAYSTARTBLOCK && mval[-1].br_startblock == DELAYSTARTBLOCK && mval->br_startoff == mval[-1].br_startoff + mval[-1].br_blockcount) { mval[-1].br_blockcount += mval->br_blockcount; mval[-1].br_state = mval->br_state; } else if (!((*n == 0) && ((mval->br_startoff + mval->br_blockcount) <= obno))) { mval++; (*n)++; } *map = 
mval; } /* * Map file blocks to filesystem blocks without allocation. */ int xfs_bmapi_read( struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len, struct xfs_bmbt_irec *mval, int *nmap, int flags) { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp; struct xfs_bmbt_irec got; struct xfs_bmbt_irec prev; xfs_fileoff_t obno; xfs_fileoff_t end; xfs_extnum_t lastx; int error; int eof; int n = 0; int whichfork = (flags & XFS_BMAPI_ATTRFORK) ? XFS_ATTR_FORK : XFS_DATA_FORK; ASSERT(*nmap >= 1); ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| XFS_BMAPI_IGSTATE))); if (unlikely(XFS_TEST_ERROR( (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); } if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); XFS_STATS_INC(xs_blk_mapr); ifp = XFS_IFORK_PTR(ip, whichfork); if (!(ifp->if_flags & XFS_IFEXTENTS)) { error = xfs_iread_extents(NULL, ip, whichfork); if (error) return error; } xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev); end = bno + len; obno = bno; while (bno < end && n < *nmap) { /* Reading past eof, act as though there's a hole up to end. */ if (eof) got.br_startoff = end; if (got.br_startoff > bno) { /* Reading in a hole. */ mval->br_startoff = bno; mval->br_startblock = HOLESTARTBLOCK; mval->br_blockcount = XFS_FILBLKS_MIN(len, got.br_startoff - bno); mval->br_state = XFS_EXT_NORM; bno += mval->br_blockcount; len -= mval->br_blockcount; mval++; n++; continue; } /* set up the extent map to return. */ xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); /* If we're done, stop now. */ if (bno >= end || n >= *nmap) break; /* Else go on to the next record. 
*/ if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got); else eof = 1; } *nmap = n; return 0; } STATIC int xfs_bmapi_reserve_delalloc( struct xfs_inode *ip, xfs_fileoff_t aoff, xfs_filblks_t len, struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *prev, xfs_extnum_t *lastx, int eof) { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); xfs_extlen_t alen; xfs_extlen_t indlen; char rt = XFS_IS_REALTIME_INODE(ip); xfs_extlen_t extsz; int error; alen = XFS_FILBLKS_MIN(len, MAXEXTLEN); if (!eof) alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); /* Figure out the extent size, adjust alen */ extsz = xfs_get_extsz_hint(ip); if (extsz) { /* * Make sure we don't exceed a single extent length when we * align the extent by reducing length we are going to * allocate by the maximum amount extent size aligment may * require. */ alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1)); error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof, 1, 0, &aoff, &alen); ASSERT(!error); } if (rt) extsz = alen / mp->m_sb.sb_rextsize; /* * Make a transaction-less quota reservation for delayed allocation * blocks. This number gets adjusted later. We return if we haven't * allocated blocks already inside this loop. */ error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0, rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); if (error) return error; /* * Split changing sb for alen and indlen since they could be coming * from different places. 
*/ indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); ASSERT(indlen > 0); if (rt) { error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, -((int64_t)extsz), 0); } else { error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -((int64_t)alen), 0); } if (error) goto out_unreserve_quota; error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -((int64_t)indlen), 0); if (error) goto out_unreserve_blocks; ip->i_delayed_blks += alen; got->br_startoff = aoff; got->br_startblock = nullstartblock(indlen); got->br_blockcount = alen; got->br_state = XFS_EXT_NORM; xfs_bmap_add_extent_hole_delay(ip, lastx, got); /* * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay * might have merged it into one of the neighbouring ones. */ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got); ASSERT(got->br_startoff <= aoff); ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen); ASSERT(isnullstartblock(got->br_startblock)); ASSERT(got->br_state == XFS_EXT_NORM); return 0; out_unreserve_blocks: if (rt) xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0); else xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0); out_unreserve_quota: if (XFS_IS_QUOTA_ON(mp)) xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); return error; } /* * Map file blocks to filesystem blocks, adding delayed allocations as needed. */ int xfs_bmapi_delay( struct xfs_inode *ip, /* incore inode */ xfs_fileoff_t bno, /* starting file offs. mapped */ xfs_filblks_t len, /* length to map in file */ struct xfs_bmbt_irec *mval, /* output: map values */ int *nmap, /* i/o: mval size/count */ int flags) /* XFS_BMAPI_... 
*/ { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); struct xfs_bmbt_irec got; /* current file extent record */ struct xfs_bmbt_irec prev; /* previous file extent record */ xfs_fileoff_t obno; /* old block number (offset) */ xfs_fileoff_t end; /* end of mapped file region */ xfs_extnum_t lastx; /* last useful extent number */ int eof; /* we've hit the end of extents */ int n = 0; /* current extent index */ int error = 0; ASSERT(*nmap >= 1); ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); ASSERT(!(flags & ~XFS_BMAPI_ENTIRE)); if (unlikely(XFS_TEST_ERROR( (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS && XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE), mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); } if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); XFS_STATS_INC(xs_blk_mapw); if (!(ifp->if_flags & XFS_IFEXTENTS)) { error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); if (error) return error; } xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev); end = bno + len; obno = bno; while (bno < end && n < *nmap) { if (eof || got.br_startoff > bno) { error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got, &prev, &lastx, eof); if (error) { if (n == 0) { *nmap = 0; return error; } break; } } /* set up the extent map to return. */ xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); /* If we're done, stop now. */ if (bno >= end || n >= *nmap) break; /* Else go on to the next record. */ prev = got; if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got); else eof = 1; } *nmap = n; return 0; } STATIC int __xfs_bmapi_allocate( struct xfs_bmalloca *bma) { struct xfs_mount *mp = bma->ip->i_mount; int whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ? 
XFS_ATTR_FORK : XFS_DATA_FORK; struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); int tmp_logflags = 0; int error; int rt; ASSERT(bma->length > 0); rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip); /* * For the wasdelay case, we could also just allocate the stuff asked * for in this bmap call but that wouldn't be as good. */ if (bma->wasdel) { bma->length = (xfs_extlen_t)bma->got.br_blockcount; bma->offset = bma->got.br_startoff; if (bma->idx != NULLEXTNUM && bma->idx) { xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &bma->prev); } } else { bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); if (!bma->eof) bma->length = XFS_FILBLKS_MIN(bma->length, bma->got.br_startoff - bma->offset); } /* * Indicate if this is the first user data in the file, or just any * user data. */ if (!(bma->flags & XFS_BMAPI_METADATA)) { bma->userdata = (bma->offset == 0) ? XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA; } bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1; /* * Only want to do the alignment at the eof if it is userdata and * allocation length is larger than a stripe unit. */ if (mp->m_dalign && bma->length >= mp->m_dalign && !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { error = xfs_bmap_isaeof(bma, whichfork); if (error) return error; } error = xfs_bmap_alloc(bma); if (error) return error; if (bma->flist->xbf_low) bma->minleft = 0; if (bma->cur) bma->cur->bc_private.b.firstblock = *bma->firstblock; if (bma->blkno == NULLFSBLOCK) return 0; if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); bma->cur->bc_private.b.firstblock = *bma->firstblock; bma->cur->bc_private.b.flist = bma->flist; } /* * Bump the number of extents we've allocated * in this call. */ bma->nallocs++; if (bma->cur) bma->cur->bc_private.b.flags = bma->wasdel ? 
XFS_BTCUR_BPRV_WASDEL : 0;
	/* Record the new allocation in the incore extent: [offset, offset+length). */
	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;
	bma->got.br_blockcount = bma->length;
	bma->got.br_state = XFS_EXT_NORM;

	/*
	 * A wasdelay extent has been initialized, so shouldn't be flagged
	 * as unwritten.
	 */
	if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
	    xfs_sb_version_hasextflgbit(&mp->m_sb))
		bma->got.br_state = XFS_EXT_UNWRITTEN;

	if (bma->wasdel)
		error = xfs_bmap_add_extent_delay_real(bma);
	else
		error = xfs_bmap_add_extent_hole_real(bma, whichfork);

	bma->logflags |= tmp_logflags;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
	 * or xfs_bmap_add_extent_hole_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);

	/* The resulting extent must cover the range we were asked to map. */
	ASSERT(bma->got.br_startoff <= bma->offset);
	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
	       bma->offset + bma->length);
	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
	       bma->got.br_state == XFS_EXT_UNWRITTEN);
	return 0;
}

/*
 * Workqueue trampoline: run __xfs_bmapi_allocate() on a worker thread's
 * (deep) stack and signal the waiting submitter via args->done.
 */
static void
xfs_bmapi_allocate_worker(
	struct work_struct	*work)
{
	struct xfs_bmalloca	*args = container_of(work,
						struct xfs_bmalloca, work);
	unsigned long		pflags;

	/* we are in a transaction context here */
	current_set_flags_nested(&pflags, PF_FSTRANS);

	args->result = __xfs_bmapi_allocate(args);
	complete(args->done);

	current_restore_flags_nested(&pflags, PF_FSTRANS);
}

/*
 * Some allocation requests often come in with little stack to work on. Push
 * them off to a worker thread so there is lots of stack to use. Otherwise just
 * call directly to avoid the context switch overhead here.
 */
int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*args)
{
	DECLARE_COMPLETION_ONSTACK(done);

	/* Caller did not request a stack switch: allocate inline. */
	if (!args->stack_switch)
		return __xfs_bmapi_allocate(args);

	/* Hand the request to the allocation workqueue and wait for it. */
	args->done = &done;
	INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
	queue_work(xfs_alloc_wq, &args->work);
	wait_for_completion(&done);
	return args->result;
}

/*
 * Convert the extent state of a mapped range between written and unwritten,
 * if the bmapi flags ask for it.  Returns 0 (possibly without doing
 * anything), EAGAIN if the caller must issue another request for the
 * remainder of the range, or a negative-logic XFS error.
 */
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,	/* in/out: mapping to convert */
	xfs_filblks_t		len,	/* length of the original request */
	int			flags)	/* XFS_BMAPI_... */
{
	int			whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
					XFS_ATTR_FORK : XFS_DATA_FORK;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		/* Btree-format fork but no cursor yet: create one. */
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
		bma->cur->bc_private.b.flist = bma->flist;
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
			&bma->cur, mval, bma->firstblock, bma->flist,
			&tmp_logflags);
	bma->logflags |= tmp_logflags;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
*/ if (mval->br_blockcount < len) return EAGAIN; return 0; } /* * Map file blocks to filesystem blocks, and allocate blocks or convert the * extent state if necessary. Details behaviour is controlled by the flags * parameter. Only allocates blocks from a single allocation group, to avoid * locking problems. * * The returned value in "firstblock" from the first call in a transaction * must be remembered and presented to subsequent calls in "firstblock". * An upper bound for the number of blocks to be allocated is supplied to * the first call in "total"; if no allocation group has that many free * blocks then the call will fail (return NULLFSBLOCK in "firstblock"). */ int xfs_bmapi_write( struct xfs_trans *tp, /* transaction pointer */ struct xfs_inode *ip, /* incore inode */ xfs_fileoff_t bno, /* starting file offs. mapped */ xfs_filblks_t len, /* length to map in file */ int flags, /* XFS_BMAPI_... */ xfs_fsblock_t *firstblock, /* first allocated block controls a.g. for allocs */ xfs_extlen_t total, /* total blocks needed */ struct xfs_bmbt_irec *mval, /* output: map values */ int *nmap, /* i/o: mval size/count */ struct xfs_bmap_free *flist) /* i/o: list extents to free */ { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp; struct xfs_bmalloca bma = { 0 }; /* args for xfs_bmap_alloc */ xfs_fileoff_t end; /* end of mapped file region */ int eof; /* after the end of extents */ int error; /* error return */ int n; /* current extent index */ xfs_fileoff_t obno; /* old block number (offset) */ int whichfork; /* data or attr fork */ char inhole; /* current location is hole in file */ char wasdelay; /* old extent was delayed */ #ifdef DEBUG xfs_fileoff_t orig_bno; /* original block number value */ int orig_flags; /* original flags arg value */ xfs_filblks_t orig_len; /* original value of len arg */ struct xfs_bmbt_irec *orig_mval; /* original value of mval */ int orig_nmap; /* original value of *nmap */ orig_bno = bno; orig_len = len; orig_flags = flags; 
orig_mval = mval; orig_nmap = *nmap; #endif ASSERT(*nmap >= 1); ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); ASSERT(!(flags & XFS_BMAPI_IGSTATE)); ASSERT(tp != NULL); ASSERT(len > 0); whichfork = (flags & XFS_BMAPI_ATTRFORK) ? XFS_ATTR_FORK : XFS_DATA_FORK; if (unlikely(XFS_TEST_ERROR( (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL), mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); } if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); ifp = XFS_IFORK_PTR(ip, whichfork); XFS_STATS_INC(xs_blk_mapw); if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { /* * XXX (dgc): This assumes we are only called for inodes that * contain content neutral data in local format. Anything that * contains caller-specific data in local format that needs * transformation to move to a block format needs to do the * conversion to extent format itself. * * Directory data forks and attribute forks handle this * themselves, but with the addition of metadata verifiers every * data fork in local format now contains caller specific data * and as such conversion through this function is likely to be * broken. * * The only likely user of this branch is for remote symlinks, * but we cannot overwrite the data fork contents of the symlink * (EEXIST occurs higher up the stack) and so it will never go * from local format to extent format here. Hence I don't think * this branch is ever executed intentionally and we should * consider removing it and asserting that xfs_bmapi_write() * cannot be called directly on local format forks. i.e. callers * are completely responsible for local to extent format * conversion, not xfs_bmapi_write(). 
*/ error = xfs_bmap_local_to_extents(tp, ip, firstblock, total, &bma.logflags, whichfork, xfs_bmap_local_to_extents_init_fn); if (error) goto error0; } if (*firstblock == NULLFSBLOCK) { if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1; else bma.minleft = 1; } else { bma.minleft = 0; } if (!(ifp->if_flags & XFS_IFEXTENTS)) { error = xfs_iread_extents(tp, ip, whichfork); if (error) goto error0; } xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got, &bma.prev); n = 0; end = bno + len; obno = bno; bma.tp = tp; bma.ip = ip; bma.total = total; bma.userdata = 0; bma.flist = flist; bma.firstblock = firstblock; if (flags & XFS_BMAPI_STACK_SWITCH) bma.stack_switch = 1; while (bno < end && n < *nmap) { inhole = eof || bma.got.br_startoff > bno; wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); /* * First, deal with the hole before the allocated space * that we found, if any. */ if (inhole || wasdelay) { bma.eof = eof; bma.conv = !!(flags & XFS_BMAPI_CONVERT); bma.wasdel = wasdelay; bma.offset = bno; bma.flags = flags; /* * There's a 32/64 bit type mismatch between the * allocation length request (which can be 64 bits in * length) and the bma length request, which is * xfs_extlen_t and therefore 32 bits. Hence we have to * check for 32-bit overflows and handle them here. */ if (len > (xfs_filblks_t)MAXEXTLEN) bma.length = MAXEXTLEN; else bma.length = len; ASSERT(len > 0); ASSERT(bma.length > 0); error = xfs_bmapi_allocate(&bma); if (error) goto error0; if (bma.blkno == NULLFSBLOCK) break; } /* Deal with the allocated space we found. 
*/ xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, end, n, flags); /* Execute unwritten extent conversion if necessary */ error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); if (error == EAGAIN) continue; if (error) goto error0; /* update the extent map to return */ xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); /* * If we're done, stop now. Stop when we've allocated * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise * the transaction may get too big. */ if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) break; /* Else go on to the next record. */ bma.prev = bma.got; if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) { xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx), &bma.got); } else eof = 1; } *nmap = n; /* * Transform from btree to extents, give it cur. */ if (xfs_bmap_wants_extents(ip, whichfork)) { int tmp_logflags = 0; ASSERT(bma.cur); error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &tmp_logflags, whichfork); bma.logflags |= tmp_logflags; if (error) goto error0; } ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || XFS_IFORK_NEXTENTS(ip, whichfork) > XFS_IFORK_MAXEXT(ip, whichfork)); error = 0; error0: /* * Log everything. Do this after conversion, there's no point in * logging the extent records if we've converted to btree format. */ if ((bma.logflags & xfs_ilog_fext(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) bma.logflags &= ~xfs_ilog_fext(whichfork); else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) bma.logflags &= ~xfs_ilog_fbroot(whichfork); /* * Log whatever the flags say, even if error. Otherwise we might miss * detecting a case where the data is changed, there's an error, * and it's not logged so we don't shutdown when we should. 
 */
	if (bma.logflags)
		xfs_trans_log_inode(tp, ip, bma.logflags);

	if (bma.cur) {
		if (!error) {
			/*
			 * The cursor's firstblock must stay in the same AG as
			 * *firstblock, or move to a later AG only when we were
			 * forced into low-space mode (flist->xbf_low).
			 */
			ASSERT(*firstblock == NULLFSBLOCK ||
			       XFS_FSB_TO_AGNO(mp, *firstblock) ==
			       XFS_FSB_TO_AGNO(mp,
				       bma.cur->bc_private.b.firstblock) ||
			       (flist->xbf_low &&
				XFS_FSB_TO_AGNO(mp, *firstblock) <
				XFS_FSB_TO_AGNO(mp,
					bma.cur->bc_private.b.firstblock)));
			*firstblock = bma.cur->bc_private.b.firstblock;
		}
		xfs_btree_del_cursor(bma.cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	if (!error)
		xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
			orig_nmap, *nmap);
	return error;
}

/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space (or undoing a delayed allocation).
 */
STATIC int				/* error */
xfs_bmap_del_extent(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	xfs_extnum_t		*idx,	/* extent number to update/delete */
	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork) /* data or attr fork */
{
	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
	xfs_fsblock_t		del_endblock=0;	/* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			delay;	/* current block is delayed allocated */
	int			do_fx;	/* free extent at end of routine */
	xfs_bmbt_rec_host_t	*ep;	/* current extent entry pointer */
	int			error;	/* error return value */
	int			flags;	/* inode logging flags */
	xfs_bmbt_irec_t		got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	xfs_filblks_t		temp;	/*
for indirect length calculations */ xfs_filblks_t temp2; /* for indirect length calculations */ int state = 0; XFS_STATS_INC(xs_del_exlist); if (whichfork == XFS_ATTR_FORK) state |= BMAP_ATTRFORK; mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT((*idx >= 0) && (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); ASSERT(del->br_blockcount > 0); ep = xfs_iext_get_ext(ifp, *idx); xfs_bmbt_get_all(ep, &got); ASSERT(got.br_startoff <= del->br_startoff); del_endoff = del->br_startoff + del->br_blockcount; got_endoff = got.br_startoff + got.br_blockcount; ASSERT(got_endoff >= del_endoff); delay = isnullstartblock(got.br_startblock); ASSERT(isnullstartblock(del->br_startblock) == delay); flags = 0; qfield = 0; error = 0; /* * If deleting a real allocation, must free up the disk space. */ if (!delay) { flags = XFS_ILOG_CORE; /* * Realtime allocation. Free it and record di_nblocks update. */ if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { xfs_fsblock_t bno; xfs_filblks_t len; ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0); ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0); bno = del->br_startblock; len = del->br_blockcount; do_div(bno, mp->m_sb.sb_rextsize); do_div(len, mp->m_sb.sb_rextsize); error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); if (error) goto done; do_fx = 0; nblks = len * mp->m_sb.sb_rextsize; qfield = XFS_TRANS_DQ_RTBCOUNT; } /* * Ordinary allocation. */ else { do_fx = 1; nblks = del->br_blockcount; qfield = XFS_TRANS_DQ_BCOUNT; } /* * Set up del_endblock and cur for later. */ del_endblock = del->br_startblock + del->br_blockcount; if (cur) { if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock, got.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } da_old = da_new = 0; } else { da_old = startblockval(got.br_startblock); da_new = 0; nblks = 0; do_fx = 0; } /* * Set flag value to use in switch statement. * Left-contig is 2, right-contig is 1. 
*/ switch (((got.br_startoff == del->br_startoff) << 1) | (got_endoff == del_endoff)) { case 3: /* * Matches the whole extent. Delete the entry. */ xfs_iext_remove(ip, *idx, 1, whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); --*idx; if (delay) break; XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) - 1); flags |= XFS_ILOG_CORE; if (!cur) { flags |= xfs_ilog_fext(whichfork); break; } if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); break; case 2: /* * Deleting the first part of the extent. */ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_startoff(ep, del_endoff); temp = got.br_blockcount - del->br_blockcount; xfs_bmbt_set_blockcount(ep, temp); if (delay) { temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), da_old); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); da_new = temp; break; } xfs_bmbt_set_startblock(ep, del_endblock); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); if (!cur) { flags |= xfs_ilog_fext(whichfork); break; } if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, got.br_blockcount - del->br_blockcount, got.br_state))) goto done; break; case 1: /* * Deleting the last part of the extent. */ temp = got.br_blockcount - del->br_blockcount; trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); if (delay) { temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), da_old); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); da_new = temp; break; } trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); if (!cur) { flags |= xfs_ilog_fext(whichfork); break; } if ((error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock, got.br_blockcount - del->br_blockcount, got.br_state))) goto done; break; case 0: /* * Deleting the middle of the extent. 
*/ temp = del->br_startoff - got.br_startoff; trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); new.br_startoff = del_endoff; temp2 = got_endoff - del_endoff; new.br_blockcount = temp2; new.br_state = got.br_state; if (!delay) { new.br_startblock = del_endblock; flags |= XFS_ILOG_CORE; if (cur) { if ((error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock, temp, got.br_state))) goto done; if ((error = xfs_btree_increment(cur, 0, &i))) goto done; cur->bc_rec.b = new; error = xfs_btree_insert(cur, &i); if (error && error != ENOSPC) goto done; /* * If get no-space back from btree insert, * it tried a split, and we have a zero * block reservation. * Fix up our state and return the error. */ if (error == ENOSPC) { /* * Reset the cursor, don't trust * it after any insert operation. */ if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock, temp, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); /* * Update the btree record back * to the original value. */ if ((error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock, got.br_blockcount, got.br_state))) goto done; /* * Reset the extent record back * to the original value. 
*/ xfs_bmbt_set_blockcount(ep, got.br_blockcount); flags = 0; error = XFS_ERROR(ENOSPC); goto done; } XFS_WANT_CORRUPTED_GOTO(i == 1, done); } else flags |= xfs_ilog_fext(whichfork); XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) + 1); } else { ASSERT(whichfork == XFS_DATA_FORK); temp = xfs_bmap_worst_indlen(ip, temp); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); temp2 = xfs_bmap_worst_indlen(ip, temp2); new.br_startblock = nullstartblock((int)temp2); da_new = temp + temp2; while (da_new > da_old) { if (temp) { temp--; da_new--; xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); } if (da_new == da_old) break; if (temp2) { temp2--; da_new--; new.br_startblock = nullstartblock((int)temp2); } } } trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_iext_insert(ip, *idx + 1, 1, &new, state); ++*idx; break; } /* * If we need to, add to list of extents to delete. */ if (do_fx) xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist, mp); /* * Adjust inode # blocks in the file. */ if (nblks) ip->i_d.di_nblocks -= nblks; /* * Adjust quota data. */ if (qfield) xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); /* * Account for change in delayed indirect blocks. * Nothing to do for disk quota accounting here. */ ASSERT(da_old >= da_new); if (da_old > da_new) { xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new), 0); } done: *logflagsp = flags; return error; } /* * Unmap (remove) blocks from a file. * If nexts is nonzero then the number of extents to remove is limited to * that value. If not all extents in the block range can be removed then * *done is set. 
 */
int						/* error */
xfs_bunmapi(
	xfs_trans_t		*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting offset to unmap */
	xfs_filblks_t		len,		/* length to unmap in file */
	int			flags,		/* misc flags */
	xfs_extnum_t		nexts,		/* number of extents max */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
	int			*done)		/* set if not done yet */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_irec_t		del;		/* extent being deleted */
	int			eof;		/* is deleting at eof */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	xfs_bmbt_irec_t		got;		/* current extent record */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	xfs_extnum_t		lastx;		/* last extent index used */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_irec_t		prev;		/* previous extent record */
	xfs_fileoff_t		start;		/* first file offset deleted */
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_fsblock_t		sum;

	trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);

	/* Pick the fork we're unmapping from. */
	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK; ifp = XFS_IFORK_PTR(ip, whichfork); if (unlikely( XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, ip->i_mount); return XFS_ERROR(EFSCORRUPTED); } mp = ip->i_mount; if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); ASSERT(len > 0); ASSERT(nexts >= 0); if (!(ifp->if_flags & XFS_IFEXTENTS) && (error = xfs_iread_extents(tp, ip, whichfork))) return error; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); if (nextents == 0) { *done = 1; return 0; } XFS_STATS_INC(xs_blk_unmap); isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); start = bno; bno = start + len - 1; ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev); /* * Check to see if the given block number is past the end of the * file, back up to the last block if so... */ if (eof) { ep = xfs_iext_get_ext(ifp, --lastx); xfs_bmbt_get_all(ep, &got); bno = got.br_startoff + got.br_blockcount - 1; } logflags = 0; if (ifp->if_flags & XFS_IFBROOT) { ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); cur->bc_private.b.firstblock = *firstblock; cur->bc_private.b.flist = flist; cur->bc_private.b.flags = 0; } else cur = NULL; if (isrt) { /* * Synchronize by locking the bitmap inode. */ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); } extno = 0; while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 && (nexts == 0 || extno < nexts)) { /* * Is the found extent after a hole in which bno lives? * Just back up to the previous extent, if so. */ if (got.br_startoff > bno) { if (--lastx < 0) break; ep = xfs_iext_get_ext(ifp, lastx); xfs_bmbt_get_all(ep, &got); } /* * Is the last block of this extent before the range * we're supposed to delete? If so, we're done. 
*/ bno = XFS_FILEOFF_MIN(bno, got.br_startoff + got.br_blockcount - 1); if (bno < start) break; /* * Then deal with the (possibly delayed) allocated space * we found. */ ASSERT(ep != NULL); del = got; wasdel = isnullstartblock(del.br_startblock); if (got.br_startoff < start) { del.br_startoff = start; del.br_blockcount -= start - got.br_startoff; if (!wasdel) del.br_startblock += start - got.br_startoff; } if (del.br_startoff + del.br_blockcount > bno + 1) del.br_blockcount = bno + 1 - del.br_startoff; sum = del.br_startblock + del.br_blockcount; if (isrt && (mod = do_mod(sum, mp->m_sb.sb_rextsize))) { /* * Realtime extent not lined up at the end. * The extent could have been split into written * and unwritten pieces, or we could just be * unmapping part of it. But we can't really * get rid of part of a realtime extent. */ if (del.br_state == XFS_EXT_UNWRITTEN || !xfs_sb_version_hasextflgbit(&mp->m_sb)) { /* * This piece is unwritten, or we're not * using unwritten extents. Skip over it. */ ASSERT(bno >= mod); bno -= mod > del.br_blockcount ? del.br_blockcount : mod; if (bno < got.br_startoff) { if (--lastx >= 0) xfs_bmbt_get_all(xfs_iext_get_ext( ifp, lastx), &got); } continue; } /* * It's written, turn it unwritten. * This is better than zeroing it. */ ASSERT(del.br_state == XFS_EXT_NORM); ASSERT(xfs_trans_get_block_res(tp) > 0); /* * If this spans a realtime extent boundary, * chop it back to the start of the one we end at. */ if (del.br_blockcount > mod) { del.br_startoff += del.br_blockcount - mod; del.br_startblock += del.br_blockcount - mod; del.br_blockcount = mod; } del.br_state = XFS_EXT_UNWRITTEN; error = xfs_bmap_add_extent_unwritten_real(tp, ip, &lastx, &cur, &del, firstblock, flist, &logflags); if (error) goto error0; goto nodelete; } if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) { /* * Realtime extent is lined up at the end but not * at the front. We'll get rid of full extents if * we can. 
*/ mod = mp->m_sb.sb_rextsize - mod; if (del.br_blockcount > mod) { del.br_blockcount -= mod; del.br_startoff += mod; del.br_startblock += mod; } else if ((del.br_startoff == start && (del.br_state == XFS_EXT_UNWRITTEN || xfs_trans_get_block_res(tp) == 0)) || !xfs_sb_version_hasextflgbit(&mp->m_sb)) { /* * Can't make it unwritten. There isn't * a full extent here so just skip it. */ ASSERT(bno >= del.br_blockcount); bno -= del.br_blockcount; if (got.br_startoff > bno) { if (--lastx >= 0) { ep = xfs_iext_get_ext(ifp, lastx); xfs_bmbt_get_all(ep, &got); } } continue; } else if (del.br_state == XFS_EXT_UNWRITTEN) { /* * This one is already unwritten. * It must have a written left neighbor. * Unwrite the killed part of that one and * try again. */ ASSERT(lastx > 0); xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), &prev); ASSERT(prev.br_state == XFS_EXT_NORM); ASSERT(!isnullstartblock(prev.br_startblock)); ASSERT(del.br_startblock == prev.br_startblock + prev.br_blockcount); if (prev.br_startoff < start) { mod = start - prev.br_startoff; prev.br_blockcount -= mod; prev.br_startblock += mod; prev.br_startoff = start; } prev.br_state = XFS_EXT_UNWRITTEN; lastx--; error = xfs_bmap_add_extent_unwritten_real(tp, ip, &lastx, &cur, &prev, firstblock, flist, &logflags); if (error) goto error0; goto nodelete; } else { ASSERT(del.br_state == XFS_EXT_NORM); del.br_state = XFS_EXT_UNWRITTEN; error = xfs_bmap_add_extent_unwritten_real(tp, ip, &lastx, &cur, &del, firstblock, flist, &logflags); if (error) goto error0; goto nodelete; } } if (wasdel) { ASSERT(startblockval(del.br_startblock) > 0); /* Update realtime/data freespace, unreserve quota */ if (isrt) { xfs_filblks_t rtexts; rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); do_div(rtexts, mp->m_sb.sb_rextsize); xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, (int64_t)rtexts, 0); (void)xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del.br_blockcount), 0, XFS_QMOPT_RES_RTBLKS); } else { xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, 
(int64_t)del.br_blockcount, 0); (void)xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del.br_blockcount), 0, XFS_QMOPT_RES_REGBLKS); } ip->i_delayed_blks -= del.br_blockcount; if (cur) cur->bc_private.b.flags |= XFS_BTCUR_BPRV_WASDEL; } else if (cur) cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL; /* * If it's the case where the directory code is running * with no block reservation, and the deleted block is in * the middle of its extent, and the resulting insert * of an extent would cause transformation to btree format, * then reject it. The calling code will then swap * blocks around instead. * We have to do this now, rather than waiting for the * conversion to btree format, since the transaction * will be dirty. */ if (!wasdel && xfs_trans_get_block_res(tp) == 0 && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */ XFS_IFORK_MAXEXT(ip, whichfork) && del.br_startoff > got.br_startoff && del.br_startoff + del.br_blockcount < got.br_startoff + got.br_blockcount) { error = XFS_ERROR(ENOSPC); goto error0; } error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del, &tmp_logflags, whichfork); logflags |= tmp_logflags; if (error) goto error0; bno = del.br_startoff - 1; nodelete: /* * If not done go on to the next (previous) record. */ if (bno != (xfs_fileoff_t)-1 && bno >= start) { if (lastx >= 0) { ep = xfs_iext_get_ext(ifp, lastx); if (xfs_bmbt_get_startoff(ep) > bno) { if (--lastx >= 0) ep = xfs_iext_get_ext(ifp, lastx); } xfs_bmbt_get_all(ep, &got); } extno++; } } *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0; /* * Convert to a btree if necessary. 
*/ if (xfs_bmap_needs_btree(ip, whichfork)) { ASSERT(cur == NULL); error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0, &tmp_logflags, whichfork); logflags |= tmp_logflags; if (error) goto error0; } /* * transform from btree to extents, give it cur */ else if (xfs_bmap_wants_extents(ip, whichfork)) { ASSERT(cur != NULL); error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, whichfork); logflags |= tmp_logflags; if (error) goto error0; } /* * transform from extents to local? */ error = 0; error0: /* * Log everything. Do this after conversion, there's no point in * logging the extent records if we've converted to btree format. */ if ((logflags & xfs_ilog_fext(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) logflags &= ~xfs_ilog_fext(whichfork); else if ((logflags & xfs_ilog_fbroot(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) logflags &= ~xfs_ilog_fbroot(whichfork); /* * Log inode even in the error case, if the transaction * is dirty we'll need to shut down the filesystem. */ if (logflags) xfs_trans_log_inode(tp, ip, logflags); if (cur) { if (!error) { *firstblock = cur->bc_private.b.firstblock; cur->bc_private.b.allocated = 0; } xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); } return error; } /* * returns 1 for success, 0 if we failed to map the extent. 
*/ STATIC int xfs_getbmapx_fix_eof_hole( xfs_inode_t *ip, /* xfs incore inode pointer */ struct getbmapx *out, /* output structure */ int prealloced, /* this is a file with * preallocated data space */ __int64_t end, /* last block requested */ xfs_fsblock_t startblock) { __int64_t fixlen; xfs_mount_t *mp; /* file system mount point */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t lastx; /* last extent pointer */ xfs_fileoff_t fileblock; if (startblock == HOLESTARTBLOCK) { mp = ip->i_mount; out->bmv_block = -1; fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip))); fixlen -= out->bmv_offset; if (prealloced && out->bmv_offset + out->bmv_length == end) { /* Came to hole at EOF. Trim it. */ if (fixlen <= 0) return 0; out->bmv_length = fixlen; } } else { if (startblock == DELAYSTARTBLOCK) out->bmv_block = -2; else out->bmv_block = xfs_fsb_to_db(ip, startblock); fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset); ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) && (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1)) out->bmv_oflags |= BMV_OF_LAST; } return 1; } /* * Get inode's extents as described in bmv, and format for output. * Calls formatter to fill the user's buffer until all extents * are mapped, until the passed-in bmv->bmv_count slots have * been filled, or until the formatter short-circuits the loop, * if it is tracking filled-in extents on its own. 
*/ int /* error code */ xfs_getbmap( xfs_inode_t *ip, struct getbmapx *bmv, /* user bmap structure */ xfs_bmap_format_t formatter, /* format to user */ void *arg) /* formatter arg */ { __int64_t bmvend; /* last block requested */ int error = 0; /* return value */ __int64_t fixlen; /* length for -1 case */ int i; /* extent number */ int lock; /* lock state */ xfs_bmbt_irec_t *map; /* buffer for user's data */ xfs_mount_t *mp; /* file system mount point */ int nex; /* # of user extents can do */ int nexleft; /* # of user extents left */ int subnex; /* # of bmapi's can do */ int nmap; /* number of map entries */ struct getbmapx *out; /* output structure */ int whichfork; /* data or attr fork */ int prealloced; /* this is a file with * preallocated data space */ int iflags; /* interface flags */ int bmapi_flags; /* flags for xfs_bmapi */ int cur_ext = 0; mp = ip->i_mount; iflags = bmv->bmv_iflags; whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; if (whichfork == XFS_ATTR_FORK) { if (XFS_IFORK_Q(ip)) { if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS && ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE && ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) return XFS_ERROR(EINVAL); } else if (unlikely( ip->i_d.di_aformat != 0 && ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) { XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW, ip->i_mount); return XFS_ERROR(EFSCORRUPTED); } prealloced = 0; fixlen = 1LL << 32; } else { if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && ip->i_d.di_format != XFS_DINODE_FMT_BTREE && ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) return XFS_ERROR(EINVAL); if (xfs_get_extsz_hint(ip) || ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ prealloced = 1; fixlen = mp->m_super->s_maxbytes; } else { prealloced = 0; fixlen = XFS_ISIZE(ip); } } if (bmv->bmv_length == -1) { fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen)); bmv->bmv_length = max_t(__int64_t, fixlen - bmv->bmv_offset, 0); } else if (bmv->bmv_length == 0) { 
bmv->bmv_entries = 0; return 0; } else if (bmv->bmv_length < 0) { return XFS_ERROR(EINVAL); } nex = bmv->bmv_count - 1; if (nex <= 0) return XFS_ERROR(EINVAL); bmvend = bmv->bmv_offset + bmv->bmv_length; if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx)) return XFS_ERROR(ENOMEM); out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL); if (!out) { out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx)); if (!out) return XFS_ERROR(ENOMEM); } xfs_ilock(ip, XFS_IOLOCK_SHARED); if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) { if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) { error = -filemap_write_and_wait(VFS_I(ip)->i_mapping); if (error) goto out_unlock_iolock; } /* * even after flushing the inode, there can still be delalloc * blocks on the inode beyond EOF due to speculative * preallocation. These are not removed until the release * function is called or the inode is inactivated. Hence we * cannot assert here that ip->i_delayed_blks == 0. */ } lock = xfs_ilock_map_shared(ip); /* * Don't let nex be bigger than the number of extents * we can have assuming alternating holes and real extents. */ if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; bmapi_flags = xfs_bmapi_aflag(whichfork); if (!(iflags & BMV_IF_PREALLOC)) bmapi_flags |= XFS_BMAPI_IGSTATE; /* * Allocate enough space to handle "subnex" maps at a time. */ error = ENOMEM; subnex = 16; map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS); if (!map) goto out_unlock_ilock; bmv->bmv_entries = 0; if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 && (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) { error = 0; goto out_free_map; } nexleft = nex; do { nmap = (nexleft > subnex) ? 
subnex : nexleft; error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), XFS_BB_TO_FSB(mp, bmv->bmv_length), map, &nmap, bmapi_flags); if (error) goto out_free_map; ASSERT(nmap <= subnex); for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) { out[cur_ext].bmv_oflags = 0; if (map[i].br_state == XFS_EXT_UNWRITTEN) out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; else if (map[i].br_startblock == DELAYSTARTBLOCK) out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC; out[cur_ext].bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff); out[cur_ext].bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount); out[cur_ext].bmv_unused1 = 0; out[cur_ext].bmv_unused2 = 0; /* * delayed allocation extents that start beyond EOF can * occur due to speculative EOF allocation when the * delalloc extent is larger than the largest freespace * extent at conversion time. These extents cannot be * converted by data writeback, so can exist here even * if we are not supposed to be finding delalloc * extents. */ if (map[i].br_startblock == DELAYSTARTBLOCK && map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip))) ASSERT((iflags & BMV_IF_DELALLOC) != 0); if (map[i].br_startblock == HOLESTARTBLOCK && whichfork == XFS_ATTR_FORK) { /* came to the end of attribute fork */ out[cur_ext].bmv_oflags |= BMV_OF_LAST; goto out_free_map; } if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext], prealloced, bmvend, map[i].br_startblock)) goto out_free_map; bmv->bmv_offset = out[cur_ext].bmv_offset + out[cur_ext].bmv_length; bmv->bmv_length = max_t(__int64_t, 0, bmvend - bmv->bmv_offset); /* * In case we don't want to return the hole, * don't increase cur_ext so that we can reuse * it in the next loop. 
*/ if ((iflags & BMV_IF_NO_HOLES) && map[i].br_startblock == HOLESTARTBLOCK) { memset(&out[cur_ext], 0, sizeof(out[cur_ext])); continue; } nexleft--; bmv->bmv_entries++; cur_ext++; } } while (nmap && nexleft && bmv->bmv_length); out_free_map: kmem_free(map); out_unlock_ilock: xfs_iunlock_map_shared(ip, lock); out_unlock_iolock: xfs_iunlock(ip, XFS_IOLOCK_SHARED); for (i = 0; i < cur_ext; i++) { int full = 0; /* user array is full */ /* format results & advance arg */ error = formatter(&arg, &out[i], &full); if (error || full) break; } if (is_vmalloc_addr(out)) kmem_free_large(out); else kmem_free(out); return error; } /* * dead simple method of punching delalyed allocation blocks from a range in * the inode. Walks a block at a time so will be slow, but is only executed in * rare error cases so the overhead is not critical. This will alays punch out * both the start and end blocks, even if the ranges only partially overlap * them, so it is up to the caller to ensure that partial blocks are not * passed in. */ int xfs_bmap_punch_delalloc_range( struct xfs_inode *ip, xfs_fileoff_t start_fsb, xfs_fileoff_t length) { xfs_fileoff_t remaining = length; int error = 0; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); do { int done; xfs_bmbt_irec_t imap; int nimaps = 1; xfs_fsblock_t firstblock; xfs_bmap_free_t flist; /* * Map the range first and check that it is a delalloc extent * before trying to unmap the range. Otherwise we will be * trying to remove a real extent (which requires a * transaction) or a hole, which is probably a bad idea... 
*/ error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps, XFS_BMAPI_ENTIRE); if (error) { /* something screwed, just bail */ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_alert(ip->i_mount, "Failed delalloc mapping lookup ino %lld fsb %lld.", ip->i_ino, start_fsb); } break; } if (!nimaps) { /* nothing there */ goto next_block; } if (imap.br_startblock != DELAYSTARTBLOCK) { /* been converted, ignore */ goto next_block; } WARN_ON(imap.br_blockcount == 0); /* * Note: while we initialise the firstblock/flist pair, they * should never be used because blocks should never be * allocated or freed for a delalloc extent and hence we need * don't cancel or finish them after the xfs_bunmapi() call. */ xfs_bmap_init(&flist, &firstblock); error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock, &flist, &done); if (error) break; ASSERT(!flist.xbf_count && !flist.xbf_first); next_block: start_fsb++; remaining--; } while(remaining > 0); return error; }
gpl-2.0
mifl/android_kernel_pantech_ef44s
drivers/media/video/gspca/gl860/gl860.c
4899
19028
/* GSPCA subdrivers for Genesys Logic webcams with the GL860 chip * Subdriver core * * 2009/09/24 Olivier Lorin <o.lorin@laposte.net> * GSPCA by Jean-Francois Moine <http://moinejf.free.fr> * Thanks BUGabundo and Malmostoso for your amazing help! * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "gspca.h" #include "gl860.h" MODULE_AUTHOR("Olivier Lorin <o.lorin@laposte.net>"); MODULE_DESCRIPTION("Genesys Logic USB PC Camera Driver"); MODULE_LICENSE("GPL"); /*======================== static function declarations ====================*/ static void (*dev_init_settings)(struct gspca_dev *gspca_dev); static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id); static int sd_init(struct gspca_dev *gspca_dev); static int sd_isoc_init(struct gspca_dev *gspca_dev); static int sd_start(struct gspca_dev *gspca_dev); static void sd_stop0(struct gspca_dev *gspca_dev); static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len); static void sd_callback(struct gspca_dev *gspca_dev); static int gl860_guess_sensor(struct gspca_dev *gspca_dev, u16 vendor_id, u16 product_id); /*============================ driver options ==============================*/ static s32 AC50Hz = 0xff; module_param(AC50Hz, int, 0644); MODULE_PARM_DESC(AC50Hz, " Does AC power frequency is 50Hz? 
(0/1)"); static char sensor[7]; module_param_string(sensor, sensor, sizeof(sensor), 0644); MODULE_PARM_DESC(sensor, " Driver sensor ('MI1320'/'MI2020'/'OV9655'/'OV2640')"); /*============================ webcam controls =============================*/ /* Functions to get and set a control value */ #define SD_SETGET(thename) \ static int sd_set_##thename(struct gspca_dev *gspca_dev, s32 val)\ {\ struct sd *sd = (struct sd *) gspca_dev;\ \ sd->vcur.thename = val;\ if (gspca_dev->streaming)\ sd->waitSet = 1;\ return 0;\ } \ static int sd_get_##thename(struct gspca_dev *gspca_dev, s32 *val)\ {\ struct sd *sd = (struct sd *) gspca_dev;\ \ *val = sd->vcur.thename;\ return 0;\ } SD_SETGET(mirror) SD_SETGET(flip) SD_SETGET(AC50Hz) SD_SETGET(backlight) SD_SETGET(brightness) SD_SETGET(gamma) SD_SETGET(hue) SD_SETGET(saturation) SD_SETGET(sharpness) SD_SETGET(whitebal) SD_SETGET(contrast) #define GL860_NCTRLS 11 /* control table */ static struct ctrl sd_ctrls_mi1320[GL860_NCTRLS]; static struct ctrl sd_ctrls_mi2020[GL860_NCTRLS]; static struct ctrl sd_ctrls_ov2640[GL860_NCTRLS]; static struct ctrl sd_ctrls_ov9655[GL860_NCTRLS]; #define SET_MY_CTRL(theid, \ thetype, thelabel, thename) \ if (sd->vmax.thename != 0) {\ sd_ctrls[nCtrls].qctrl.id = theid;\ sd_ctrls[nCtrls].qctrl.type = thetype;\ strcpy(sd_ctrls[nCtrls].qctrl.name, thelabel);\ sd_ctrls[nCtrls].qctrl.minimum = 0;\ sd_ctrls[nCtrls].qctrl.maximum = sd->vmax.thename;\ sd_ctrls[nCtrls].qctrl.default_value = sd->vcur.thename;\ sd_ctrls[nCtrls].qctrl.step = \ (sd->vmax.thename < 16) ? 
1 : sd->vmax.thename/16;\ sd_ctrls[nCtrls].set = sd_set_##thename;\ sd_ctrls[nCtrls].get = sd_get_##thename;\ nCtrls++;\ } static int gl860_build_control_table(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct ctrl *sd_ctrls; int nCtrls = 0; if (_MI1320_) sd_ctrls = sd_ctrls_mi1320; else if (_MI2020_) sd_ctrls = sd_ctrls_mi2020; else if (_OV2640_) sd_ctrls = sd_ctrls_ov2640; else if (_OV9655_) sd_ctrls = sd_ctrls_ov9655; else return 0; memset(sd_ctrls, 0, GL860_NCTRLS * sizeof(struct ctrl)); SET_MY_CTRL(V4L2_CID_BRIGHTNESS, V4L2_CTRL_TYPE_INTEGER, "Brightness", brightness) SET_MY_CTRL(V4L2_CID_SHARPNESS, V4L2_CTRL_TYPE_INTEGER, "Sharpness", sharpness) SET_MY_CTRL(V4L2_CID_CONTRAST, V4L2_CTRL_TYPE_INTEGER, "Contrast", contrast) SET_MY_CTRL(V4L2_CID_GAMMA, V4L2_CTRL_TYPE_INTEGER, "Gamma", gamma) SET_MY_CTRL(V4L2_CID_HUE, V4L2_CTRL_TYPE_INTEGER, "Palette", hue) SET_MY_CTRL(V4L2_CID_SATURATION, V4L2_CTRL_TYPE_INTEGER, "Saturation", saturation) SET_MY_CTRL(V4L2_CID_WHITE_BALANCE_TEMPERATURE, V4L2_CTRL_TYPE_INTEGER, "White Bal.", whitebal) SET_MY_CTRL(V4L2_CID_BACKLIGHT_COMPENSATION, V4L2_CTRL_TYPE_INTEGER, "Backlight" , backlight) SET_MY_CTRL(V4L2_CID_HFLIP, V4L2_CTRL_TYPE_BOOLEAN, "Mirror", mirror) SET_MY_CTRL(V4L2_CID_VFLIP, V4L2_CTRL_TYPE_BOOLEAN, "Flip", flip) SET_MY_CTRL(V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CTRL_TYPE_BOOLEAN, "AC power 50Hz", AC50Hz) return nCtrls; } /*==================== sud-driver structure initialisation =================*/ static const struct sd_desc sd_desc_mi1320 = { .name = MODULE_NAME, .ctrls = sd_ctrls_mi1320, .nctrls = GL860_NCTRLS, .config = sd_config, .init = sd_init, .isoc_init = sd_isoc_init, .start = sd_start, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = sd_callback, }; static const struct sd_desc sd_desc_mi2020 = { .name = MODULE_NAME, .ctrls = sd_ctrls_mi2020, .nctrls = GL860_NCTRLS, .config = sd_config, .init = sd_init, .isoc_init = sd_isoc_init, .start = sd_start, .stop0 = sd_stop0, 
.pkt_scan = sd_pkt_scan, .dq_callback = sd_callback, }; static const struct sd_desc sd_desc_ov2640 = { .name = MODULE_NAME, .ctrls = sd_ctrls_ov2640, .nctrls = GL860_NCTRLS, .config = sd_config, .init = sd_init, .isoc_init = sd_isoc_init, .start = sd_start, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = sd_callback, }; static const struct sd_desc sd_desc_ov9655 = { .name = MODULE_NAME, .ctrls = sd_ctrls_ov9655, .nctrls = GL860_NCTRLS, .config = sd_config, .init = sd_init, .isoc_init = sd_isoc_init, .start = sd_start, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = sd_callback, }; /*=========================== sub-driver image sizes =======================*/ static struct v4l2_pix_format mi2020_mode[] = { { 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 }, { 800, 598, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 800, .sizeimage = 800 * 598, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1 }, {1280, 1024, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 1024, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2 }, {1600, 1198, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1600, .sizeimage = 1600 * 1198, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3 }, }; static struct v4l2_pix_format ov2640_mode[] = { { 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 }, { 800, 600, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 800, .sizeimage = 800 * 600, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1 }, {1280, 960, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 960, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2 }, {1600, 1200, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1600, .sizeimage = 1600 * 1200, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3 }, }; static struct v4l2_pix_format 
mi1320_mode[] = { { 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 }, { 800, 600, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 800, .sizeimage = 800 * 600, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1 }, {1280, 960, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 960, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2 }, }; static struct v4l2_pix_format ov9655_mode[] = { { 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 }, {1280, 960, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 960, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1 }, }; /*========================= sud-driver functions ===========================*/ /* This function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; u16 vendor_id, product_id; /* Get USB VendorID and ProductID */ vendor_id = id->idVendor; product_id = id->idProduct; sd->nbRightUp = 1; sd->nbIm = -1; sd->sensor = 0xff; if (strcmp(sensor, "MI1320") == 0) sd->sensor = ID_MI1320; else if (strcmp(sensor, "OV2640") == 0) sd->sensor = ID_OV2640; else if (strcmp(sensor, "OV9655") == 0) sd->sensor = ID_OV9655; else if (strcmp(sensor, "MI2020") == 0) sd->sensor = ID_MI2020; /* Get sensor and set the suitable init/start/../stop functions */ if (gl860_guess_sensor(gspca_dev, vendor_id, product_id) == -1) return -1; cam = &gspca_dev->cam; switch (sd->sensor) { case ID_MI1320: gspca_dev->sd_desc = &sd_desc_mi1320; cam->cam_mode = mi1320_mode; cam->nmodes = ARRAY_SIZE(mi1320_mode); dev_init_settings = mi1320_init_settings; break; case ID_MI2020: gspca_dev->sd_desc = &sd_desc_mi2020; cam->cam_mode = mi2020_mode; cam->nmodes = ARRAY_SIZE(mi2020_mode); dev_init_settings = 
mi2020_init_settings; break; case ID_OV2640: gspca_dev->sd_desc = &sd_desc_ov2640; cam->cam_mode = ov2640_mode; cam->nmodes = ARRAY_SIZE(ov2640_mode); dev_init_settings = ov2640_init_settings; break; case ID_OV9655: gspca_dev->sd_desc = &sd_desc_ov9655; cam->cam_mode = ov9655_mode; cam->nmodes = ARRAY_SIZE(ov9655_mode); dev_init_settings = ov9655_init_settings; break; } dev_init_settings(gspca_dev); if (AC50Hz != 0xff) ((struct sd *) gspca_dev)->vcur.AC50Hz = AC50Hz; gl860_build_control_table(gspca_dev); return 0; } /* This function is called at probe time after sd_config */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return sd->dev_init_at_startup(gspca_dev); } /* This function is called before to choose the alt setting */ static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return sd->dev_configure_alt(gspca_dev); } /* This function is called to start the webcam */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return sd->dev_init_pre_alt(gspca_dev); } /* This function is called to stop the webcam */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return sd->dev_post_unset_alt(gspca_dev); } /* This function is called when an image is being received */ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; static s32 nSkipped; s32 mode = (s32) gspca_dev->curr_mode; s32 nToSkip = sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1); /* Test only against 0202h, so endianess does not matter */ switch (*(s16 *) data) { case 0x0202: /* End of frame, start a new one */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); nSkipped = 0; if (sd->nbIm >= 0 && sd->nbIm < 10) sd->nbIm++; gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); break; default: data += 2; len -= 2; if (nSkipped + len <= nToSkip) nSkipped += len; else { if 
(nSkipped < nToSkip && nSkipped + len > nToSkip) { data += nToSkip - nSkipped; len -= nToSkip - nSkipped; nSkipped = nToSkip + 1; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } break; } } /* This function is called when an image has been read */ /* This function is used to monitor webcam orientation */ static void sd_callback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!_OV9655_) { u8 state; u8 upsideDown; /* Probe sensor orientation */ ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0000, 1, (void *)&state); /* C8/40 means upside-down (looking backwards) */ /* D8/50 means right-up (looking onwards) */ upsideDown = (state == 0xc8 || state == 0x40); if (upsideDown && sd->nbRightUp > -4) { if (sd->nbRightUp > 0) sd->nbRightUp = 0; if (sd->nbRightUp == -3) { sd->mirrorMask = 1; sd->waitSet = 1; } sd->nbRightUp--; } if (!upsideDown && sd->nbRightUp < 4) { if (sd->nbRightUp < 0) sd->nbRightUp = 0; if (sd->nbRightUp == 3) { sd->mirrorMask = 0; sd->waitSet = 1; } sd->nbRightUp++; } } if (sd->waitSet) sd->dev_camera_settings(gspca_dev); } /*=================== USB driver structure initialisation ==================*/ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x05e3, 0x0503)}, {USB_DEVICE(0x05e3, 0xf191)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc_mi1320, sizeof(struct sd), THIS_MODULE); } static void sd_disconnect(struct usb_interface *intf) { gspca_disconnect(intf); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = sd_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; /*====================== Init and Exit module functions ====================*/ module_usb_driver(sd_driver); /*==========================================================================*/ int gl860_RTx(struct gspca_dev 
*gspca_dev, unsigned char pref, u32 req, u16 val, u16 index, s32 len, void *pdata) { struct usb_device *udev = gspca_dev->dev; s32 r = 0; if (pref == 0x40) { /* Send */ if (len > 0) { memcpy(gspca_dev->usb_buf, pdata, len); r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, pref, val, index, gspca_dev->usb_buf, len, 400 + 200 * (len > 1)); } else { r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, pref, val, index, NULL, len, 400); } } else { /* Receive */ if (len > 0) { r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req, pref, val, index, gspca_dev->usb_buf, len, 400 + 200 * (len > 1)); memcpy(pdata, gspca_dev->usb_buf, len); } else { r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req, pref, val, index, NULL, len, 400); } } if (r < 0) pr_err("ctrl transfer failed %4d [p%02x r%d v%04x i%04x len%d]\n", r, pref, req, val, index, len); else if (len > 1 && r < len) PDEBUG(D_ERR, "short ctrl transfer %d/%d", r, len); msleep(1); return r; } int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len) { int n; for (n = 0; n < len; n++) { if (tbl[n].idx != 0xffff) ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx, 0, NULL); else if (tbl[n].val == 0xffff) break; else msleep(tbl[n].val); } return n; } int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len, int n) { while (++n < len) { if (tbl[n].idx != 0xffff) ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx, 0, NULL); else if (tbl[n].val == 0xffff) break; else msleep(tbl[n].val); } return n; } void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len) { int n; for (n = 0; n < len; n++) { if (memcmp(tbl[n].data, "\xff\xff\xff", 3) != 0) ctrl_out(gspca_dev, 0x40, 3, 0x7a00, tbl[n].idx, 3, tbl[n].data); else msleep(tbl[n].idx); } } static int gl860_guess_sensor(struct gspca_dev *gspca_dev, u16 vendor_id, u16 product_id) { struct sd *sd = (struct sd *) gspca_dev; u8 probe, nb26, nb96, nOV, ntry; if (product_id == 0xf191) sd->sensor = 
ID_MI1320; if (sd->sensor == 0xff) { ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0004, 1, &probe); ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0004, 1, &probe); ctrl_out(gspca_dev, 0x40, 1, 0x0000, 0x0000, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0010, 0x0010, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0008, 0x00c0, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0001, 0x00c1, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0001, 0x00c2, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0020, 0x0006, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x006a, 0x000d, 0, NULL); msleep(56); PDEBUG(D_PROBE, "probing for sensor MI2020 or OVXXXX"); nOV = 0; for (ntry = 0; ntry < 4; ntry++) { ctrl_out(gspca_dev, 0x40, 1, 0x0040, 0x0000, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0063, 0x0006, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x7a00, 0x8030, 0, NULL); msleep(10); ctrl_in(gspca_dev, 0xc0, 2, 0x7a00, 0x8030, 1, &probe); PDEBUG(D_PROBE, "probe=0x%02x", probe); if (probe == 0xff) nOV++; } if (nOV) { PDEBUG(D_PROBE, "0xff -> OVXXXX"); PDEBUG(D_PROBE, "probing for sensor OV2640 or OV9655"); nb26 = nb96 = 0; for (ntry = 0; ntry < 4; ntry++) { ctrl_out(gspca_dev, 0x40, 1, 0x0040, 0x0000, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x6000, 0x800a, 0, NULL); msleep(10); /* Wait for 26(OV2640) or 96(OV9655) */ ctrl_in(gspca_dev, 0xc0, 2, 0x6000, 0x800a, 1, &probe); if (probe == 0x26 || probe == 0x40) { PDEBUG(D_PROBE, "probe=0x%02x -> OV2640", probe); sd->sensor = ID_OV2640; nb26 += 4; break; } if (probe == 0x96 || probe == 0x55) { PDEBUG(D_PROBE, "probe=0x%02x -> OV9655", probe); sd->sensor = ID_OV9655; nb96 += 4; break; } PDEBUG(D_PROBE, "probe=0x%02x", probe); if (probe == 0x00) nb26++; if (probe == 0xff) nb96++; msleep(3); } if (nb26 < 4 && nb96 < 4) return -1; } else { PDEBUG(D_PROBE, "Not any 0xff -> MI2020"); sd->sensor = ID_MI2020; } } if (_MI1320_) { PDEBUG(D_PROBE, "05e3:f191 sensor MI1320 (1.3M)"); } else if (_MI2020_) { 
PDEBUG(D_PROBE, "05e3:0503 sensor MI2020 (2.0M)"); } else if (_OV9655_) { PDEBUG(D_PROBE, "05e3:0503 sensor OV9655 (1.3M)"); } else if (_OV2640_) { PDEBUG(D_PROBE, "05e3:0503 sensor OV2640 (2.0M)"); } else { PDEBUG(D_PROBE, "***** Unknown sensor *****"); return -1; } return 0; }
gpl-2.0
curbthepain/android_kernel_htc_a11chl
drivers/staging/rtl8192e/rtllib_tx.c
4899
27378
/****************************************************************************** Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The full GNU General Public License is included in this distribution in the file called LICENSE. Contact Information: James P. Ketrenos <ipw2100-admin@linux.intel.com> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by Andrea Merello <andreamrl@tiscali.it> A special thanks goes to Realtek for their support ! ******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <linux/uaccess.h> #include <linux/if_vlan.h> #include "rtllib.h" /* 802.11 Data Frame 802.11 frame_contorl for data frames - 2 bytes ,-----------------------------------------------------------------------------------------. 
bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e | |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------| val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x | |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------| desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep | | | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | | '-----------------------------------------------------------------------------------------' /\ | 802.11 Data Frame | ,--------- 'ctrl' expands to >-----------' | ,--'---,-------------------------------------------------------------. Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 | |------|------|---------|---------|---------|------|---------|------| Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs | | | tion | (BSSID) | | | ence | data | | `--------------------------------------------------| |------' Total: 28 non-data bytes `----.----' | .- 'Frame data' expands to <---------------------------' | V ,---------------------------------------------------. Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 | |------|------|---------|----------|------|---------| Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | | DSAP | SSAP | | | | Packet | | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | `-----------------------------------------| | Total: 8 non-data bytes `----.----' | .- 'IP Packet' expands, if WEP enabled, to <--' | V ,-----------------------. Bytes | 4 | 0-2296 | 4 | |-----|-----------|-----| Desc. | IV | Encrypted | ICV | | | IP Packet | | `-----------------------' Total: 8 non-data bytes 802.3 Ethernet Data Frame ,-----------------------------------------. Bytes | 6 | 6 | 2 | Variable | 4 | |-------|-------|------|-----------|------| Desc. | Dest. 
| Source| Type | IP Packet | fcs | | MAC | MAC | | | | `-----------------------------------------' Total: 18 non-data bytes In the event that fragmentation is required, the incoming payload is split into N parts of size ieee->fts. The first fragment contains the SNAP header and the remaining packets are just data. If encryption is enabled, each fragment payload size is reduced by enough space to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP) So if you have 1500 bytes of payload with ieee->fts set to 500 without encryption it will take 3 frames. With WEP it will take 4 frames as the payload of each frame is reduced to 492 bytes. * SKB visualization * * ,- skb->data * | * | ETHERNET HEADER ,-<-- PAYLOAD * | | 14 bytes from skb->data * | 2 bytes for Type --> ,T. | (sizeof ethhdr) * | | | | * |,-Dest.--. ,--Src.---. | | | * | 6 bytes| | 6 bytes | | | | * v | | | | | | * 0 | v 1 | v | v 2 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 * ^ | ^ | ^ | * | | | | | | * | | | | `T' <---- 2 bytes for Type * | | | | * | | '---SNAP--' <-------- 6 bytes for SNAP * | | * `-IV--' <-------------------- 4 bytes for IV (WEP) * * SNAP HEADER * */ static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; inline int rtllib_put_snap(u8 *data, u16 h_proto) { struct rtllib_snap_hdr *snap; u8 *oui; snap = (struct rtllib_snap_hdr *)data; snap->dsap = 0xaa; snap->ssap = 0xaa; snap->ctrl = 0x03; if (h_proto == 0x8137 || h_proto == 0x80f3) oui = P802_1H_OUI; else oui = RFC1042_OUI; snap->oui[0] = oui[0]; snap->oui[1] = oui[1]; snap->oui[2] = oui[2]; *(u16 *)(data + SNAP_SIZE) = htons(h_proto); return SNAP_SIZE + sizeof(u16); } int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag, int hdr_len) { struct lib80211_crypt_data *crypt = NULL; int res; crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; if (!(crypt && crypt->ops)) { printk(KERN_INFO "=========>%s(), 
crypt is null\n", __func__); return -1; } /* To encrypt, frame format is: * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so * call both MSDU and MPDU encryption functions from here. */ atomic_inc(&crypt->refcnt); res = 0; if (crypt->ops->encrypt_msdu) res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv); if (res == 0 && crypt->ops->encrypt_mpdu) res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_INFO "%s: Encryption failed: len=%d.\n", ieee->dev->name, frag->len); ieee->ieee_stats.tx_discards++; return -1; } return 0; } void rtllib_txb_free(struct rtllib_txb *txb) { if (unlikely(!txb)) return; kfree(txb); } static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size, gfp_t gfp_mask) { struct rtllib_txb *txb; int i; txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags), gfp_mask); if (!txb) return NULL; memset(txb, 0, sizeof(struct rtllib_txb)); txb->nr_frags = nr_frags; txb->frag_size = txb_size; for (i = 0; i < nr_frags; i++) { txb->fragments[i] = dev_alloc_skb(txb_size); if (unlikely(!txb->fragments[i])) { i--; break; } memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb)); } if (unlikely(i != nr_frags)) { while (i >= 0) dev_kfree_skb_any(txb->fragments[i--]); kfree(txb); return NULL; } return txb; } static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu) { struct ethhdr *eth; struct iphdr *ip; eth = (struct ethhdr *)skb->data; if (eth->h_proto != htons(ETH_P_IP)) return 0; RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, skb->data, skb->len); ip = ip_hdr(skb); switch (ip->tos & 0xfc) { case 0x20: return 2; case 0x40: return 1; case 0x60: return 3; case 0x80: return 4; case 0xa0: return 5; case 0xc0: return 6; case 0xe0: return 7; default: return 0; } } static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee, struct sk_buff *skb, struct cb_desc *tcb_desc) { struct 
rt_hi_throughput *pHTInfo = ieee->pHTInfo; struct tx_ts_record *pTxTs = NULL; struct rtllib_hdr_1addr* hdr = (struct rtllib_hdr_1addr *)skb->data; if (rtllib_act_scanning(ieee, false)) return; if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT) return; if (!IsQoSDataFrame(skb->data)) return; if (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr1)) return; if (tcb_desc->bdhcp || ieee->CntAfterLink < 2) return; if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION) return; if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) return; if (pHTInfo->bCurrentAMPDUEnable) { if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true)) { printk(KERN_INFO "%s: can't get TS\n", __func__); return; } if (pTxTs->TxAdmittedBARecord.bValid == false) { if (ieee->wpa_ie_len && (ieee->pairwise_key_type == KEY_TYPE_NA)) { ; } else if (tcb_desc->bdhcp == 1) { ; } else if (!pTxTs->bDisable_AddBa) { TsStartAddBaProcess(ieee, pTxTs); } goto FORCED_AGG_SETTING; } else if (pTxTs->bUsingBa == false) { if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096)) pTxTs->bUsingBa = true; else goto FORCED_AGG_SETTING; } if (ieee->iw_mode == IW_MODE_INFRA) { tcb_desc->bAMPDUEnable = true; tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor; tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity; } } FORCED_AGG_SETTING: switch (pHTInfo->ForcedAMPDUMode) { case HT_AGG_AUTO: break; case HT_AGG_FORCE_ENABLE: tcb_desc->bAMPDUEnable = true; tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity; tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor; break; case HT_AGG_FORCE_DISABLE: tcb_desc->bAMPDUEnable = false; tcb_desc->ampdu_density = 0; tcb_desc->ampdu_factor = 0; break; } return; } static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { tcb_desc->bUseShortPreamble = false; if (tcb_desc->data_rate == 2) return; else if (ieee->current_network.capability & 
WLAN_CAPABILITY_SHORT_PREAMBLE) tcb_desc->bUseShortPreamble = true; return; } static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; tcb_desc->bUseShortGI = false; if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT) return; if (pHTInfo->bForcedShortGI) { tcb_desc->bUseShortGI = true; return; } if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz) tcb_desc->bUseShortGI = true; else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz) tcb_desc->bUseShortGI = true; } static void rtllib_query_BandwidthMode(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; tcb_desc->bPacketBW = false; if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT) return; if (tcb_desc->bMulticast || tcb_desc->bBroadcast) return; if ((tcb_desc->data_rate & 0x80) == 0) return; if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz) tcb_desc->bPacketBW = true; return; } static void rtllib_query_protectionmode(struct rtllib_device *ieee, struct cb_desc *tcb_desc, struct sk_buff *skb) { tcb_desc->bRTSSTBC = false; tcb_desc->bRTSUseShortGI = false; tcb_desc->bCTSEnable = false; tcb_desc->RTSSC = 0; tcb_desc->bRTSBW = false; if (tcb_desc->bBroadcast || tcb_desc->bMulticast) return; if (is_broadcast_ether_addr(skb->data+16)) return; if (ieee->mode < IEEE_N_24G) { if (skb->len > ieee->rts) { tcb_desc->bRTSEnable = true; tcb_desc->rts_rate = MGN_24M; } else if (ieee->current_network.buseprotection) { tcb_desc->bRTSEnable = true; tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; } return; } else { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; while (true) { if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) { tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; break; } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS | 
HT_IOT_ACT_PURE_N_MODE)) { tcb_desc->bRTSEnable = true; tcb_desc->rts_rate = MGN_24M; break; } if (ieee->current_network.buseprotection) { tcb_desc->bRTSEnable = true; tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; break; } if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) { u8 HTOpMode = pHTInfo->CurrentOpMode; if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) || (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) { tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; break; } } if (skb->len > ieee->rts) { tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; break; } if (tcb_desc->bAMPDUEnable) { tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = false; break; } goto NO_PROTECTION; } } if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) tcb_desc->bUseShortPreamble = true; if (ieee->iw_mode == IW_MODE_MASTER) goto NO_PROTECTION; return; NO_PROTECTION: tcb_desc->bRTSEnable = false; tcb_desc->bCTSEnable = false; tcb_desc->rts_rate = 0; tcb_desc->RTSSC = 0; tcb_desc->bRTSBW = false; } static void rtllib_txrate_selectmode(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { if (ieee->bTxDisableRateFallBack) tcb_desc->bTxDisableRateFallBack = true; if (ieee->bTxUseDriverAssingedRate) tcb_desc->bTxUseDriverAssingedRate = true; if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate) { if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) tcb_desc->RATRIndex = 0; } } u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb, u8 *dst) { u16 seqnum = 0; if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst)) return 0; if (IsQoSDataFrame(skb->data)) { struct tx_ts_record *pTS = NULL; if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true)) return 0; seqnum = pTS->TxCurSeq; pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096; return seqnum; } return 0; } static int wme_downgrade_ac(struct sk_buff *skb) { switch (skb->priority) { case 6: case 7: 
skb->priority = 5; /* VO -> VI */ return 0; case 4: case 5: skb->priority = 3; /* VI -> BE */ return 0; case 0: case 3: skb->priority = 1; /* BE -> BK */ return 0; default: return -1; } } int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) { struct rtllib_device *ieee = (struct rtllib_device *) netdev_priv_rsl(dev); struct rtllib_txb *txb = NULL; struct rtllib_hdr_3addrqos *frag_hdr; int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; unsigned long flags; struct net_device_stats *stats = &ieee->stats; int ether_type = 0, encrypt; int bytes, fc, qos_ctl = 0, hdr_len; struct sk_buff *skb_frag; struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */ .duration_id = 0, .seq_ctl = 0, .qos_ctl = 0 }; u8 dest[ETH_ALEN], src[ETH_ALEN]; int qos_actived = ieee->current_network.qos_data.active; struct lib80211_crypt_data *crypt = NULL; struct cb_desc *tcb_desc; u8 bIsMulticast = false; u8 IsAmsdu = false; bool bdhcp = false; spin_lock_irqsave(&ieee->lock, flags); /* If there is no driver handler to take the TXB, dont' bother * creating it... 
*/ if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) || ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) { printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name); goto success; } if (likely(ieee->raw_tx == 0)) { if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) { printk(KERN_WARNING "%s: skb too small (%d).\n", ieee->dev->name, skb->len); goto success; } /* Save source and destination addresses */ memcpy(dest, skb->data, ETH_ALEN); memcpy(src, skb->data+ETH_ALEN, ETH_ALEN); memset(skb->cb, 0, sizeof(skb->cb)); ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto); if (ieee->iw_mode == IW_MODE_MONITOR) { txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC); if (unlikely(!txb)) { printk(KERN_WARNING "%s: Could not allocate " "TXB\n", ieee->dev->name); goto failed; } txb->encrypted = 0; txb->payload_size = skb->len; memcpy(skb_put(txb->fragments[0], skb->len), skb->data, skb->len); goto success; } if (skb->len > 282) { if (ETH_P_IP == ether_type) { const struct iphdr *ip = (struct iphdr *) ((u8 *)skb->data+14); if (IPPROTO_UDP == ip->protocol) { struct udphdr *udp; udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); if (((((u8 *)udp)[1] == 68) && (((u8 *)udp)[3] == 67)) || ((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) { bdhcp = true; ieee->LPSDelayCnt = 200; } } } else if (ETH_P_ARP == ether_type) { printk(KERN_INFO "=================>DHCP " "Protocol start tx ARP pkt!!\n"); bdhcp = true; ieee->LPSDelayCnt = ieee->current_network.tim.tim_count; } } skb->priority = rtllib_classify(skb, IsAmsdu); crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && ieee->host_encrypt && crypt && crypt->ops; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != ETH_P_PAE) { stats->tx_dropped++; goto success; } if (crypt && !encrypt && ether_type == ETH_P_PAE) { struct eapol *eap = (struct eapol *)(skb->data + 
sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16)); RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n", eap_get_type(eap->type)); } /* Advance the SKB to the start of the payload */ skb_pull(skb, sizeof(struct ethhdr)); /* Determine total amount of storage required for TXB packets */ bytes = skb->len + SNAP_SIZE + sizeof(u16); if (encrypt) fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP; else fc = RTLLIB_FTYPE_DATA; if (qos_actived) fc |= RTLLIB_STYPE_QOS_DATA; else fc |= RTLLIB_STYPE_DATA; if (ieee->iw_mode == IW_MODE_INFRA) { fc |= RTLLIB_FCTL_TODS; /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN); memcpy(&header.addr2, &src, ETH_ALEN); if (IsAmsdu) memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN); else memcpy(&header.addr3, &dest, ETH_ALEN); } else if (ieee->iw_mode == IW_MODE_ADHOC) { /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ memcpy(&header.addr1, dest, ETH_ALEN); memcpy(&header.addr2, src, ETH_ALEN); memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN); } bIsMulticast = is_broadcast_ether_addr(header.addr1) || is_multicast_ether_addr(header.addr1); header.frame_ctl = cpu_to_le16(fc); /* Determine fragmentation size based on destination (multicast * and broadcast are not fragmented) */ if (bIsMulticast) { frag_size = MAX_FRAG_THRESHOLD; qos_ctl |= QOS_CTL_NOTCONTAIN_ACK; } else { frag_size = ieee->fts; qos_ctl = 0; } if (qos_actived) { hdr_len = RTLLIB_3ADDR_LEN + 2; /* in case we are a client verify acm is not set for this ac */ while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) { printk(KERN_INFO "skb->priority = %x\n", skb->priority); if (wme_downgrade_ac(skb)) break; printk(KERN_INFO "converted skb->priority = %x\n", skb->priority); } qos_ctl |= skb->priority; header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID); } else { hdr_len = RTLLIB_3ADDR_LEN; } /* Determine amount of payload per fragment. 
Regardless of if * this stack is providing the full 802.11 header, one will * eventually be affixed to this fragment -- so we must account * for it when determining the amount of payload space. */ bytes_per_frag = frag_size - hdr_len; if (ieee->config & (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS)) bytes_per_frag -= RTLLIB_FCS_LEN; /* Each fragment may need to have room for encryptiong * pre/postfix */ if (encrypt) { bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + crypt->ops->extra_mpdu_postfix_len + crypt->ops->extra_msdu_prefix_len + crypt->ops->extra_msdu_postfix_len; } /* Number of fragments is the total bytes_per_frag / * payload_per_fragment */ nr_frags = bytes / bytes_per_frag; bytes_last_frag = bytes % bytes_per_frag; if (bytes_last_frag) nr_frags++; else bytes_last_frag = bytes_per_frag; /* When we allocate the TXB we allocate enough space for the * reserve and full fragment bytes (bytes_per_frag doesn't * include prefix, postfix, header, FCS, etc.) */ txb = rtllib_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC); if (unlikely(!txb)) { printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); goto failed; } txb->encrypted = encrypt; txb->payload_size = bytes; if (qos_actived) txb->queue_index = UP2AC(skb->priority); else txb->queue_index = WME_AC_BE; for (i = 0; i < nr_frags; i++) { skb_frag = txb->fragments[i]; tcb_desc = (struct cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE); if (qos_actived) { skb_frag->priority = skb->priority; tcb_desc->queue_index = UP2AC(skb->priority); } else { skb_frag->priority = WME_AC_BE; tcb_desc->queue_index = WME_AC_BE; } skb_reserve(skb_frag, ieee->tx_headroom); if (encrypt) { if (ieee->hwsec_active) tcb_desc->bHwSec = 1; else tcb_desc->bHwSec = 0; skb_reserve(skb_frag, crypt->ops->extra_mpdu_prefix_len + crypt->ops->extra_msdu_prefix_len); } else { tcb_desc->bHwSec = 0; } frag_hdr = (struct rtllib_hdr_3addrqos *) skb_put(skb_frag, hdr_len); memcpy(frag_hdr, &header, hdr_len); /* If this 
is not the last fragment, then add the * MOREFRAGS bit to the frame control */ if (i != nr_frags - 1) { frag_hdr->frame_ctl = cpu_to_le16( fc | RTLLIB_FCTL_MOREFRAGS); bytes = bytes_per_frag; } else { /* The last fragment has the remaining length */ bytes = bytes_last_frag; } if ((qos_actived) && (!bIsMulticast)) { frag_hdr->seq_ctl = rtllib_query_seqnum(ieee, skb_frag, header.addr1); frag_hdr->seq_ctl = cpu_to_le16(frag_hdr->seq_ctl<<4 | i); } else { frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i); } /* Put a SNAP header on the first fragment */ if (i == 0) { rtllib_put_snap( skb_put(skb_frag, SNAP_SIZE + sizeof(u16)), ether_type); bytes -= SNAP_SIZE + sizeof(u16); } memcpy(skb_put(skb_frag, bytes), skb->data, bytes); /* Advance the SKB... */ skb_pull(skb, bytes); /* Encryption routine will move the header forward in * order to insert the IV between the header and the * payload */ if (encrypt) rtllib_encrypt_fragment(ieee, skb_frag, hdr_len); if (ieee->config & (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS)) skb_put(skb_frag, 4); } if ((qos_actived) && (!bIsMulticast)) { if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF) ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0; else ieee->seq_ctrl[UP2AC(skb->priority) + 1]++; } else { if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; } } else { if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) { printk(KERN_WARNING "%s: skb too small (%d).\n", ieee->dev->name, skb->len); goto success; } txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC); if (!txb) { printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); goto failed; } txb->encrypted = 0; txb->payload_size = skb->len; memcpy(skb_put(txb->fragments[0], skb->len), skb->data, skb->len); } success: if (txb) { struct cb_desc *tcb_desc = (struct cb_desc *) (txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE); tcb_desc->bTxEnableFwCalcDur = 1; tcb_desc->priority = skb->priority; if (ether_type == ETH_P_PAE) { if 
(ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom) { tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee); tcb_desc->bTxDisableRateFallBack = false; } else { tcb_desc->data_rate = ieee->basic_rate; tcb_desc->bTxDisableRateFallBack = 1; } tcb_desc->RATRIndex = 7; tcb_desc->bTxUseDriverAssingedRate = 1; } else { if (is_multicast_ether_addr(header.addr1)) tcb_desc->bMulticast = 1; if (is_broadcast_ether_addr(header.addr1)) tcb_desc->bBroadcast = 1; rtllib_txrate_selectmode(ieee, tcb_desc); if (tcb_desc->bMulticast || tcb_desc->bBroadcast) tcb_desc->data_rate = ieee->basic_rate; else tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate); if (bdhcp == true) { if (ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom) { tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee); tcb_desc->bTxDisableRateFallBack = false; } else { tcb_desc->data_rate = MGN_1M; tcb_desc->bTxDisableRateFallBack = 1; } tcb_desc->RATRIndex = 7; tcb_desc->bTxUseDriverAssingedRate = 1; tcb_desc->bdhcp = 1; } rtllib_qurey_ShortPreambleMode(ieee, tcb_desc); rtllib_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc); rtllib_query_HTCapShortGI(ieee, tcb_desc); rtllib_query_BandwidthMode(ieee, tcb_desc); rtllib_query_protectionmode(ieee, tcb_desc, txb->fragments[0]); } } spin_unlock_irqrestore(&ieee->lock, flags); dev_kfree_skb_any(skb); if (txb) { if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) { dev->stats.tx_packets++; dev->stats.tx_bytes += txb->payload_size; rtllib_softmac_xmit(txb, ieee); } else { if ((*ieee->hard_start_xmit)(txb, dev) == 0) { stats->tx_packets++; stats->tx_bytes += txb->payload_size; return 0; } rtllib_txb_free(txb); } } return 0; failed: spin_unlock_irqrestore(&ieee->lock, flags); netif_stop_queue(dev); stats->tx_errors++; return 1; } int rtllib_xmit(struct sk_buff *skb, struct net_device *dev) { memset(skb->cb, 0, sizeof(skb->cb)); return rtllib_xmit_inter(skb, dev); } EXPORT_SYMBOL(rtllib_xmit);
gpl-2.0
Icenowy/linux-kernel-lichee-a33
drivers/media/video/gspca/vicam.c
4899
10426
/* * gspca ViCam subdriver * * Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com> * * Based on the usbvideo vicam driver, which is: * * Copyright (c) 2002 Joe Burks (jburks@wavicle.org), * Christopher L Cheney (ccheney@cheney.cx), * Pavel Machek (pavel@ucw.cz), * John Tyner (jtyner@cs.ucr.edu), * Monroe Williams (monroe@pobox.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "vicam" #define HEADER_SIZE 64 #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/ihex.h> #include "gspca.h" MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("GSPCA ViCam USB Camera Driver"); MODULE_LICENSE("GPL"); enum e_ctrl { GAIN, EXPOSURE, NCTRL /* number of controls */ }; struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct work_struct work_struct; struct workqueue_struct *work_thread; struct gspca_ctrl ctrls[NCTRL]; }; /* The vicam sensor has a resolution of 512 x 244, with I believe square pixels, but this is forced to a 4:3 ratio by optics. 
So it has non square pixels :( */ static struct v4l2_pix_format vicam_mode[] = { { 256, 122, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 256, .sizeimage = 256 * 122, .colorspace = V4L2_COLORSPACE_SRGB,}, /* 2 modes with somewhat more square pixels */ { 256, 200, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 256, .sizeimage = 256 * 200, .colorspace = V4L2_COLORSPACE_SRGB,}, { 256, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 256, .sizeimage = 256 * 240, .colorspace = V4L2_COLORSPACE_SRGB,}, #if 0 /* This mode has extremely non square pixels, testing use only */ { 512, 122, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 512, .sizeimage = 512 * 122, .colorspace = V4L2_COLORSPACE_SRGB,}, #endif { 512, 244, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 512, .sizeimage = 512 * 244, .colorspace = V4L2_COLORSPACE_SRGB,}, }; static const struct ctrl sd_ctrls[] = { [GAIN] = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = 0, .maximum = 255, .step = 1, .default_value = 200, }, }, [EXPOSURE] = { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure", .minimum = 0, .maximum = 2047, .step = 1, .default_value = 256, }, }, }; static int vicam_control_msg(struct gspca_dev *gspca_dev, u8 request, u16 value, u16 index, u8 *data, u16 len) { int ret; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), request, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, len, 1000); if (ret < 0) pr_err("control msg req %02X error %d\n", request, ret); return ret; } static int vicam_set_camera_power(struct gspca_dev *gspca_dev, int state) { int ret; ret = vicam_control_msg(gspca_dev, 0x50, state, 0, NULL, 0); if (ret < 0) return ret; if (state) ret = vicam_control_msg(gspca_dev, 0x55, 1, 0, NULL, 0); return ret; } /* * request and read a block of data - see warning on vicam_command. 
*/ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size) { struct sd *sd = (struct sd *)gspca_dev; int ret, unscaled_height, act_len = 0; u8 *req_data = gspca_dev->usb_buf; memset(req_data, 0, 16); req_data[0] = sd->ctrls[GAIN].val; if (gspca_dev->width == 256) req_data[1] |= 0x01; /* low nibble x-scale */ if (gspca_dev->height <= 122) { req_data[1] |= 0x10; /* high nibble y-scale */ unscaled_height = gspca_dev->height * 2; } else unscaled_height = gspca_dev->height; req_data[2] = 0x90; /* unknown, does not seem to do anything */ if (unscaled_height <= 200) req_data[3] = 0x06; /* vend? */ else if (unscaled_height <= 242) /* Yes 242 not 240 */ req_data[3] = 0x07; /* vend? */ else /* Up to 244 lines with req_data[3] == 0x08 */ req_data[3] = 0x08; /* vend? */ if (sd->ctrls[EXPOSURE].val < 256) { /* Frame rate maxed out, use partial frame expo time */ req_data[4] = 255 - sd->ctrls[EXPOSURE].val; req_data[5] = 0x00; req_data[6] = 0x00; req_data[7] = 0x01; } else { /* Modify frame rate */ req_data[4] = 0x00; req_data[5] = 0x00; req_data[6] = sd->ctrls[EXPOSURE].val & 0xFF; req_data[7] = sd->ctrls[EXPOSURE].val >> 8; } req_data[8] = ((244 - unscaled_height) / 2) & ~0x01; /* vstart */ /* bytes 9-15 do not seem to affect exposure or image quality */ mutex_lock(&gspca_dev->usb_lock); ret = vicam_control_msg(gspca_dev, 0x51, 0x80, 0, req_data, 16); mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) return ret; ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), data, size, &act_len, 10000); /* successful, it returns 0, otherwise negative */ if (ret < 0 || act_len != size) { pr_err("bulk read fail (%d) len %d/%d\n", ret, act_len, size); return -EIO; } return 0; } /* This function is called as a workqueue function and runs whenever the camera * is streaming data. Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. 
To avoid possible collisions with other * threads attempting to use the camera's USB interface we take the gspca * usb_lock when performing USB operations. In practice the only thing we need * to protect against is the usb_set_interface call that gspca makes during * stream_off as the camera doesn't provide any controls that the user could try * to change. */ static void vicam_dostream(struct work_struct *work) { struct sd *sd = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &sd->gspca_dev; int ret, frame_sz; u8 *buffer; frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage + HEADER_SIZE; buffer = kmalloc(frame_sz, GFP_KERNEL | GFP_DMA); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto exit; } while (gspca_dev->present && gspca_dev->streaming) { ret = vicam_read_frame(gspca_dev, buffer, frame_sz); if (ret < 0) break; /* Note the frame header contents seem to be completely constant, they do not change with either image, or settings. So we simply discard it. The frames have a very similar 64 byte footer, which we don't even bother reading from the cam */ gspca_frame_add(gspca_dev, FIRST_PACKET, buffer + HEADER_SIZE, frame_sz - HEADER_SIZE); gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); } exit: kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *sd = (struct sd *)gspca_dev; /* We don't use the buffer gspca allocates so make it small. 
*/ cam->bulk = 1; cam->bulk_size = 64; cam->cam_mode = vicam_mode; cam->nmodes = ARRAY_SIZE(vicam_mode); cam->ctrls = sd->ctrls; INIT_WORK(&sd->work_struct, vicam_dostream); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { int ret; const struct ihex_binrec *rec; const struct firmware *uninitialized_var(fw); u8 *firmware_buf; ret = request_ihex_firmware(&fw, "vicam/firmware.fw", &gspca_dev->dev->dev); if (ret) { pr_err("Failed to load \"vicam/firmware.fw\": %d\n", ret); return ret; } firmware_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!firmware_buf) { ret = -ENOMEM; goto exit; } for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) { memcpy(firmware_buf, rec->data, be16_to_cpu(rec->len)); ret = vicam_control_msg(gspca_dev, 0xff, 0, 0, firmware_buf, be16_to_cpu(rec->len)); if (ret < 0) break; } kfree(firmware_buf); exit: release_firmware(fw); return ret; } /* Set up for getting frames. */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; int ret; ret = vicam_set_camera_power(gspca_dev, 1); if (ret < 0) return ret; /* Start the workqueue function to do the streaming */ sd->work_thread = create_singlethread_workqueue(MODULE_NAME); queue_work(sd->work_thread, &sd->work_struct); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *)gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for vicam_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); if (gspca_dev->present) vicam_set_camera_power(gspca_dev, 0); } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x04c1, 0x009d)}, {USB_DEVICE(0x0602, 0x1001)}, {} }; MODULE_DEVICE_TABLE(usb, 
device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
jejecule/android_kernel_oppo_msm8974
sound/pci/lola/lola.c
4899
20426
/* * Support for Digigram Lola PCI-e boards * * Copyright (c) 2011 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/pci.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/initval.h> #include "lola.h" /* Standard options */ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Digigram Lola driver."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Digigram Lola driver."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Digigram Lola driver."); /* Lola-specific options */ /* for instance use always max granularity which is compatible * with all sample rates */ static int granularity[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS - 1)] = LOLA_GRANULARITY_MAX }; /* below a sample_rate of 16kHz the analogue audio quality is NOT excellent */ static int sample_rate_min[SNDRV_CARDS] = { [0 ... 
(SNDRV_CARDS - 1) ] = 16000 }; module_param_array(granularity, int, NULL, 0444); MODULE_PARM_DESC(granularity, "Granularity value"); module_param_array(sample_rate_min, int, NULL, 0444); MODULE_PARM_DESC(sample_rate_min, "Minimal sample rate"); /* */ MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Digigram, Lola}}"); MODULE_DESCRIPTION("Digigram Lola driver"); MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); #ifdef CONFIG_SND_DEBUG_VERBOSE static int debug; module_param(debug, int, 0644); #define verbose_debug(fmt, args...) \ do { if (debug > 1) printk(KERN_DEBUG SFX fmt, ##args); } while (0) #else #define verbose_debug(fmt, args...) #endif /* * pseudo-codec read/write via CORB/RIRB */ static int corb_send_verb(struct lola *chip, unsigned int nid, unsigned int verb, unsigned int data, unsigned int extdata) { unsigned long flags; int ret = -EIO; chip->last_cmd_nid = nid; chip->last_verb = verb; chip->last_data = data; chip->last_extdata = extdata; data |= (nid << 20) | (verb << 8); spin_lock_irqsave(&chip->reg_lock, flags); if (chip->rirb.cmds < LOLA_CORB_ENTRIES - 1) { unsigned int wp = chip->corb.wp + 1; wp %= LOLA_CORB_ENTRIES; chip->corb.wp = wp; chip->corb.buf[wp * 2] = cpu_to_le32(data); chip->corb.buf[wp * 2 + 1] = cpu_to_le32(extdata); lola_writew(chip, BAR0, CORBWP, wp); chip->rirb.cmds++; smp_wmb(); ret = 0; } spin_unlock_irqrestore(&chip->reg_lock, flags); return ret; } static void lola_queue_unsol_event(struct lola *chip, unsigned int res, unsigned int res_ex) { lola_update_ext_clock_freq(chip, res); } /* retrieve RIRB entry - called from interrupt handler */ static void lola_update_rirb(struct lola *chip) { unsigned int rp, wp; u32 res, res_ex; wp = lola_readw(chip, BAR0, RIRBWP); if (wp == chip->rirb.wp) return; chip->rirb.wp = wp; while (chip->rirb.rp != wp) { chip->rirb.rp++; chip->rirb.rp %= LOLA_CORB_ENTRIES; rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */ res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]); res = le32_to_cpu(chip->rirb.buf[rp]); 
if (res_ex & LOLA_RIRB_EX_UNSOL_EV) lola_queue_unsol_event(chip, res, res_ex); else if (chip->rirb.cmds) { chip->res = res; chip->res_ex = res_ex; smp_wmb(); chip->rirb.cmds--; } } } static int rirb_get_response(struct lola *chip, unsigned int *val, unsigned int *extval) { unsigned long timeout; again: timeout = jiffies + msecs_to_jiffies(1000); for (;;) { if (chip->polling_mode) { spin_lock_irq(&chip->reg_lock); lola_update_rirb(chip); spin_unlock_irq(&chip->reg_lock); } if (!chip->rirb.cmds) { *val = chip->res; if (extval) *extval = chip->res_ex; verbose_debug("get_response: %x, %x\n", chip->res, chip->res_ex); if (chip->res_ex & LOLA_RIRB_EX_ERROR) { printk(KERN_WARNING SFX "RIRB ERROR: " "NID=%x, verb=%x, data=%x, ext=%x\n", chip->last_cmd_nid, chip->last_verb, chip->last_data, chip->last_extdata); return -EIO; } return 0; } if (time_after(jiffies, timeout)) break; udelay(20); cond_resched(); } printk(KERN_WARNING SFX "RIRB response error\n"); if (!chip->polling_mode) { printk(KERN_WARNING SFX "switching to polling mode\n"); chip->polling_mode = 1; goto again; } return -EIO; } /* aynchronous write of a codec verb with data */ int lola_codec_write(struct lola *chip, unsigned int nid, unsigned int verb, unsigned int data, unsigned int extdata) { verbose_debug("codec_write NID=%x, verb=%x, data=%x, ext=%x\n", nid, verb, data, extdata); return corb_send_verb(chip, nid, verb, data, extdata); } /* write a codec verb with data and read the returned status */ int lola_codec_read(struct lola *chip, unsigned int nid, unsigned int verb, unsigned int data, unsigned int extdata, unsigned int *val, unsigned int *extval) { int err; verbose_debug("codec_read NID=%x, verb=%x, data=%x, ext=%x\n", nid, verb, data, extdata); err = corb_send_verb(chip, nid, verb, data, extdata); if (err < 0) return err; err = rirb_get_response(chip, val, extval); return err; } /* flush all pending codec writes */ int lola_codec_flush(struct lola *chip) { unsigned int tmp; return 
rirb_get_response(chip, &tmp, NULL); } /* * interrupt handler */ static irqreturn_t lola_interrupt(int irq, void *dev_id) { struct lola *chip = dev_id; unsigned int notify_ins, notify_outs, error_ins, error_outs; int handled = 0; int i; notify_ins = notify_outs = error_ins = error_outs = 0; spin_lock(&chip->reg_lock); for (;;) { unsigned int status, in_sts, out_sts; unsigned int reg; status = lola_readl(chip, BAR1, DINTSTS); if (!status || status == -1) break; in_sts = lola_readl(chip, BAR1, DIINTSTS); out_sts = lola_readl(chip, BAR1, DOINTSTS); /* clear Input Interrupts */ for (i = 0; in_sts && i < chip->pcm[CAPT].num_streams; i++) { if (!(in_sts & (1 << i))) continue; in_sts &= ~(1 << i); reg = lola_dsd_read(chip, i, STS); if (reg & LOLA_DSD_STS_DESE) /* error */ error_ins |= (1 << i); if (reg & LOLA_DSD_STS_BCIS) /* notify */ notify_ins |= (1 << i); /* clear */ lola_dsd_write(chip, i, STS, reg); } /* clear Output Interrupts */ for (i = 0; out_sts && i < chip->pcm[PLAY].num_streams; i++) { if (!(out_sts & (1 << i))) continue; out_sts &= ~(1 << i); reg = lola_dsd_read(chip, i + MAX_STREAM_IN_COUNT, STS); if (reg & LOLA_DSD_STS_DESE) /* error */ error_outs |= (1 << i); if (reg & LOLA_DSD_STS_BCIS) /* notify */ notify_outs |= (1 << i); lola_dsd_write(chip, i + MAX_STREAM_IN_COUNT, STS, reg); } if (status & LOLA_DINT_CTRL) { unsigned char rbsts; /* ring status is byte access */ rbsts = lola_readb(chip, BAR0, RIRBSTS); rbsts &= LOLA_RIRB_INT_MASK; if (rbsts) lola_writeb(chip, BAR0, RIRBSTS, rbsts); rbsts = lola_readb(chip, BAR0, CORBSTS); rbsts &= LOLA_CORB_INT_MASK; if (rbsts) lola_writeb(chip, BAR0, CORBSTS, rbsts); lola_update_rirb(chip); } if (status & (LOLA_DINT_FIFOERR | LOLA_DINT_MUERR)) { /* clear global fifo error interrupt */ lola_writel(chip, BAR1, DINTSTS, (status & (LOLA_DINT_FIFOERR | LOLA_DINT_MUERR))); } handled = 1; } spin_unlock(&chip->reg_lock); lola_pcm_update(chip, &chip->pcm[CAPT], notify_ins); lola_pcm_update(chip, &chip->pcm[PLAY], 
notify_outs); return IRQ_RETVAL(handled); } /* * controller */ static int reset_controller(struct lola *chip) { unsigned int gctl = lola_readl(chip, BAR0, GCTL); unsigned long end_time; if (gctl) { /* to be sure */ lola_writel(chip, BAR1, BOARD_MODE, 0); return 0; } chip->cold_reset = 1; lola_writel(chip, BAR0, GCTL, LOLA_GCTL_RESET); end_time = jiffies + msecs_to_jiffies(200); do { msleep(1); gctl = lola_readl(chip, BAR0, GCTL); if (gctl) break; } while (time_before(jiffies, end_time)); if (!gctl) { printk(KERN_ERR SFX "cannot reset controller\n"); return -EIO; } return 0; } static void lola_irq_enable(struct lola *chip) { unsigned int val; /* enalbe all I/O streams */ val = (1 << chip->pcm[PLAY].num_streams) - 1; lola_writel(chip, BAR1, DOINTCTL, val); val = (1 << chip->pcm[CAPT].num_streams) - 1; lola_writel(chip, BAR1, DIINTCTL, val); /* enable global irqs */ val = LOLA_DINT_GLOBAL | LOLA_DINT_CTRL | LOLA_DINT_FIFOERR | LOLA_DINT_MUERR; lola_writel(chip, BAR1, DINTCTL, val); } static void lola_irq_disable(struct lola *chip) { lola_writel(chip, BAR1, DINTCTL, 0); lola_writel(chip, BAR1, DIINTCTL, 0); lola_writel(chip, BAR1, DOINTCTL, 0); } static int setup_corb_rirb(struct lola *chip) { int err; unsigned char tmp; unsigned long end_time; err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), PAGE_SIZE, &chip->rb); if (err < 0) return err; chip->corb.addr = chip->rb.addr; chip->corb.buf = (u32 *)chip->rb.area; chip->rirb.addr = chip->rb.addr + 2048; chip->rirb.buf = (u32 *)(chip->rb.area + 2048); /* disable ringbuffer DMAs */ lola_writeb(chip, BAR0, RIRBCTL, 0); lola_writeb(chip, BAR0, CORBCTL, 0); end_time = jiffies + msecs_to_jiffies(200); do { if (!lola_readb(chip, BAR0, RIRBCTL) && !lola_readb(chip, BAR0, CORBCTL)) break; msleep(1); } while (time_before(jiffies, end_time)); /* CORB set up */ lola_writel(chip, BAR0, CORBLBASE, (u32)chip->corb.addr); lola_writel(chip, BAR0, CORBUBASE, upper_32_bits(chip->corb.addr)); /* set the corb size to 
256 entries */ lola_writeb(chip, BAR0, CORBSIZE, 0x02); /* set the corb write pointer to 0 */ lola_writew(chip, BAR0, CORBWP, 0); /* reset the corb hw read pointer */ lola_writew(chip, BAR0, CORBRP, LOLA_RBRWP_CLR); /* enable corb dma */ lola_writeb(chip, BAR0, CORBCTL, LOLA_RBCTL_DMA_EN); /* clear flags if set */ tmp = lola_readb(chip, BAR0, CORBSTS) & LOLA_CORB_INT_MASK; if (tmp) lola_writeb(chip, BAR0, CORBSTS, tmp); chip->corb.wp = 0; /* RIRB set up */ lola_writel(chip, BAR0, RIRBLBASE, (u32)chip->rirb.addr); lola_writel(chip, BAR0, RIRBUBASE, upper_32_bits(chip->rirb.addr)); /* set the rirb size to 256 entries */ lola_writeb(chip, BAR0, RIRBSIZE, 0x02); /* reset the rirb hw write pointer */ lola_writew(chip, BAR0, RIRBWP, LOLA_RBRWP_CLR); /* set N=1, get RIRB response interrupt for new entry */ lola_writew(chip, BAR0, RINTCNT, 1); /* enable rirb dma and response irq */ lola_writeb(chip, BAR0, RIRBCTL, LOLA_RBCTL_DMA_EN | LOLA_RBCTL_IRQ_EN); /* clear flags if set */ tmp = lola_readb(chip, BAR0, RIRBSTS) & LOLA_RIRB_INT_MASK; if (tmp) lola_writeb(chip, BAR0, RIRBSTS, tmp); chip->rirb.rp = chip->rirb.cmds = 0; return 0; } static void stop_corb_rirb(struct lola *chip) { /* disable ringbuffer DMAs */ lola_writeb(chip, BAR0, RIRBCTL, 0); lola_writeb(chip, BAR0, CORBCTL, 0); } static void lola_reset_setups(struct lola *chip) { /* update the granularity */ lola_set_granularity(chip, chip->granularity, true); /* update the sample clock */ lola_set_clock_index(chip, chip->clock.cur_index); /* enable unsolicited events of the clock widget */ lola_enable_clock_events(chip); /* update the analog gains */ lola_setup_all_analog_gains(chip, CAPT, false); /* input, update */ /* update SRC configuration if applicable */ lola_set_src_config(chip, chip->input_src_mask, false); /* update the analog outputs */ lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */ } static int __devinit lola_parse_tree(struct lola *chip) { unsigned int val; int nid, err; err = 
lola_read_param(chip, 0, LOLA_PAR_VENDOR_ID, &val); if (err < 0) { printk(KERN_ERR SFX "Can't read VENDOR_ID\n"); return err; } val >>= 16; if (val != 0x1369) { printk(KERN_ERR SFX "Unknown codec vendor 0x%x\n", val); return -EINVAL; } err = lola_read_param(chip, 1, LOLA_PAR_FUNCTION_TYPE, &val); if (err < 0) { printk(KERN_ERR SFX "Can't read FUNCTION_TYPE for 0x%x\n", nid); return err; } if (val != 1) { printk(KERN_ERR SFX "Unknown function type %d\n", val); return -EINVAL; } err = lola_read_param(chip, 1, LOLA_PAR_SPECIFIC_CAPS, &val); if (err < 0) { printk(KERN_ERR SFX "Can't read SPECCAPS\n"); return err; } chip->lola_caps = val; chip->pin[CAPT].num_pins = LOLA_AFG_INPUT_PIN_COUNT(chip->lola_caps); chip->pin[PLAY].num_pins = LOLA_AFG_OUTPUT_PIN_COUNT(chip->lola_caps); snd_printdd(SFX "speccaps=0x%x, pins in=%d, out=%d\n", chip->lola_caps, chip->pin[CAPT].num_pins, chip->pin[PLAY].num_pins); if (chip->pin[CAPT].num_pins > MAX_AUDIO_INOUT_COUNT || chip->pin[PLAY].num_pins > MAX_AUDIO_INOUT_COUNT) { printk(KERN_ERR SFX "Invalid Lola-spec caps 0x%x\n", val); return -EINVAL; } nid = 0x02; err = lola_init_pcm(chip, CAPT, &nid); if (err < 0) return err; err = lola_init_pcm(chip, PLAY, &nid); if (err < 0) return err; err = lola_init_pins(chip, CAPT, &nid); if (err < 0) return err; err = lola_init_pins(chip, PLAY, &nid); if (err < 0) return err; if (LOLA_AFG_CLOCK_WIDGET_PRESENT(chip->lola_caps)) { err = lola_init_clock_widget(chip, nid); if (err < 0) return err; nid++; } if (LOLA_AFG_MIXER_WIDGET_PRESENT(chip->lola_caps)) { err = lola_init_mixer_widget(chip, nid); if (err < 0) return err; nid++; } /* enable unsolicited events of the clock widget */ err = lola_enable_clock_events(chip); if (err < 0) return err; /* if last ResetController was not a ColdReset, we don't know * the state of the card; initialize here again */ if (!chip->cold_reset) { lola_reset_setups(chip); chip->cold_reset = 1; } else { /* set the granularity if it is not the default */ if 
(chip->granularity != LOLA_GRANULARITY_MIN) lola_set_granularity(chip, chip->granularity, true); } return 0; } static void lola_stop_hw(struct lola *chip) { stop_corb_rirb(chip); lola_irq_disable(chip); } static void lola_free(struct lola *chip) { if (chip->initialized) lola_stop_hw(chip); lola_free_pcm(chip); lola_free_mixer(chip); if (chip->irq >= 0) free_irq(chip->irq, (void *)chip); if (chip->bar[0].remap_addr) iounmap(chip->bar[0].remap_addr); if (chip->bar[1].remap_addr) iounmap(chip->bar[1].remap_addr); if (chip->rb.area) snd_dma_free_pages(&chip->rb); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); } static int lola_dev_free(struct snd_device *device) { lola_free(device->device_data); return 0; } static int __devinit lola_create(struct snd_card *card, struct pci_dev *pci, int dev, struct lola **rchip) { struct lola *chip; int err; unsigned int dever; static struct snd_device_ops ops = { .dev_free = lola_dev_free, }; *rchip = NULL; err = pci_enable_device(pci); if (err < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) { snd_printk(KERN_ERR SFX "cannot allocate chip\n"); pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); mutex_init(&chip->open_mutex); chip->card = card; chip->pci = pci; chip->irq = -1; chip->granularity = granularity[dev]; switch (chip->granularity) { case 8: chip->sample_rate_max = 48000; break; case 16: chip->sample_rate_max = 96000; break; case 32: chip->sample_rate_max = 192000; break; default: snd_printk(KERN_WARNING SFX "Invalid granularity %d, reset to %d\n", chip->granularity, LOLA_GRANULARITY_MAX); chip->granularity = LOLA_GRANULARITY_MAX; chip->sample_rate_max = 192000; break; } chip->sample_rate_min = sample_rate_min[dev]; if (chip->sample_rate_min > chip->sample_rate_max) { snd_printk(KERN_WARNING SFX "Invalid sample_rate_min %d, reset to 16000\n", chip->sample_rate_min); chip->sample_rate_min = 16000; } err = pci_request_regions(pci, DRVNAME); if (err < 0) 
{ kfree(chip); pci_disable_device(pci); return err; } chip->bar[0].addr = pci_resource_start(pci, 0); chip->bar[0].remap_addr = pci_ioremap_bar(pci, 0); chip->bar[1].addr = pci_resource_start(pci, 2); chip->bar[1].remap_addr = pci_ioremap_bar(pci, 2); if (!chip->bar[0].remap_addr || !chip->bar[1].remap_addr) { snd_printk(KERN_ERR SFX "ioremap error\n"); err = -ENXIO; goto errout; } pci_set_master(pci); err = reset_controller(chip); if (err < 0) goto errout; if (request_irq(pci->irq, lola_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { printk(KERN_ERR SFX "unable to grab IRQ %d\n", pci->irq); err = -EBUSY; goto errout; } chip->irq = pci->irq; synchronize_irq(chip->irq); dever = lola_readl(chip, BAR1, DEVER); chip->pcm[CAPT].num_streams = (dever >> 0) & 0x3ff; chip->pcm[PLAY].num_streams = (dever >> 10) & 0x3ff; chip->version = (dever >> 24) & 0xff; snd_printdd(SFX "streams in=%d, out=%d, version=0x%x\n", chip->pcm[CAPT].num_streams, chip->pcm[PLAY].num_streams, chip->version); /* Test LOLA_BAR1_DEVER */ if (chip->pcm[CAPT].num_streams > MAX_STREAM_IN_COUNT || chip->pcm[PLAY].num_streams > MAX_STREAM_OUT_COUNT || (!chip->pcm[CAPT].num_streams && !chip->pcm[PLAY].num_streams)) { printk(KERN_ERR SFX "invalid DEVER = %x\n", dever); err = -EINVAL; goto errout; } err = setup_corb_rirb(chip); if (err < 0) goto errout; err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { snd_printk(KERN_ERR SFX "Error creating device [card]!\n"); goto errout; } strcpy(card->driver, "Lola"); strlcpy(card->shortname, "Digigram Lola", sizeof(card->shortname)); snprintf(card->longname, sizeof(card->longname), "%s at 0x%lx irq %i", card->shortname, chip->bar[0].addr, chip->irq); strcpy(card->mixername, card->shortname); lola_irq_enable(chip); chip->initialized = 1; *rchip = chip; return 0; errout: lola_free(chip); return err; } static int __devinit lola_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct lola 
*chip; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) { snd_printk(KERN_ERR SFX "Error creating card!\n"); return err; } snd_card_set_dev(card, &pci->dev); err = lola_create(card, pci, dev, &chip); if (err < 0) goto out_free; card->private_data = chip; err = lola_parse_tree(chip); if (err < 0) goto out_free; err = lola_create_pcm(chip); if (err < 0) goto out_free; err = lola_create_mixer(chip); if (err < 0) goto out_free; lola_proc_debug_new(chip); err = snd_card_register(card); if (err < 0) goto out_free; pci_set_drvdata(pci, card); dev++; return err; out_free: snd_card_free(card); return err; } static void __devexit lola_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } /* PCI IDs */ static DEFINE_PCI_DEVICE_TABLE(lola_ids) = { { PCI_VDEVICE(DIGIGRAM, 0x0001) }, { 0, } }; MODULE_DEVICE_TABLE(pci, lola_ids); /* pci_driver definition */ static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = lola_ids, .probe = lola_probe, .remove = __devexit_p(lola_remove), }; static int __init alsa_card_lola_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_lola_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_lola_init) module_exit(alsa_card_lola_exit)
gpl-2.0
boa19861105/android_422_kernel_htc_dlxub1
drivers/media/dvb/frontends/mb86a20s.c
5155
17714
/* * Fujitu mb86a20s ISDB-T/ISDB-Tsb Module driver * * Copyright (C) 2010 Mauro Carvalho Chehab <mchehab@redhat.com> * Copyright (C) 2009-2010 Douglas Landgraf <dougsland@redhat.com> * * FIXME: Need to port to DVB v5.2 API * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <asm/div64.h> #include "dvb_frontend.h" #include "mb86a20s.h" static int debug = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); #define rc(args...) do { \ printk(KERN_ERR "mb86a20s: " args); \ } while (0) #define dprintk(args...) \ do { \ if (debug) { \ printk(KERN_DEBUG "mb86a20s: %s: ", __func__); \ printk(args); \ } \ } while (0) struct mb86a20s_state { struct i2c_adapter *i2c; const struct mb86a20s_config *config; struct dvb_frontend frontend; bool need_init; }; struct regdata { u8 reg; u8 data; }; /* * Initialization sequence: Use whatevere default values that PV SBTVD * does on its initialisation, obtained via USB snoop */ static struct regdata mb86a20s_init[] = { { 0x70, 0x0f }, { 0x70, 0xff }, { 0x08, 0x01 }, { 0x09, 0x3e }, { 0x50, 0xd1 }, { 0x51, 0x22 }, { 0x39, 0x01 }, { 0x71, 0x00 }, { 0x28, 0x2a }, { 0x29, 0x00 }, { 0x2a, 0xff }, { 0x2b, 0x80 }, { 0x28, 0x20 }, { 0x29, 0x33 }, { 0x2a, 0xdf }, { 0x2b, 0xa9 }, { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 }, { 0x3b, 0x21 }, { 0x3c, 0x3a }, { 0x01, 0x0d }, { 0x04, 0x08 }, { 0x05, 0x05 }, { 0x04, 0x0e }, { 0x05, 0x00 }, { 0x04, 0x0f }, { 0x05, 0x14 }, { 0x04, 0x0b }, { 0x05, 0x8c }, { 0x04, 0x00 }, { 0x05, 0x00 }, { 0x04, 0x01 }, { 0x05, 0x07 }, { 0x04, 0x02 
}, { 0x05, 0x0f }, { 0x04, 0x03 }, { 0x05, 0xa0 }, { 0x04, 0x09 }, { 0x05, 0x00 }, { 0x04, 0x0a }, { 0x05, 0xff }, { 0x04, 0x27 }, { 0x05, 0x64 }, { 0x04, 0x28 }, { 0x05, 0x00 }, { 0x04, 0x1e }, { 0x05, 0xff }, { 0x04, 0x29 }, { 0x05, 0x0a }, { 0x04, 0x32 }, { 0x05, 0x0a }, { 0x04, 0x14 }, { 0x05, 0x02 }, { 0x04, 0x04 }, { 0x05, 0x00 }, { 0x04, 0x05 }, { 0x05, 0x22 }, { 0x04, 0x06 }, { 0x05, 0x0e }, { 0x04, 0x07 }, { 0x05, 0xd8 }, { 0x04, 0x12 }, { 0x05, 0x00 }, { 0x04, 0x13 }, { 0x05, 0xff }, { 0x04, 0x15 }, { 0x05, 0x4e }, { 0x04, 0x16 }, { 0x05, 0x20 }, { 0x52, 0x01 }, { 0x50, 0xa7 }, { 0x51, 0xff }, { 0x50, 0xa8 }, { 0x51, 0xff }, { 0x50, 0xa9 }, { 0x51, 0xff }, { 0x50, 0xaa }, { 0x51, 0xff }, { 0x50, 0xab }, { 0x51, 0xff }, { 0x50, 0xac }, { 0x51, 0xff }, { 0x50, 0xad }, { 0x51, 0xff }, { 0x50, 0xae }, { 0x51, 0xff }, { 0x50, 0xaf }, { 0x51, 0xff }, { 0x5e, 0x07 }, { 0x50, 0xdc }, { 0x51, 0x01 }, { 0x50, 0xdd }, { 0x51, 0xf4 }, { 0x50, 0xde }, { 0x51, 0x01 }, { 0x50, 0xdf }, { 0x51, 0xf4 }, { 0x50, 0xe0 }, { 0x51, 0x01 }, { 0x50, 0xe1 }, { 0x51, 0xf4 }, { 0x50, 0xb0 }, { 0x51, 0x07 }, { 0x50, 0xb2 }, { 0x51, 0xff }, { 0x50, 0xb3 }, { 0x51, 0xff }, { 0x50, 0xb4 }, { 0x51, 0xff }, { 0x50, 0xb5 }, { 0x51, 0xff }, { 0x50, 0xb6 }, { 0x51, 0xff }, { 0x50, 0xb7 }, { 0x51, 0xff }, { 0x50, 0x50 }, { 0x51, 0x02 }, { 0x50, 0x51 }, { 0x51, 0x04 }, { 0x45, 0x04 }, { 0x48, 0x04 }, { 0x50, 0xd5 }, { 0x51, 0x01 }, /* Serial */ { 0x50, 0xd6 }, { 0x51, 0x1f }, { 0x50, 0xd2 }, { 0x51, 0x03 }, { 0x50, 0xd7 }, { 0x51, 0x3f }, { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x28, 0x74 }, { 0x29, 0x40 }, { 0x28, 0x46 }, { 0x29, 0x2c }, { 0x28, 0x46 }, { 0x29, 0x0c }, { 0x04, 0x40 }, { 0x05, 0x01 }, { 0x28, 0x00 }, { 0x29, 0x10 }, { 0x28, 0x05 }, { 0x29, 0x02 }, { 0x1c, 0x01 }, { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 }, { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d }, { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 }, { 0x28, 0x09 }, { 0x29, 0x00 }, 
{ 0x2a, 0x00 }, { 0x2b, 0x01 }, { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 }, { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 }, { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 }, { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 }, { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e }, { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e }, { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 }, { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f }, { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 }, { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 }, { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe }, { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 }, { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee }, { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 }, { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f }, { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 }, { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 }, { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a }, { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc }, { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba }, { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 }, { 0x50, 0x1e }, { 0x51, 0x5d }, { 0x50, 0x22 }, { 0x51, 0x00 }, { 0x50, 0x23 }, { 0x51, 0xc8 }, { 0x50, 0x24 }, { 0x51, 0x00 }, { 0x50, 0x25 }, { 0x51, 0xf0 }, { 0x50, 0x26 }, { 0x51, 0x00 }, { 0x50, 0x27 }, { 0x51, 0xc3 }, { 0x50, 0x39 }, { 0x51, 0x02 }, { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, { 0xd0, 0x00 }, }; static struct regdata mb86a20s_reset_reception[] = { { 0x70, 0xf0 }, { 0x70, 0xff }, { 0x08, 0x01 }, { 0x08, 0x00 }, }; static int mb86a20s_i2c_writereg(struct mb86a20s_state *state, u8 i2c_addr, int reg, int data) { u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = i2c_addr, 
.flags = 0, .buf = buf, .len = 2 }; int rc; rc = i2c_transfer(state->i2c, &msg, 1); if (rc != 1) { printk("%s: writereg error (rc == %i, reg == 0x%02x," " data == 0x%02x)\n", __func__, rc, reg, data); return rc; } return 0; } static int mb86a20s_i2c_writeregdata(struct mb86a20s_state *state, u8 i2c_addr, struct regdata *rd, int size) { int i, rc; for (i = 0; i < size; i++) { rc = mb86a20s_i2c_writereg(state, i2c_addr, rd[i].reg, rd[i].data); if (rc < 0) return rc; } return 0; } static int mb86a20s_i2c_readreg(struct mb86a20s_state *state, u8 i2c_addr, u8 reg) { u8 val; int rc; struct i2c_msg msg[] = { { .addr = i2c_addr, .flags = 0, .buf = &reg, .len = 1 }, { .addr = i2c_addr, .flags = I2C_M_RD, .buf = &val, .len = 1 } }; rc = i2c_transfer(state->i2c, msg, 2); if (rc != 2) { rc("%s: reg=0x%x (error=%d)\n", __func__, reg, rc); return rc; } return val; } #define mb86a20s_readreg(state, reg) \ mb86a20s_i2c_readreg(state, state->config->demod_address, reg) #define mb86a20s_writereg(state, reg, val) \ mb86a20s_i2c_writereg(state, state->config->demod_address, reg, val) #define mb86a20s_writeregdata(state, regdata) \ mb86a20s_i2c_writeregdata(state, state->config->demod_address, \ regdata, ARRAY_SIZE(regdata)) static int mb86a20s_initfe(struct dvb_frontend *fe) { struct mb86a20s_state *state = fe->demodulator_priv; int rc; u8 regD5 = 1; dprintk("\n"); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* Initialize the frontend */ rc = mb86a20s_writeregdata(state, mb86a20s_init); if (rc < 0) goto err; if (!state->config->is_serial) { regD5 &= ~1; rc = mb86a20s_writereg(state, 0x50, 0xd5); if (rc < 0) goto err; rc = mb86a20s_writereg(state, 0x51, regD5); if (rc < 0) goto err; } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); err: if (rc < 0) { state->need_init = true; printk(KERN_INFO "mb86a20s: Init failed. 
Will try again later\n"); } else { state->need_init = false; dprintk("Initialization succeeded.\n"); } return rc; } static int mb86a20s_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct mb86a20s_state *state = fe->demodulator_priv; unsigned rf_max, rf_min, rf; u8 val; dprintk("\n"); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* Does a binary search to get RF strength */ rf_max = 0xfff; rf_min = 0; do { rf = (rf_max + rf_min) / 2; mb86a20s_writereg(state, 0x04, 0x1f); mb86a20s_writereg(state, 0x05, rf >> 8); mb86a20s_writereg(state, 0x04, 0x20); mb86a20s_writereg(state, 0x04, rf); val = mb86a20s_readreg(state, 0x02); if (val & 0x08) rf_min = (rf_max + rf_min) / 2; else rf_max = (rf_max + rf_min) / 2; if (rf_max - rf_min < 4) { *strength = (((rf_max + rf_min) / 2) * 65535) / 4095; break; } } while (1); dprintk("signal strength = %d\n", *strength); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); return 0; } static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct mb86a20s_state *state = fe->demodulator_priv; u8 val; dprintk("\n"); *status = 0; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); val = mb86a20s_readreg(state, 0x0a) & 0xf; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (val >= 2) *status |= FE_HAS_SIGNAL; if (val >= 4) *status |= FE_HAS_CARRIER; if (val >= 5) *status |= FE_HAS_VITERBI; if (val >= 7) *status |= FE_HAS_SYNC; if (val >= 8) /* Maybe 9? 
*/ *status |= FE_HAS_LOCK; dprintk("val = %d, status = 0x%02x\n", val, *status); return 0; } static int mb86a20s_set_frontend(struct dvb_frontend *fe) { struct mb86a20s_state *state = fe->demodulator_priv; int rc; #if 0 /* * FIXME: Properly implement the set frontend properties */ struct dtv_frontend_properties *p = &fe->dtv_property_cache; #endif dprintk("\n"); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); dprintk("Calling tuner set parameters\n"); fe->ops.tuner_ops.set_params(fe); /* * Make it more reliable: if, for some reason, the initial * device initialization doesn't happen, initialize it when * a SBTVD parameters are adjusted. * * Unfortunately, due to a hard to track bug at tda829x/tda18271, * the agc callback logic is not called during DVB attach time, * causing mb86a20s to not be initialized with Kworld SBTVD. * So, this hack is needed, in order to make Kworld SBTVD to work. */ if (state->need_init) mb86a20s_initfe(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); return rc; } static int mb86a20s_get_modulation(struct mb86a20s_state *state, unsigned layer) { int rc; static unsigned char reg[] = { [0] = 0x86, /* Layer A */ [1] = 0x8a, /* Layer B */ [2] = 0x8e, /* Layer C */ }; if (layer >= ARRAY_SIZE(reg)) return -EINVAL; rc = mb86a20s_writereg(state, 0x6d, reg[layer]); if (rc < 0) return rc; rc = mb86a20s_readreg(state, 0x6e); if (rc < 0) return rc; switch ((rc & 0x70) >> 4) { case 0: return DQPSK; case 1: return QPSK; case 2: return QAM_16; case 3: return QAM_64; default: return QAM_AUTO; } } static int mb86a20s_get_fec(struct mb86a20s_state *state, unsigned layer) { int rc; static unsigned char reg[] = { [0] = 0x87, /* Layer A */ [1] = 0x8b, /* Layer B */ [2] = 0x8f, /* Layer C */ }; if (layer >= ARRAY_SIZE(reg)) return -EINVAL; rc = mb86a20s_writereg(state, 0x6d, reg[layer]); if (rc < 0) return rc; rc = 
mb86a20s_readreg(state, 0x6e); if (rc < 0) return rc; switch (rc) { case 0: return FEC_1_2; case 1: return FEC_2_3; case 2: return FEC_3_4; case 3: return FEC_5_6; case 4: return FEC_7_8; default: return FEC_AUTO; } } static int mb86a20s_get_interleaving(struct mb86a20s_state *state, unsigned layer) { int rc; static unsigned char reg[] = { [0] = 0x88, /* Layer A */ [1] = 0x8c, /* Layer B */ [2] = 0x90, /* Layer C */ }; if (layer >= ARRAY_SIZE(reg)) return -EINVAL; rc = mb86a20s_writereg(state, 0x6d, reg[layer]); if (rc < 0) return rc; rc = mb86a20s_readreg(state, 0x6e); if (rc < 0) return rc; if (rc > 3) return -EINVAL; /* Not used */ return rc; } static int mb86a20s_get_segment_count(struct mb86a20s_state *state, unsigned layer) { int rc, count; static unsigned char reg[] = { [0] = 0x89, /* Layer A */ [1] = 0x8d, /* Layer B */ [2] = 0x91, /* Layer C */ }; if (layer >= ARRAY_SIZE(reg)) return -EINVAL; rc = mb86a20s_writereg(state, 0x6d, reg[layer]); if (rc < 0) return rc; rc = mb86a20s_readreg(state, 0x6e); if (rc < 0) return rc; count = (rc >> 4) & 0x0f; return count; } static int mb86a20s_get_frontend(struct dvb_frontend *fe) { struct mb86a20s_state *state = fe->demodulator_priv; struct dtv_frontend_properties *p = &fe->dtv_property_cache; int i, rc; /* Fixed parameters */ p->delivery_system = SYS_ISDBT; p->bandwidth_hz = 6000000; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* Check for partial reception */ rc = mb86a20s_writereg(state, 0x6d, 0x85); if (rc >= 0) rc = mb86a20s_readreg(state, 0x6e); if (rc >= 0) p->isdbt_partial_reception = (rc & 0x10) ? 
1 : 0; /* Get per-layer data */ p->isdbt_layer_enabled = 0; for (i = 0; i < 3; i++) { rc = mb86a20s_get_segment_count(state, i); if (rc >= 0 && rc < 14) p->layer[i].segment_count = rc; if (rc == 0x0f) continue; p->isdbt_layer_enabled |= 1 << i; rc = mb86a20s_get_modulation(state, i); if (rc >= 0) p->layer[i].modulation = rc; rc = mb86a20s_get_fec(state, i); if (rc >= 0) p->layer[i].fec = rc; rc = mb86a20s_get_interleaving(state, i); if (rc >= 0) p->layer[i].interleaving = rc; } p->isdbt_sb_mode = 0; rc = mb86a20s_writereg(state, 0x6d, 0x84); if ((rc >= 0) && ((rc & 0x60) == 0x20)) { p->isdbt_sb_mode = 1; /* At least, one segment should exist */ if (!p->isdbt_sb_segment_count) p->isdbt_sb_segment_count = 1; } else p->isdbt_sb_segment_count = 0; /* Get transmission mode and guard interval */ p->transmission_mode = TRANSMISSION_MODE_AUTO; p->guard_interval = GUARD_INTERVAL_AUTO; rc = mb86a20s_readreg(state, 0x07); if (rc >= 0) { if ((rc & 0x60) == 0x20) { switch (rc & 0x0c >> 2) { case 0: p->transmission_mode = TRANSMISSION_MODE_2K; break; case 1: p->transmission_mode = TRANSMISSION_MODE_4K; break; case 2: p->transmission_mode = TRANSMISSION_MODE_8K; break; } } if (!(rc & 0x10)) { switch (rc & 0x3) { case 0: p->guard_interval = GUARD_INTERVAL_1_4; break; case 1: p->guard_interval = GUARD_INTERVAL_1_8; break; case 2: p->guard_interval = GUARD_INTERVAL_1_16; break; } } } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); return 0; } static int mb86a20s_tune(struct dvb_frontend *fe, bool re_tune, unsigned int mode_flags, unsigned int *delay, fe_status_t *status) { int rc = 0; dprintk("\n"); if (re_tune) rc = mb86a20s_set_frontend(fe); if (!(mode_flags & FE_TUNE_MODE_ONESHOT)) mb86a20s_read_status(fe, status); return rc; } static void mb86a20s_release(struct dvb_frontend *fe) { struct mb86a20s_state *state = fe->demodulator_priv; dprintk("\n"); kfree(state); } static struct dvb_frontend_ops mb86a20s_ops; struct dvb_frontend *mb86a20s_attach(const struct 
mb86a20s_config *config, struct i2c_adapter *i2c) { u8 rev; /* allocate memory for the internal state */ struct mb86a20s_state *state = kzalloc(sizeof(struct mb86a20s_state), GFP_KERNEL); dprintk("\n"); if (state == NULL) { rc("Unable to kzalloc\n"); goto error; } /* setup the state */ state->config = config; state->i2c = i2c; /* create dvb_frontend */ memcpy(&state->frontend.ops, &mb86a20s_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; /* Check if it is a mb86a20s frontend */ rev = mb86a20s_readreg(state, 0); if (rev == 0x13) { printk(KERN_INFO "Detected a Fujitsu mb86a20s frontend\n"); } else { printk(KERN_ERR "Frontend revision %d is unknown - aborting.\n", rev); goto error; } return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(mb86a20s_attach); static struct dvb_frontend_ops mb86a20s_ops = { .delsys = { SYS_ISDBT }, /* Use dib8000 values per default */ .info = { .name = "Fujitsu mb86A20s", .caps = FE_CAN_INVERSION_AUTO | FE_CAN_RECOVER | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_QAM_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO, /* Actually, those values depend on the used tuner */ .frequency_min = 45000000, .frequency_max = 864000000, .frequency_stepsize = 62500, }, .release = mb86a20s_release, .init = mb86a20s_initfe, .set_frontend = mb86a20s_set_frontend, .get_frontend = mb86a20s_get_frontend, .read_status = mb86a20s_read_status, .read_signal_strength = mb86a20s_read_signal_strength, .tune = mb86a20s_tune, }; MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
jmztaylor/android_kernel_lge_ls720
sound/soc/samsung/s3c-i2s-v2.c
5411
18057
/* sound/soc/samsung/s3c-i2c-v2.c
 *
 * ALSA Soc Audio Layer - I2S core for newer Samsung SoCs.
 *
 * Copyright (c) 2006 Wolfson Microelectronics PLC.
 *	Graeme Gregory graeme.gregory@wolfsonmicro.com
 *	linux@wolfsonmicro.com
 *
 * Copyright (c) 2008, 2007, 2004-2005 Simtec Electronics
 *	http://armlinux.simtec.co.uk/
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <sound/soc.h>
#include <sound/pcm_params.h>

#include <mach/dma.h>

#include "regs-i2s-v2.h"
#include "s3c-i2s-v2.h"
#include "dma.h"

/* Build-time sanity check: this core only knows the IIS-v2 block found on
 * the SoCs listed below; refuse to build for anything else. */
#undef S3C_IIS_V2_SUPPORTED

#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) \
	|| defined(CONFIG_CPU_S5PV210)
#define S3C_IIS_V2_SUPPORTED
#endif

#ifdef CONFIG_PLAT_S3C64XX
#define S3C_IIS_V2_SUPPORTED
#endif

#ifndef S3C_IIS_V2_SUPPORTED
#error Unsupported CPU model
#endif

/* Set to 1 to compile in the IISCON debug dump (dbg_showcon). */
#define S3C2412_I2S_DEBUG_CON 0

/* Fetch our per-controller state previously stored by s3c_i2sv2_probe(). */
static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai)
{
	return snd_soc_dai_get_drvdata(cpu_dai);
}

/* Collapse a masked register field to 0/1 for the debug printks below. */
#define bit_set(v, b) (((v) & (b)) ? 1 : 0)

#if S3C2412_I2S_DEBUG_CON
/* Dump the interesting IISCON status/pause/active bits for debugging. */
static void dbg_showcon(const char *fn, u32 con)
{
	printk(KERN_DEBUG "%s: LRI=%d, TXFEMPT=%d, RXFEMPT=%d, TXFFULL=%d, RXFFULL=%d\n", fn,
	       bit_set(con, S3C2412_IISCON_LRINDEX),
	       bit_set(con, S3C2412_IISCON_TXFIFO_EMPTY),
	       bit_set(con, S3C2412_IISCON_RXFIFO_EMPTY),
	       bit_set(con, S3C2412_IISCON_TXFIFO_FULL),
	       bit_set(con, S3C2412_IISCON_RXFIFO_FULL));

	printk(KERN_DEBUG "%s: PAUSE: TXDMA=%d, RXDMA=%d, TXCH=%d, RXCH=%d\n",
	       fn,
	       bit_set(con, S3C2412_IISCON_TXDMA_PAUSE),
	       bit_set(con, S3C2412_IISCON_RXDMA_PAUSE),
	       bit_set(con, S3C2412_IISCON_TXCH_PAUSE),
	       bit_set(con, S3C2412_IISCON_RXCH_PAUSE));

	printk(KERN_DEBUG "%s: ACTIVE: TXDMA=%d, RXDMA=%d, IIS=%d\n", fn,
	       bit_set(con, S3C2412_IISCON_TXDMA_ACTIVE),
	       bit_set(con, S3C2412_IISCON_RXDMA_ACTIVE),
	       bit_set(con, S3C2412_IISCON_IIS_ACTIVE));
}
#else
static inline void dbg_showcon(const char *fn, u32 con)
{
}
#endif

/* Turn on or off the transmission path.
 *
 * Adjusts IISCON pause/active bits and, where necessary, moves the IISMOD
 * MODE field between TXONLY/RXONLY/TXRX so that the receive path (if any)
 * keeps running.  Note the register write order differs between the enable
 * and disable branches: on enable CON is written before MOD, on disable
 * MOD before CON.
 */
static void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on)
{
	void __iomem *regs = i2s->regs;
	u32 fic, con, mod;

	pr_debug("%s(%d)\n", __func__, on);

	fic = readl(regs + S3C2412_IISFIC);
	con = readl(regs + S3C2412_IISCON);
	mod = readl(regs + S3C2412_IISMOD);

	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);

	if (on) {
		con |= S3C2412_IISCON_TXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE;
		con &= ~S3C2412_IISCON_TXDMA_PAUSE;
		con &= ~S3C2412_IISCON_TXCH_PAUSE;

		switch (mod & S3C2412_IISMOD_MODE_MASK) {
		case S3C2412_IISMOD_MODE_TXONLY:
		case S3C2412_IISMOD_MODE_TXRX:
			/* do nothing, we are in the right mode */
			break;

		case S3C2412_IISMOD_MODE_RXONLY:
			/* RX already running: go to full duplex */
			mod &= ~S3C2412_IISMOD_MODE_MASK;
			mod |= S3C2412_IISMOD_MODE_TXRX;
			break;

		default:
			dev_err(i2s->dev, "TXEN: Invalid MODE %x in IISMOD\n",
				mod & S3C2412_IISMOD_MODE_MASK);
			break;
		}

		writel(con, regs + S3C2412_IISCON);
		writel(mod, regs + S3C2412_IISMOD);
	} else {
		/* Note, we do not have any indication that the FIFO problems
		 * tha the S3C2410/2440 had apply here, so we should be able
		 * to disable the DMA and TX without resetting the FIFOS.
		 */

		con |= S3C2412_IISCON_TXDMA_PAUSE;
		con |= S3C2412_IISCON_TXCH_PAUSE;
		con &= ~S3C2412_IISCON_TXDMA_ACTIVE;

		switch (mod & S3C2412_IISMOD_MODE_MASK) {
		case S3C2412_IISMOD_MODE_TXRX:
			/* keep RX running alone */
			mod &= ~S3C2412_IISMOD_MODE_MASK;
			mod |= S3C2412_IISMOD_MODE_RXONLY;
			break;

		case S3C2412_IISMOD_MODE_TXONLY:
			/* nothing left running: stop the IIS block too */
			mod &= ~S3C2412_IISMOD_MODE_MASK;
			con &= ~S3C2412_IISCON_IIS_ACTIVE;
			break;

		default:
			dev_err(i2s->dev, "TXDIS: Invalid MODE %x in IISMOD\n",
				mod & S3C2412_IISMOD_MODE_MASK);
			break;
		}

		writel(mod, regs + S3C2412_IISMOD);
		writel(con, regs + S3C2412_IISCON);
	}

	fic = readl(regs + S3C2412_IISFIC);
	dbg_showcon(__func__, con);
	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
}

/* Turn on or off the receive path; mirror image of s3c2412_snd_txctrl(). */
static void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on)
{
	void __iomem *regs = i2s->regs;
	u32 fic, con, mod;

	pr_debug("%s(%d)\n", __func__, on);

	fic = readl(regs + S3C2412_IISFIC);
	con = readl(regs + S3C2412_IISCON);
	mod = readl(regs + S3C2412_IISMOD);

	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);

	if (on) {
		con |= S3C2412_IISCON_RXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE;
		con &= ~S3C2412_IISCON_RXDMA_PAUSE;
		con &= ~S3C2412_IISCON_RXCH_PAUSE;

		switch (mod & S3C2412_IISMOD_MODE_MASK) {
		case S3C2412_IISMOD_MODE_TXRX:
		case S3C2412_IISMOD_MODE_RXONLY:
			/* do nothing, we are in the right mode */
			break;

		case S3C2412_IISMOD_MODE_TXONLY:
			/* TX already running: go to full duplex */
			mod &= ~S3C2412_IISMOD_MODE_MASK;
			mod |= S3C2412_IISMOD_MODE_TXRX;
			break;

		default:
			dev_err(i2s->dev, "RXEN: Invalid MODE %x in IISMOD\n",
				mod & S3C2412_IISMOD_MODE_MASK);
		}

		writel(mod, regs + S3C2412_IISMOD);
		writel(con, regs + S3C2412_IISCON);
	} else {
		/* See txctrl notes on FIFOs. */

		con &= ~S3C2412_IISCON_RXDMA_ACTIVE;
		con |= S3C2412_IISCON_RXDMA_PAUSE;
		con |= S3C2412_IISCON_RXCH_PAUSE;

		switch (mod & S3C2412_IISMOD_MODE_MASK) {
		case S3C2412_IISMOD_MODE_RXONLY:
			/* nothing left running: stop the IIS block too */
			con &= ~S3C2412_IISCON_IIS_ACTIVE;
			mod &= ~S3C2412_IISMOD_MODE_MASK;
			break;

		case S3C2412_IISMOD_MODE_TXRX:
			/* keep TX running alone */
			mod &= ~S3C2412_IISMOD_MODE_MASK;
			mod |= S3C2412_IISMOD_MODE_TXONLY;
			break;

		default:
			dev_err(i2s->dev, "RXDIS: Invalid MODE %x in IISMOD\n",
				mod & S3C2412_IISMOD_MODE_MASK);
		}

		writel(con, regs + S3C2412_IISCON);
		writel(mod, regs + S3C2412_IISMOD);
	}

	fic = readl(regs + S3C2412_IISFIC);
	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
}

/* Rough busy-wait budget: loops_per_jiffy-based iterations for t msecs. */
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/*
 * Wait for the LR signal to allow synchronisation to the L/R clock
 * from the codec. May only be needed for slave mode.
 *
 * Busy-waits (~5ms budget) for IISCON_LRINDEX to assert; returns 0 on
 * success or -ETIMEDOUT if the L/R clock never appeared.
 */
static int s3c2412_snd_lrsync(struct s3c_i2sv2_info *i2s)
{
	u32 iiscon;
	unsigned long loops = msecs_to_loops(5);

	pr_debug("Entered %s\n", __func__);

	while (--loops) {
		iiscon = readl(i2s->regs + S3C2412_IISCON);
		if (iiscon & S3C2412_IISCON_LRINDEX)
			break;

		cpu_relax();
	}

	if (!loops) {
		printk(KERN_ERR "%s: timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Set S3C2412 I2S DAI format
 *
 * Programs master/slave selection and the serial data format (I2S,
 * left-justified, right-justified) into IISMOD.  Returns -EINVAL for
 * unsupported combinations.
 */
static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
			       unsigned int fmt)
{
	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
	u32 iismod;

	pr_debug("Entered %s\n", __func__);

	iismod = readl(i2s->regs + S3C2412_IISMOD);
	pr_debug("hw_params r: IISMOD: %x \n", iismod);

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		/* codec is clock master -> controller is slave */
		i2s->master = 0;
		iismod |= S3C2412_IISMOD_SLAVE;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		i2s->master = 1;
		iismod &= ~S3C2412_IISMOD_SLAVE;
		break;
	default:
		pr_err("unknwon master/slave format\n");
		return -EINVAL;
	}

	iismod &= ~S3C2412_IISMOD_SDF_MASK;

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_RIGHT_J:
		iismod |= S3C2412_IISMOD_LR_RLOW;
		iismod |= S3C2412_IISMOD_SDF_MSB;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		iismod |= S3C2412_IISMOD_LR_RLOW;
		iismod |= S3C2412_IISMOD_SDF_LSB;
		break;
	case SND_SOC_DAIFMT_I2S:
		iismod &= ~S3C2412_IISMOD_LR_RLOW;
		iismod |= S3C2412_IISMOD_SDF_IIS;
		break;
	default:
		pr_err("Unknown data format\n");
		return -EINVAL;
	}

	writel(iismod, i2s->regs + S3C2412_IISMOD);
	pr_debug("hw_params w: IISMOD: %x \n", iismod);
	return 0;
}

/*
 * Default hw_params callback: selects the playback or capture DMA channel
 * for the stream and programs the bit-length (BLC) field in IISMOD from
 * the requested sample format.  Unlisted formats leave BLC at 16-bit.
 */
static int s3c_i2sv2_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params,
				 struct snd_soc_dai *dai)
{
	struct s3c_i2sv2_info *i2s = to_info(dai);
	struct s3c_dma_params *dma_data;
	u32 iismod;

	pr_debug("Entered %s\n", __func__);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		dma_data = i2s->dma_playback;
	else
		dma_data = i2s->dma_capture;

	snd_soc_dai_set_dma_data(dai, substream, dma_data);

	/* Working copies of register */
	iismod = readl(i2s->regs + S3C2412_IISMOD);
	pr_debug("%s: r: IISMOD: %x\n", __func__, iismod);

	iismod &= ~S3C64XX_IISMOD_BLC_MASK;
	/* Sample size */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S8:
		iismod |= S3C64XX_IISMOD_BLC_8BIT;
		break;
	case SNDRV_PCM_FORMAT_S16_LE:
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		iismod |= S3C64XX_IISMOD_BLC_24BIT;
		break;
	}

	writel(iismod, i2s->regs + S3C2412_IISMOD);
	pr_debug("%s: w: IISMOD: %x\n", __func__, iismod);

	return 0;
}

/*
 * Select the IIS root clock source (PCLK vs audio bus mux) or, on
 * controllers with the CDCLKCON feature, the CDCLK direction.
 * @freq is unused here; rate setup happens via the clock dividers.
 */
static int s3c_i2sv2_set_sysclk(struct snd_soc_dai *cpu_dai,
				  int clk_id, unsigned int freq, int dir)
{
	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
	u32 iismod = readl(i2s->regs + S3C2412_IISMOD);

	pr_debug("Entered %s\n", __func__);
	pr_debug("%s r: IISMOD: %x\n", __func__, iismod);

	switch (clk_id) {
	case S3C_I2SV2_CLKSRC_PCLK:
		iismod &= ~S3C2412_IISMOD_IMS_SYSMUX;
		break;

	case S3C_I2SV2_CLKSRC_AUDIOBUS:
		iismod |= S3C2412_IISMOD_IMS_SYSMUX;
		break;

	case S3C_I2SV2_CLKSRC_CDCLK:
		/* Error if controller doesn't have the CDCLKCON bit */
		if (!(i2s->feature & S3C_FEATURE_CDCLKCON))
			return -EINVAL;

		switch (dir) {
		case SND_SOC_CLOCK_IN:
			iismod |= S3C64XX_IISMOD_CDCLKCON;
			break;
		case SND_SOC_CLOCK_OUT:
			iismod &= ~S3C64XX_IISMOD_CDCLKCON;
			break;
		default:
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	writel(iismod, i2s->regs + S3C2412_IISMOD);
	pr_debug("%s w: IISMOD: %x\n", __func__, iismod);

	return 0;
}

/*
 * PCM trigger: start/stop the tx or rx path for the stream with local
 * interrupts disabled around the register sequence.  In slave mode the
 * start path first waits for L/R clock sync from the codec.
 */
static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
			       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s3c_i2sv2_info *i2s = to_info(rtd->cpu_dai);
	int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
	unsigned long irqs;
	int ret = 0;
	struct s3c_dma_params *dma_data =
		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	pr_debug("Entered %s\n", __func__);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* On start, ensure that the FIFOs are cleared and reset. */

		writel(capture ? S3C2412_IISFIC_RXFLUSH : S3C2412_IISFIC_TXFLUSH,
		       i2s->regs + S3C2412_IISFIC);

		/* clear again, just in case */
		writel(0x0, i2s->regs + S3C2412_IISFIC);

		/* fall through: START then continues with the RESUME path */
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (!i2s->master) {
			ret = s3c2412_snd_lrsync(i2s);
			if (ret)
				goto exit_err;
		}

		local_irq_save(irqs);

		if (capture)
			s3c2412_snd_rxctrl(i2s, 1);
		else
			s3c2412_snd_txctrl(i2s, 1);

		local_irq_restore(irqs);

		/*
		 * Load the next buffer to DMA to meet the reqirement
		 * of the auto reload mechanism of S3C24XX.
		 * This call won't bother S3C64XX.
		 */
		s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);

		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		local_irq_save(irqs);

		if (capture)
			s3c2412_snd_rxctrl(i2s, 0);
		else
			s3c2412_snd_txctrl(i2s, 0);

		local_irq_restore(irqs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

exit_err:
	return ret;
}

/*
 * Set S3C2412 Clock dividers
 *
 * Maps the requested divisor to the IISMOD BCLK/RCLK field encodings, or
 * programs the prescaler (PSR) directly: a non-negative @div enables the
 * prescaler with that value, a negative @div disables it.
 */
static int s3c2412_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
				  int div_id, int div)
{
	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
	u32 reg;

	pr_debug("%s(%p, %d, %d)\n", __func__, cpu_dai, div_id, div);

	switch (div_id) {
	case S3C_I2SV2_DIV_BCLK:
		switch (div) {
		case 16:
			div = S3C2412_IISMOD_BCLK_16FS;
			break;

		case 32:
			div = S3C2412_IISMOD_BCLK_32FS;
			break;

		case 24:
			div = S3C2412_IISMOD_BCLK_24FS;
			break;

		case 48:
			div = S3C2412_IISMOD_BCLK_48FS;
			break;

		default:
			return -EINVAL;
		}

		reg = readl(i2s->regs + S3C2412_IISMOD);
		reg &= ~S3C2412_IISMOD_BCLK_MASK;
		writel(reg | div, i2s->regs + S3C2412_IISMOD);

		pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD));
		break;

	case S3C_I2SV2_DIV_RCLK:
		switch (div) {
		case 256:
			div = S3C2412_IISMOD_RCLK_256FS;
			break;

		case 384:
			div = S3C2412_IISMOD_RCLK_384FS;
			break;

		case 512:
			div = S3C2412_IISMOD_RCLK_512FS;
			break;

		case 768:
			div = S3C2412_IISMOD_RCLK_768FS;
			break;

		default:
			return -EINVAL;
		}

		reg = readl(i2s->regs + S3C2412_IISMOD);
		reg &= ~S3C2412_IISMOD_RCLK_MASK;
		writel(reg | div, i2s->regs + S3C2412_IISMOD);
		pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD));
		break;

	case S3C_I2SV2_DIV_PRESCALER:
		if (div >= 0) {
			writel((div << 8) | S3C2412_IISPSR_PSREN,
			       i2s->regs + S3C2412_IISPSR);
		} else {
			writel(0x0, i2s->regs + S3C2412_IISPSR);
		}
		pr_debug("%s: PSR=%08x\n", __func__, readl(i2s->regs + S3C2412_IISPSR));
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* Report the number of frames still queued in the relevant FIFO, used by
 * ALSA to refine the playback/capture delay estimate. */
static snd_pcm_sframes_t s3c2412_i2s_delay(struct snd_pcm_substream *substream,
					   struct snd_soc_dai *dai)
{
	struct s3c_i2sv2_info *i2s = to_info(dai);
	u32 reg = readl(i2s->regs + S3C2412_IISFIC);
	snd_pcm_sframes_t delay;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		delay = S3C2412_IISFIC_TXCOUNT(reg);
	else
		delay = S3C2412_IISFIC_RXCOUNT(reg);

	return delay;
}

/* Return the clk currently feeding the IIS block, as selected by the
 * IMS_SYSMUX bit in IISMOD (audio-bus clock vs peripheral clock). */
struct clk *s3c_i2sv2_get_clock(struct snd_soc_dai *cpu_dai)
{
	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
	u32 iismod = readl(i2s->regs + S3C2412_IISMOD);

	if (iismod & S3C2412_IISMOD_IMS_SYSMUX)
		return i2s->iis_cclk;
	else
		return i2s->iis_pclk;
}
EXPORT_SYMBOL_GPL(s3c_i2sv2_get_clock);

/* default table of all avaialable root fs divisors */
static unsigned int iis_fs_tab[] = { 256, 512, 384, 768 };

/*
 * Pick the fs divisor and clock divider giving the sample rate closest to
 * @rate from the given root clock; results go into @info.  Always returns 0.
 *
 * NOTE(review): @fstab is only used as a NULL check/default - the loop
 * below walks iis_fs_tab (and its ARRAY_SIZE) regardless of the table the
 * caller passed in.  Looks unintended; confirm before relying on custom
 * fs tables.
 */
int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
			    unsigned int *fstab,
			    unsigned int rate, struct clk *clk)
{
	unsigned long clkrate = clk_get_rate(clk);
	unsigned int div;
	unsigned int fsclk;
	unsigned int actual;
	unsigned int fs;
	unsigned int fsdiv;
	signed int deviation = 0;
	unsigned int best_fs = 0;
	unsigned int best_div = 0;
	unsigned int best_rate = 0;
	unsigned int best_deviation = INT_MAX;

	pr_debug("Input clock rate %ldHz\n", clkrate);

	if (fstab == NULL)
		fstab = iis_fs_tab;

	for (fs = 0; fs < ARRAY_SIZE(iis_fs_tab); fs++) {
		fsdiv = iis_fs_tab[fs];

		fsclk = clkrate / fsdiv;
		div = fsclk / rate;

		/* round the divider to the nearest integer */
		if ((fsclk % rate) > (rate / 2))
			div++;

		if (div <= 1)
			continue;

		actual = clkrate / (fsdiv * div);
		deviation = actual - rate;

		printk(KERN_DEBUG "%ufs: div %u => result %u, deviation %d\n",
		       fsdiv, div, actual, deviation);

		deviation = abs(deviation);

		if (deviation < best_deviation) {
			best_fs = fsdiv;
			best_div = div;
			best_rate = actual;
			best_deviation = deviation;
		}

		/* exact match: no point scanning further */
		if (deviation == 0)
			break;
	}

	printk(KERN_DEBUG "best: fs=%u, div=%u, rate=%u\n",
	       best_fs, best_div, best_rate);

	info->fs_div = best_fs;
	info->clk_div = best_div;

	return 0;
}
EXPORT_SYMBOL_GPL(s3c_i2sv2_iis_calc_rate);

/*
 * Common probe: map the register window at @base, claim and enable the
 * "iis" peripheral clock, and park both tx and rx paths in a known-off
 * state.  Returns 0, or -ENXIO/-ENOENT on ioremap/clock failure.
 */
int s3c_i2sv2_probe(struct snd_soc_dai *dai,
		    struct s3c_i2sv2_info *i2s,
		    unsigned long base)
{
	struct device *dev = dai->dev;
	unsigned int iismod;

	i2s->dev = dev;

	/* record our i2s structure for later use in the callbacks */
	snd_soc_dai_set_drvdata(dai, i2s);

	i2s->regs = ioremap(base, 0x100);
	if (i2s->regs == NULL) {
		dev_err(dev, "cannot ioremap registers\n");
		return -ENXIO;
	}

	i2s->iis_pclk = clk_get(dev, "iis");
	if (IS_ERR(i2s->iis_pclk)) {
		dev_err(dev, "failed to get iis_clock\n");
		iounmap(i2s->regs);
		return -ENOENT;
	}

	clk_enable(i2s->iis_pclk);

	/* Mark ourselves as in TXRX mode so we can run through our cleanup
	 * process without warnings. */
	iismod = readl(i2s->regs + S3C2412_IISMOD);
	iismod |= S3C2412_IISMOD_MODE_TXRX;
	writel(iismod, i2s->regs + S3C2412_IISMOD);
	s3c2412_snd_txctrl(i2s, 0);
	s3c2412_snd_rxctrl(i2s, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(s3c_i2sv2_probe);

#ifdef CONFIG_PM
/* Save IISMOD/IISCON/IISPSR for an active DAI so resume can restore them. */
static int s3c2412_i2s_suspend(struct snd_soc_dai *dai)
{
	struct s3c_i2sv2_info *i2s = to_info(dai);
	u32 iismod;

	if (dai->active) {
		i2s->suspend_iismod = readl(i2s->regs + S3C2412_IISMOD);
		i2s->suspend_iiscon = readl(i2s->regs + S3C2412_IISCON);
		i2s->suspend_iispsr = readl(i2s->regs + S3C2412_IISPSR);

		/* some basic suspend checks */

		iismod = readl(i2s->regs + S3C2412_IISMOD);

		/* NOTE(review): these tests apply IISCON_* bit masks to the
		 * IISMOD value read above - IISCON looks like the intended
		 * register; confirm before trusting these warnings. */
		if (iismod & S3C2412_IISCON_RXDMA_ACTIVE)
			pr_warning("%s: RXDMA active?\n", __func__);

		if (iismod & S3C2412_IISCON_TXDMA_ACTIVE)
			pr_warning("%s: TXDMA active?\n", __func__);

		if (iismod & S3C2412_IISCON_IIS_ACTIVE)
			pr_warning("%s: IIS active\n", __func__);
	}

	return 0;
}

/* Restore the saved registers for an active DAI and flush both FIFOs. */
static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
{
	struct s3c_i2sv2_info *i2s = to_info(dai);

	pr_info("dai_active %d, IISMOD %08x, IISCON %08x\n",
		dai->active, i2s->suspend_iismod, i2s->suspend_iiscon);

	if (dai->active) {
		writel(i2s->suspend_iiscon, i2s->regs + S3C2412_IISCON);
		writel(i2s->suspend_iismod, i2s->regs + S3C2412_IISMOD);
		writel(i2s->suspend_iispsr, i2s->regs + S3C2412_IISPSR);

		writel(S3C2412_IISFIC_RXFLUSH | S3C2412_IISFIC_TXFLUSH,
		       i2s->regs + S3C2412_IISFIC);

		ndelay(250);
		writel(0x0, i2s->regs + S3C2412_IISFIC);
	}

	return 0;
}
#else
#define s3c2412_i2s_suspend NULL
#define s3c2412_i2s_resume NULL
#endif

/*
 * Register a DAI built on this core: install the common callbacks into
 * @drv->ops.  hw_params and delay are only set if the caller did not
 * already override them (e.g. IISv4); trigger/set_fmt/set_clkdiv/set_sysclk
 * are always ours.
 */
int s3c_i2sv2_register_dai(struct device *dev, int id,
		struct snd_soc_dai_driver *drv)
{
	struct snd_soc_dai_ops *ops = drv->ops;

	ops->trigger = s3c2412_i2s_trigger;
	if (!ops->hw_params)
		ops->hw_params = s3c_i2sv2_hw_params;
	ops->set_fmt = s3c2412_i2s_set_fmt;
	ops->set_clkdiv = s3c2412_i2s_set_clkdiv;
	ops->set_sysclk = s3c_i2sv2_set_sysclk;

	/* Allow overriding by (for example) IISv4 */
	if (!ops->delay)
		ops->delay = s3c2412_i2s_delay;

	drv->suspend = s3c2412_i2s_suspend;
	drv->resume = s3c2412_i2s_resume;

	return snd_soc_register_dai(dev, drv);
}
EXPORT_SYMBOL_GPL(s3c_i2sv2_register_dai);

MODULE_LICENSE("GPL");
gpl-2.0
mandfx/android_kernel_huawei_c8816
arch/sh/kernel/cpu/sh3/probe.c
10531
3037
/*
 * arch/sh/kernel/cpu/sh3/probe.c
 *
 * CPU Subtype Probing for SH-3.
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2002  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>

/*
 * Probe the SH-3 CPU subtype by poking the operand-cache address array.
 *
 * Writes a toggled VALID bit at two OC address-array slots that alias on
 * 128-entry (shadowed) caches but not on 256-entry ones, then reads back
 * to see whether the second write clobbered the first.  The result selects
 * CPU_SH7708 (shadowed) or the SH7729 family, and fills in the
 * boot_cpu_data cache geometry accordingly.  Must run uncached because it
 * manipulates the live data cache.
 */
void __cpuinit cpu_probe(void)
{
	unsigned long addr0, addr1, data0, data1, data2, data3;

	jump_to_uncached();
	/*
	 * Check if the entry shadows or not.
	 * When shadowed, it's 128-entry system.
	 * Otherwise, it's 256-entry system.
	 */
	addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
	addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);

	/* First, write back & invalidate */
	data0  = __raw_readl(addr0);
	__raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
	data1  = __raw_readl(addr1);
	__raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);

	/* Next, check if there's shadow or not */
	data0 = __raw_readl(addr0);
	data0 ^= SH_CACHE_VALID;
	__raw_writel(data0, addr0);
	data1 = __raw_readl(addr1);
	data2 = data1 ^ SH_CACHE_VALID;
	__raw_writel(data2, addr1);
	data3 = __raw_readl(addr0);

	/* Lastly, invalidate them. */
	__raw_writel(data0&~SH_CACHE_VALID, addr0);
	__raw_writel(data2&~SH_CACHE_VALID, addr1);

	back_to_cached();

	boot_cpu_data.dcache.ways		= 4;
	boot_cpu_data.dcache.entry_shift	= 4;
	boot_cpu_data.dcache.linesz		= L1_CACHE_BYTES;
	boot_cpu_data.dcache.flags		= 0;

	/*
	 * 7709A/7729 has 16K cache (256-entry), while 7702 has only
	 * 2K(direct) 7702 is not supported (yet)
	 */
	if (data0 == data1 && data2 == data3) {	/* Shadow */
		boot_cpu_data.dcache.way_incr	= (1 << 11);
		boot_cpu_data.dcache.entry_mask	= 0x7f0;
		boot_cpu_data.dcache.sets	= 128;
		boot_cpu_data.type = CPU_SH7708;

		boot_cpu_data.flags |= CPU_HAS_MMU_PAGE_ASSOC;
	} else {				/* 7709A or 7729  */
		boot_cpu_data.dcache.way_incr	= (1 << 12);
		boot_cpu_data.dcache.entry_mask	= 0xff0;
		boot_cpu_data.dcache.sets	= 256;
		boot_cpu_data.type = CPU_SH7729;

#if defined(CONFIG_CPU_SUBTYPE_SH7706)
		boot_cpu_data.type = CPU_SH7706;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
		boot_cpu_data.type = CPU_SH7710;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7712)
		boot_cpu_data.type = CPU_SH7712;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7720)
		boot_cpu_data.type = CPU_SH7720;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7721)
		boot_cpu_data.type = CPU_SH7721;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
		boot_cpu_data.type = CPU_SH7705;

#if defined(CONFIG_SH7705_CACHE_32KB)
		/* SH7705 optionally doubles the cache: 32KB, 512 sets */
		boot_cpu_data.dcache.way_incr	= (1 << 13);
		boot_cpu_data.dcache.entry_mask	= 0x1ff0;
		boot_cpu_data.dcache.sets	= 512;
		__raw_writel(CCR_CACHE_32KB, CCR3_REG);
#else
		__raw_writel(CCR_CACHE_16KB, CCR3_REG);
#endif
#endif
	}

	/*
	 * SH-3 doesn't have separate caches
	 */
	boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
	boot_cpu_data.icache = boot_cpu_data.dcache;

	boot_cpu_data.family = CPU_FAMILY_SH3;
}
gpl-2.0
unixbhaskar/Linux-kernel
arch/powerpc/mm/40x_mmu.c
11811
4488
/*
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>

#include "mmu_decl.h"

/* Command-line switch (see mmu_decl.h users): skip large-TLB mapping. */
extern int __map_without_ltlbs;
/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 *
 * Programs the zone protection register and the real-mode cache control
 * SPRs; no page tables are touched here.
 */
void __init MMU_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the 4xx's zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB.  For user access, zone 1 is used,
	 * for kernel access, zone 0 is used.  We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE.  This also allows
	 * kernel access as indicated in the PTE.
	 */

        mtspr(SPRN_ZPR, 0x10000000);

	flush_instruction_cache();

	/*
	 * Set up the real-mode cache parameters for the exception vector
	 * handlers (which are run in real-mode).
	 */

        mtspr(SPRN_DCWR, 0x00000000);	/* All caching is write-back */

        /*
	 * Cache instruction and data space where the exception
	 * vectors and the kernel live in real-mode.
	 */

        mtspr(SPRN_DCCR, 0xFFFF0000);	/* 2GByte of data space at 0x0. */
        mtspr(SPRN_ICCR, 0xFFFF0000);	/* 2GByte of instr. space at 0x0. */
}

#define LARGE_PAGE_SIZE_16M	(1<<24)
#define LARGE_PAGE_SIZE_4M	(1<<22)

/*
 * Map as much of lowmem as possible with 16M then 4M large pages,
 * writing the PMD entries directly.  Returns the number of bytes mapped
 * (also installed as the memblock allocation limit); returns 0 when
 * large-TLB mapping is disabled on the command line.
 */
unsigned long __init mmu_mapin_ram(unsigned long top)
{
	unsigned long v, s, mapped;
	phys_addr_t p;

	v = KERNELBASE;
	p = 0;
	s = total_lowmem;

	if (__map_without_ltlbs)
		return 0;

	while (s >= LARGE_PAGE_SIZE_16M) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE;

		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
		/* a 16M page occupies four consecutive PMD slots */
		pmd_val(*pmdp++) = val;
		pmd_val(*pmdp++) = val;
		pmd_val(*pmdp++) = val;
		pmd_val(*pmdp++) = val;

		v += LARGE_PAGE_SIZE_16M;
		p += LARGE_PAGE_SIZE_16M;
		s -= LARGE_PAGE_SIZE_16M;
	}

	while (s >= LARGE_PAGE_SIZE_4M) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE;

		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
		pmd_val(*pmdp) = val;

		v += LARGE_PAGE_SIZE_4M;
		p += LARGE_PAGE_SIZE_4M;
		s -= LARGE_PAGE_SIZE_4M;
	}

	mapped = total_lowmem - s;

	/* If the size of RAM is not an exact power of two, we may not
	 * have covered RAM in its entirety with 16 and 4 MiB
	 * pages. Consequently, restrict the top end of RAM currently
	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
	 * coverage with normal-sized pages (or other reasons) do not
	 * attempt to allocate outside the allowed range.
	 */
	memblock_set_current_limit(mapped);

	return mapped;
}

/* Cap early memblock allocations to what head_40x.S can address. */
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 40x can only access 16MB at the moment (see head_40x.S) */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
}
gpl-2.0
huz123/bricked.tenderloin
arch/mips/lib/iomap.c
12579
4626
/* * Implement the default iomap interfaces * * (C) Copyright 2004 Linus Torvalds * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org> * (C) Copyright 2007 MIPS Technologies, Inc. * written by Ralf Baechle <ralf@linux-mips.org> */ #include <linux/module.h> #include <asm/io.h> /* * Read/write from/to an (offsettable) iomem cookie. It might be a PIO * access or a MMIO access, these functions don't care. The info is * encoded in the hardware mapping set up by the mapping functions * (or the cookie itself, depending on implementation and hw). * * The generic routines don't assume any hardware mappings, and just * encode the PIO/MMIO as part of the cookie. They coldly assume that * the MMIO IO mappings are not in the low address range. * * Architectures for which this is not true can't use this generic * implementation and should do their own copy. */ #define PIO_MASK 0x0ffffUL unsigned int ioread8(void __iomem *addr) { return readb(addr); } EXPORT_SYMBOL(ioread8); unsigned int ioread16(void __iomem *addr) { return readw(addr); } EXPORT_SYMBOL(ioread16); unsigned int ioread16be(void __iomem *addr) { return be16_to_cpu(__raw_readw(addr)); } EXPORT_SYMBOL(ioread16be); unsigned int ioread32(void __iomem *addr) { return readl(addr); } EXPORT_SYMBOL(ioread32); unsigned int ioread32be(void __iomem *addr) { return be32_to_cpu(__raw_readl(addr)); } EXPORT_SYMBOL(ioread32be); void iowrite8(u8 val, void __iomem *addr) { writeb(val, addr); } EXPORT_SYMBOL(iowrite8); void iowrite16(u16 val, void __iomem *addr) { writew(val, addr); } EXPORT_SYMBOL(iowrite16); void iowrite16be(u16 val, void __iomem *addr) { __raw_writew(cpu_to_be16(val), addr); } EXPORT_SYMBOL(iowrite16be); void iowrite32(u32 val, void __iomem *addr) { writel(val, addr); } EXPORT_SYMBOL(iowrite32); void iowrite32be(u32 val, void __iomem *addr) { __raw_writel(cpu_to_be32(val), addr); } EXPORT_SYMBOL(iowrite32be); /* * These are the "repeat MMIO read/write" functions. 
* Note the "__raw" accesses, since we don't want to * convert to CPU byte order. We write in "IO byte * order" (we also don't have IO barriers). */ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) { while (--count >= 0) { u8 data = __raw_readb(addr); *dst = data; dst++; } } static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) { while (--count >= 0) { u16 data = __raw_readw(addr); *dst = data; dst++; } } static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) { while (--count >= 0) { u32 data = __raw_readl(addr); *dst = data; dst++; } } static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count) { while (--count >= 0) { __raw_writeb(*src, addr); src++; } } static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count) { while (--count >= 0) { __raw_writew(*src, addr); src++; } } static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) { while (--count >= 0) { __raw_writel(*src, addr); src++; } } void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) { mmio_insb(addr, dst, count); } EXPORT_SYMBOL(ioread8_rep); void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) { mmio_insw(addr, dst, count); } EXPORT_SYMBOL(ioread16_rep); void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) { mmio_insl(addr, dst, count); } EXPORT_SYMBOL(ioread32_rep); void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) { mmio_outsb(addr, src, count); } EXPORT_SYMBOL(iowrite8_rep); void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) { mmio_outsw(addr, src, count); } EXPORT_SYMBOL(iowrite16_rep); void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) { mmio_outsl(addr, src, count); } EXPORT_SYMBOL(iowrite32_rep); /* * Create a virtual mapping cookie for an IO port range * * This uses the same mapping are as the in/out family which has to be setup * by the platform initialization code. 
* * Just to make matters somewhat more interesting on MIPS systems with * multiple host bridge each will have it's own ioport address space. */ static void __iomem *ioport_map_legacy(unsigned long port, unsigned int nr) { return (void __iomem *) (mips_io_port_base + port); } void __iomem *ioport_map(unsigned long port, unsigned int nr) { if (port > PIO_MASK) return NULL; return ioport_map_legacy(port, nr); } EXPORT_SYMBOL(ioport_map); void ioport_unmap(void __iomem *addr) { /* Nothing to do */ } EXPORT_SYMBOL(ioport_unmap);
gpl-2.0
Lloir/Prometheus_kernel_golf
drivers/scsi/aic94xx/aic94xx_sds.c
13091
37328
/* * Aic94xx SAS/SATA driver access to shared data structures and memory * maps. * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This file is part of the aic94xx driver. * * The aic94xx driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * The aic94xx driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the aic94xx driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/pci.h> #include <linux/slab.h> #include <linux/delay.h> #include "aic94xx.h" #include "aic94xx_reg.h" #include "aic94xx_sds.h" /* ---------- OCM stuff ---------- */ struct asd_ocm_dir_ent { u8 type; u8 offs[3]; u8 _r1; u8 size[3]; } __attribute__ ((packed)); struct asd_ocm_dir { char sig[2]; u8 _r1[2]; u8 major; /* 0 */ u8 minor; /* 0 */ u8 _r2; u8 num_de; struct asd_ocm_dir_ent entry[15]; } __attribute__ ((packed)); #define OCM_DE_OCM_DIR 0x00 #define OCM_DE_WIN_DRVR 0x01 #define OCM_DE_BIOS_CHIM 0x02 #define OCM_DE_RAID_ENGN 0x03 #define OCM_DE_BIOS_INTL 0x04 #define OCM_DE_BIOS_CHIM_OSM 0x05 #define OCM_DE_BIOS_CHIM_DYNAMIC 0x06 #define OCM_DE_ADDC2C_RES0 0x07 #define OCM_DE_ADDC2C_RES1 0x08 #define OCM_DE_ADDC2C_RES2 0x09 #define OCM_DE_ADDC2C_RES3 0x0A #define OCM_INIT_DIR_ENTRIES 5 /*************************************************************************** * OCM directory default ***************************************************************************/ static struct asd_ocm_dir OCMDirInit = { .sig 
= {0x4D, 0x4F}, /* signature */ .num_de = OCM_INIT_DIR_ENTRIES, /* no. of directory entries */ }; /*************************************************************************** * OCM directory Entries default ***************************************************************************/ static struct asd_ocm_dir_ent OCMDirEntriesInit[OCM_INIT_DIR_ENTRIES] = { { .type = (OCM_DE_ADDC2C_RES0), /* Entry type */ .offs = {128}, /* Offset */ .size = {0, 4}, /* size */ }, { .type = (OCM_DE_ADDC2C_RES1), /* Entry type */ .offs = {128, 4}, /* Offset */ .size = {0, 4}, /* size */ }, { .type = (OCM_DE_ADDC2C_RES2), /* Entry type */ .offs = {128, 8}, /* Offset */ .size = {0, 4}, /* size */ }, { .type = (OCM_DE_ADDC2C_RES3), /* Entry type */ .offs = {128, 12}, /* Offset */ .size = {0, 4}, /* size */ }, { .type = (OCM_DE_WIN_DRVR), /* Entry type */ .offs = {128, 16}, /* Offset */ .size = {128, 235, 1}, /* size */ }, }; struct asd_bios_chim_struct { char sig[4]; u8 major; /* 1 */ u8 minor; /* 0 */ u8 bios_major; u8 bios_minor; __le32 bios_build; u8 flags; u8 pci_slot; __le16 ue_num; __le16 ue_size; u8 _r[14]; /* The unit element array is right here. */ } __attribute__ ((packed)); /** * asd_read_ocm_seg - read an on chip memory (OCM) segment * @asd_ha: pointer to the host adapter structure * @buffer: where to write the read data * @offs: offset into OCM where to read from * @size: how many bytes to read * * Return the number of bytes not read. Return 0 on success. 
*/ static int asd_read_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer, u32 offs, int size) { u8 *p = buffer; if (unlikely(asd_ha->iospace)) asd_read_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size); else { for ( ; size > 0; size--, offs++, p++) *p = asd_read_ocm_byte(asd_ha, offs); } return size; } static int asd_read_ocm_dir(struct asd_ha_struct *asd_ha, struct asd_ocm_dir *dir, u32 offs) { int err = asd_read_ocm_seg(asd_ha, dir, offs, sizeof(*dir)); if (err) { ASD_DPRINTK("couldn't read ocm segment\n"); return err; } if (dir->sig[0] != 'M' || dir->sig[1] != 'O') { ASD_DPRINTK("no valid dir signature(%c%c) at start of OCM\n", dir->sig[0], dir->sig[1]); return -ENOENT; } if (dir->major != 0) { asd_printk("unsupported major version of ocm dir:0x%x\n", dir->major); return -ENOENT; } dir->num_de &= 0xf; return 0; } /** * asd_write_ocm_seg - write an on chip memory (OCM) segment * @asd_ha: pointer to the host adapter structure * @buffer: where to read the write data * @offs: offset into OCM to write to * @size: how many bytes to write * * Return the number of bytes not written. Return 0 on success. 
*/ static void asd_write_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer, u32 offs, int size) { u8 *p = buffer; if (unlikely(asd_ha->iospace)) asd_write_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size); else { for ( ; size > 0; size--, offs++, p++) asd_write_ocm_byte(asd_ha, offs, *p); } return; } #define THREE_TO_NUM(X) ((X)[0] | ((X)[1] << 8) | ((X)[2] << 16)) static int asd_find_dir_entry(struct asd_ocm_dir *dir, u8 type, u32 *offs, u32 *size) { int i; struct asd_ocm_dir_ent *ent; for (i = 0; i < dir->num_de; i++) { if (dir->entry[i].type == type) break; } if (i >= dir->num_de) return -ENOENT; ent = &dir->entry[i]; *offs = (u32) THREE_TO_NUM(ent->offs); *size = (u32) THREE_TO_NUM(ent->size); return 0; } #define OCM_BIOS_CHIM_DE 2 #define BC_BIOS_PRESENT 1 static int asd_get_bios_chim(struct asd_ha_struct *asd_ha, struct asd_ocm_dir *dir) { int err; struct asd_bios_chim_struct *bc_struct; u32 offs, size; err = asd_find_dir_entry(dir, OCM_BIOS_CHIM_DE, &offs, &size); if (err) { ASD_DPRINTK("couldn't find BIOS_CHIM dir ent\n"); goto out; } err = -ENOMEM; bc_struct = kmalloc(sizeof(*bc_struct), GFP_KERNEL); if (!bc_struct) { asd_printk("no memory for bios_chim struct\n"); goto out; } err = asd_read_ocm_seg(asd_ha, (void *)bc_struct, offs, sizeof(*bc_struct)); if (err) { ASD_DPRINTK("couldn't read ocm segment\n"); goto out2; } if (strncmp(bc_struct->sig, "SOIB", 4) && strncmp(bc_struct->sig, "IPSA", 4)) { ASD_DPRINTK("BIOS_CHIM entry has no valid sig(%c%c%c%c)\n", bc_struct->sig[0], bc_struct->sig[1], bc_struct->sig[2], bc_struct->sig[3]); err = -ENOENT; goto out2; } if (bc_struct->major != 1) { asd_printk("BIOS_CHIM unsupported major version:0x%x\n", bc_struct->major); err = -ENOENT; goto out2; } if (bc_struct->flags & BC_BIOS_PRESENT) { asd_ha->hw_prof.bios.present = 1; asd_ha->hw_prof.bios.maj = bc_struct->bios_major; asd_ha->hw_prof.bios.min = bc_struct->bios_minor; asd_ha->hw_prof.bios.bld = le32_to_cpu(bc_struct->bios_build); ASD_DPRINTK("BIOS present 
(%d,%d), %d\n", asd_ha->hw_prof.bios.maj, asd_ha->hw_prof.bios.min, asd_ha->hw_prof.bios.bld); } asd_ha->hw_prof.ue.num = le16_to_cpu(bc_struct->ue_num); asd_ha->hw_prof.ue.size= le16_to_cpu(bc_struct->ue_size); ASD_DPRINTK("ue num:%d, ue size:%d\n", asd_ha->hw_prof.ue.num, asd_ha->hw_prof.ue.size); size = asd_ha->hw_prof.ue.num * asd_ha->hw_prof.ue.size; if (size > 0) { err = -ENOMEM; asd_ha->hw_prof.ue.area = kmalloc(size, GFP_KERNEL); if (!asd_ha->hw_prof.ue.area) goto out2; err = asd_read_ocm_seg(asd_ha, (void *)asd_ha->hw_prof.ue.area, offs + sizeof(*bc_struct), size); if (err) { kfree(asd_ha->hw_prof.ue.area); asd_ha->hw_prof.ue.area = NULL; asd_ha->hw_prof.ue.num = 0; asd_ha->hw_prof.ue.size = 0; ASD_DPRINTK("couldn't read ue entries(%d)\n", err); } } out2: kfree(bc_struct); out: return err; } static void asd_hwi_initialize_ocm_dir (struct asd_ha_struct *asd_ha) { int i; /* Zero OCM */ for (i = 0; i < OCM_MAX_SIZE; i += 4) asd_write_ocm_dword(asd_ha, i, 0); /* Write Dir */ asd_write_ocm_seg(asd_ha, &OCMDirInit, 0, sizeof(struct asd_ocm_dir)); /* Write Dir Entries */ for (i = 0; i < OCM_INIT_DIR_ENTRIES; i++) asd_write_ocm_seg(asd_ha, &OCMDirEntriesInit[i], sizeof(struct asd_ocm_dir) + (i * sizeof(struct asd_ocm_dir_ent)) , sizeof(struct asd_ocm_dir_ent)); } static int asd_hwi_check_ocm_access (struct asd_ha_struct *asd_ha) { struct pci_dev *pcidev = asd_ha->pcidev; u32 reg; int err = 0; u32 v; /* check if OCM has been initialized by BIOS */ reg = asd_read_reg_dword(asd_ha, EXSICNFGR); if (!(reg & OCMINITIALIZED)) { err = pci_read_config_dword(pcidev, PCIC_INTRPT_STAT, &v); if (err) { asd_printk("couldn't access PCIC_INTRPT_STAT of %s\n", pci_name(pcidev)); goto out; } printk(KERN_INFO "OCM is not initialized by BIOS," "reinitialize it and ignore it, current IntrptStatus" "is 0x%x\n", v); if (v) err = pci_write_config_dword(pcidev, PCIC_INTRPT_STAT, v); if (err) { asd_printk("couldn't write PCIC_INTRPT_STAT of %s\n", pci_name(pcidev)); goto out; } 
asd_hwi_initialize_ocm_dir(asd_ha); } out: return err; } /** * asd_read_ocm - read on chip memory (OCM) * @asd_ha: pointer to the host adapter structure */ int asd_read_ocm(struct asd_ha_struct *asd_ha) { int err; struct asd_ocm_dir *dir; if (asd_hwi_check_ocm_access(asd_ha)) return -1; dir = kmalloc(sizeof(*dir), GFP_KERNEL); if (!dir) { asd_printk("no memory for ocm dir\n"); return -ENOMEM; } err = asd_read_ocm_dir(asd_ha, dir, 0); if (err) goto out; err = asd_get_bios_chim(asd_ha, dir); out: kfree(dir); return err; } /* ---------- FLASH stuff ---------- */ #define FLASH_RESET 0xF0 #define ASD_FLASH_SIZE 0x200000 #define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** " #define FLASH_NEXT_ENTRY_OFFS 0x2000 #define FLASH_MAX_DIR_ENTRIES 32 #define FLASH_DE_TYPE_MASK 0x3FFFFFFF #define FLASH_DE_MS 0x120 #define FLASH_DE_CTRL_A_USER 0xE0 struct asd_flash_de { __le32 type; __le32 offs; __le32 pad_size; __le32 image_size; __le32 chksum; u8 _r[12]; u8 version[32]; } __attribute__ ((packed)); struct asd_flash_dir { u8 cookie[32]; __le32 rev; /* 2 */ __le32 chksum; __le32 chksum_antidote; __le32 bld; u8 bld_id[32]; /* build id data */ u8 ver_data[32]; /* date and time of build */ __le32 ae_mask; __le32 v_mask; __le32 oc_mask; u8 _r[20]; struct asd_flash_de dir_entry[FLASH_MAX_DIR_ENTRIES]; } __attribute__ ((packed)); struct asd_manuf_sec { char sig[2]; /* 'S', 'M' */ u16 offs_next; u8 maj; /* 0 */ u8 min; /* 0 */ u16 chksum; u16 size; u8 _r[6]; u8 sas_addr[SAS_ADDR_SIZE]; u8 pcba_sn[ASD_PCBA_SN_SIZE]; /* Here start the other segments */ u8 linked_list[0]; } __attribute__ ((packed)); struct asd_manuf_phy_desc { u8 state; /* low 4 bits */ #define MS_PHY_STATE_ENABLED 0 #define MS_PHY_STATE_REPORTED 1 #define MS_PHY_STATE_HIDDEN 2 u8 phy_id; u16 _r; u8 phy_control_0; /* mode 5 reg 0x160 */ u8 phy_control_1; /* mode 5 reg 0x161 */ u8 phy_control_2; /* mode 5 reg 0x162 */ u8 phy_control_3; /* mode 5 reg 0x163 */ } __attribute__ ((packed)); struct asd_manuf_phy_param { char 
sig[2]; /* 'P', 'M' */ u16 next; u8 maj; /* 0 */ u8 min; /* 2 */ u8 num_phy_desc; /* 8 */ u8 phy_desc_size; /* 8 */ u8 _r[3]; u8 usage_model_id; u32 _r2; struct asd_manuf_phy_desc phy_desc[ASD_MAX_PHYS]; } __attribute__ ((packed)); #if 0 static const char *asd_sb_type[] = { "unknown", "SGPIO", [2 ... 0x7F] = "unknown", [0x80] = "ADPT_I2C", [0x81 ... 0xFF] = "VENDOR_UNIQUExx" }; #endif struct asd_ms_sb_desc { u8 type; u8 node_desc_index; u8 conn_desc_index; u8 _recvd[0]; } __attribute__ ((packed)); #if 0 static const char *asd_conn_type[] = { [0 ... 7] = "unknown", "SFF8470", "SFF8482", "SFF8484", [0x80] = "PCIX_DAUGHTER0", [0x81] = "SAS_DAUGHTER0", [0x82 ... 0xFF] = "VENDOR_UNIQUExx" }; static const char *asd_conn_location[] = { "unknown", "internal", "external", "board_to_board", }; #endif struct asd_ms_conn_desc { u8 type; u8 location; u8 num_sideband_desc; u8 size_sideband_desc; u32 _resvd; u8 name[16]; struct asd_ms_sb_desc sb_desc[0]; } __attribute__ ((packed)); struct asd_nd_phy_desc { u8 vp_attch_type; u8 attch_specific[0]; } __attribute__ ((packed)); #if 0 static const char *asd_node_type[] = { "IOP", "IO_CONTROLLER", "EXPANDER", "PORT_MULTIPLIER", "PORT_MULTIPLEXER", "MULTI_DROP_I2C_BUS", }; #endif struct asd_ms_node_desc { u8 type; u8 num_phy_desc; u8 size_phy_desc; u8 _resvd; u8 name[16]; struct asd_nd_phy_desc phy_desc[0]; } __attribute__ ((packed)); struct asd_ms_conn_map { char sig[2]; /* 'M', 'C' */ __le16 next; u8 maj; /* 0 */ u8 min; /* 0 */ __le16 cm_size; /* size of this struct */ u8 num_conn; u8 conn_size; u8 num_nodes; u8 usage_model_id; u32 _resvd; struct asd_ms_conn_desc conn_desc[0]; struct asd_ms_node_desc node_desc[0]; } __attribute__ ((packed)); struct asd_ctrla_phy_entry { u8 sas_addr[SAS_ADDR_SIZE]; u8 sas_link_rates; /* max in hi bits, min in low bits */ u8 flags; u8 sata_link_rates; u8 _r[5]; } __attribute__ ((packed)); struct asd_ctrla_phy_settings { u8 id0; /* P'h'y */ u8 _r; u16 next; u8 num_phys; /* number of PHYs in the PCI 
function */ u8 _r2[3]; struct asd_ctrla_phy_entry phy_ent[ASD_MAX_PHYS]; } __attribute__ ((packed)); struct asd_ll_el { u8 id0; u8 id1; __le16 next; u8 something_here[0]; } __attribute__ ((packed)); static int asd_poll_flash(struct asd_ha_struct *asd_ha) { int c; u8 d; for (c = 5000; c > 0; c--) { d = asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar); d ^= asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar); if (!d) return 0; udelay(5); } return -ENOENT; } static int asd_reset_flash(struct asd_ha_struct *asd_ha) { int err; err = asd_poll_flash(asd_ha); if (err) return err; asd_write_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar, FLASH_RESET); err = asd_poll_flash(asd_ha); return err; } static int asd_read_flash_seg(struct asd_ha_struct *asd_ha, void *buffer, u32 offs, int size) { asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs, size); return 0; } /** * asd_find_flash_dir - finds and reads the flash directory * @asd_ha: pointer to the host adapter structure * @flash_dir: pointer to flash directory structure * * If found, the flash directory segment will be copied to * @flash_dir. Return 1 if found, 0 if not. */ static int asd_find_flash_dir(struct asd_ha_struct *asd_ha, struct asd_flash_dir *flash_dir) { u32 v; for (v = 0; v < ASD_FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) { asd_read_flash_seg(asd_ha, flash_dir, v, sizeof(FLASH_DIR_COOKIE)-1); if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE, sizeof(FLASH_DIR_COOKIE)-1) == 0) { asd_ha->hw_prof.flash.dir_offs = v; asd_read_flash_seg(asd_ha, flash_dir, v, sizeof(*flash_dir)); return 1; } } return 0; } static int asd_flash_getid(struct asd_ha_struct *asd_ha) { int err = 0; u32 reg; reg = asd_read_reg_dword(asd_ha, EXSICNFGR); if (pci_read_config_dword(asd_ha->pcidev, PCI_CONF_FLSH_BAR, &asd_ha->hw_prof.flash.bar)) { asd_printk("couldn't read PCI_CONF_FLSH_BAR of %s\n", pci_name(asd_ha->pcidev)); return -ENOENT; } asd_ha->hw_prof.flash.present = 1; asd_ha->hw_prof.flash.wide = reg & FLASHW ? 
1 : 0; err = asd_reset_flash(asd_ha); if (err) { ASD_DPRINTK("couldn't reset flash(%d)\n", err); return err; } return 0; } static u16 asd_calc_flash_chksum(u16 *p, int size) { u16 chksum = 0; while (size-- > 0) chksum += *p++; return chksum; } static int asd_find_flash_de(struct asd_flash_dir *flash_dir, u32 entry_type, u32 *offs, u32 *size) { int i; struct asd_flash_de *de; for (i = 0; i < FLASH_MAX_DIR_ENTRIES; i++) { u32 type = le32_to_cpu(flash_dir->dir_entry[i].type); type &= FLASH_DE_TYPE_MASK; if (type == entry_type) break; } if (i >= FLASH_MAX_DIR_ENTRIES) return -ENOENT; de = &flash_dir->dir_entry[i]; *offs = le32_to_cpu(de->offs); *size = le32_to_cpu(de->pad_size); return 0; } static int asd_validate_ms(struct asd_manuf_sec *ms) { if (ms->sig[0] != 'S' || ms->sig[1] != 'M') { ASD_DPRINTK("manuf sec: no valid sig(%c%c)\n", ms->sig[0], ms->sig[1]); return -ENOENT; } if (ms->maj != 0) { asd_printk("unsupported manuf. sector. major version:%x\n", ms->maj); return -ENOENT; } ms->offs_next = le16_to_cpu((__force __le16) ms->offs_next); ms->chksum = le16_to_cpu((__force __le16) ms->chksum); ms->size = le16_to_cpu((__force __le16) ms->size); if (asd_calc_flash_chksum((u16 *)ms, ms->size/2)) { asd_printk("failed manuf sector checksum\n"); } return 0; } static int asd_ms_get_sas_addr(struct asd_ha_struct *asd_ha, struct asd_manuf_sec *ms) { memcpy(asd_ha->hw_prof.sas_addr, ms->sas_addr, SAS_ADDR_SIZE); return 0; } static int asd_ms_get_pcba_sn(struct asd_ha_struct *asd_ha, struct asd_manuf_sec *ms) { memcpy(asd_ha->hw_prof.pcba_sn, ms->pcba_sn, ASD_PCBA_SN_SIZE); asd_ha->hw_prof.pcba_sn[ASD_PCBA_SN_SIZE] = '\0'; return 0; } /** * asd_find_ll_by_id - find a linked list entry by its id * @start: void pointer to the first element in the linked list * @id0: the first byte of the id (offs 0) * @id1: the second byte of the id (offs 1) * * @start has to be the _base_ element start, since the * linked list entries's offset is from this pointer. 
* Some linked list entries use only the first id, in which case * you can pass 0xFF for the second. */ static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1) { struct asd_ll_el *el = start; do { switch (id1) { default: if (el->id1 == id1) case 0xFF: if (el->id0 == id0) return el; } el = start + le16_to_cpu(el->next); } while (el != start); return NULL; } /** * asd_ms_get_phy_params - get phy parameters from the manufacturing sector * @asd_ha: pointer to the host adapter structure * @manuf_sec: pointer to the manufacturing sector * * The manufacturing sector contans also the linked list of sub-segments, * since when it was read, its size was taken from the flash directory, * not from the structure size. * * HIDDEN phys do not count in the total count. REPORTED phys cannot * be enabled but are reported and counted towards the total. * ENABLED phys are enabled by default and count towards the total. * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys * merely specifies the number of phys the host adapter decided to * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN, * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLED. * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2 * are actually enabled (enabled by default, max number of phys * enableable in this case). 
*/ static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha, struct asd_manuf_sec *manuf_sec) { int i; int en_phys = 0; int rep_phys = 0; struct asd_manuf_phy_param *phy_param; struct asd_manuf_phy_param dflt_phy_param; phy_param = asd_find_ll_by_id(manuf_sec, 'P', 'M'); if (!phy_param) { ASD_DPRINTK("ms: no phy parameters found\n"); ASD_DPRINTK("ms: Creating default phy parameters\n"); dflt_phy_param.sig[0] = 'P'; dflt_phy_param.sig[1] = 'M'; dflt_phy_param.maj = 0; dflt_phy_param.min = 2; dflt_phy_param.num_phy_desc = 8; dflt_phy_param.phy_desc_size = sizeof(struct asd_manuf_phy_desc); for (i =0; i < ASD_MAX_PHYS; i++) { dflt_phy_param.phy_desc[i].state = 0; dflt_phy_param.phy_desc[i].phy_id = i; dflt_phy_param.phy_desc[i].phy_control_0 = 0xf6; dflt_phy_param.phy_desc[i].phy_control_1 = 0x10; dflt_phy_param.phy_desc[i].phy_control_2 = 0x43; dflt_phy_param.phy_desc[i].phy_control_3 = 0xeb; } phy_param = &dflt_phy_param; } if (phy_param->maj != 0) { asd_printk("unsupported manuf. phy param major version:0x%x\n", phy_param->maj); return -ENOENT; } ASD_DPRINTK("ms: num_phy_desc: %d\n", phy_param->num_phy_desc); asd_ha->hw_prof.enabled_phys = 0; for (i = 0; i < phy_param->num_phy_desc; i++) { struct asd_manuf_phy_desc *pd = &phy_param->phy_desc[i]; switch (pd->state & 0xF) { case MS_PHY_STATE_HIDDEN: ASD_DPRINTK("ms: phy%d: HIDDEN\n", i); continue; case MS_PHY_STATE_REPORTED: ASD_DPRINTK("ms: phy%d: REPORTED\n", i); asd_ha->hw_prof.enabled_phys &= ~(1 << i); rep_phys++; continue; case MS_PHY_STATE_ENABLED: ASD_DPRINTK("ms: phy%d: ENABLED\n", i); asd_ha->hw_prof.enabled_phys |= (1 << i); en_phys++; break; } asd_ha->hw_prof.phy_desc[i].phy_control_0 = pd->phy_control_0; asd_ha->hw_prof.phy_desc[i].phy_control_1 = pd->phy_control_1; asd_ha->hw_prof.phy_desc[i].phy_control_2 = pd->phy_control_2; asd_ha->hw_prof.phy_desc[i].phy_control_3 = pd->phy_control_3; } asd_ha->hw_prof.max_phys = rep_phys + en_phys; asd_ha->hw_prof.num_phys = en_phys; ASD_DPRINTK("ms: 
max_phys:0x%x, num_phys:0x%x\n", asd_ha->hw_prof.max_phys, asd_ha->hw_prof.num_phys); ASD_DPRINTK("ms: enabled_phys:0x%x\n", asd_ha->hw_prof.enabled_phys); return 0; } static int asd_ms_get_connector_map(struct asd_ha_struct *asd_ha, struct asd_manuf_sec *manuf_sec) { struct asd_ms_conn_map *cm; cm = asd_find_ll_by_id(manuf_sec, 'M', 'C'); if (!cm) { ASD_DPRINTK("ms: no connector map found\n"); return 0; } if (cm->maj != 0) { ASD_DPRINTK("ms: unsupported: connector map major version 0x%x" "\n", cm->maj); return -ENOENT; } /* XXX */ return 0; } /** * asd_process_ms - find and extract information from the manufacturing sector * @asd_ha: pointer to the host adapter structure * @flash_dir: pointer to the flash directory */ static int asd_process_ms(struct asd_ha_struct *asd_ha, struct asd_flash_dir *flash_dir) { int err; struct asd_manuf_sec *manuf_sec; u32 offs, size; err = asd_find_flash_de(flash_dir, FLASH_DE_MS, &offs, &size); if (err) { ASD_DPRINTK("Couldn't find the manuf. sector\n"); goto out; } if (size == 0) goto out; err = -ENOMEM; manuf_sec = kmalloc(size, GFP_KERNEL); if (!manuf_sec) { ASD_DPRINTK("no mem for manuf sector\n"); goto out; } err = asd_read_flash_seg(asd_ha, (void *)manuf_sec, offs, size); if (err) { ASD_DPRINTK("couldn't read manuf sector at 0x%x, size 0x%x\n", offs, size); goto out2; } err = asd_validate_ms(manuf_sec); if (err) { ASD_DPRINTK("couldn't validate manuf sector\n"); goto out2; } err = asd_ms_get_sas_addr(asd_ha, manuf_sec); if (err) { ASD_DPRINTK("couldn't read the SAS_ADDR\n"); goto out2; } ASD_DPRINTK("manuf sect SAS_ADDR %llx\n", SAS_ADDR(asd_ha->hw_prof.sas_addr)); err = asd_ms_get_pcba_sn(asd_ha, manuf_sec); if (err) { ASD_DPRINTK("couldn't read the PCBA SN\n"); goto out2; } ASD_DPRINTK("manuf sect PCBA SN %s\n", asd_ha->hw_prof.pcba_sn); err = asd_ms_get_phy_params(asd_ha, manuf_sec); if (err) { ASD_DPRINTK("ms: couldn't get phy parameters\n"); goto out2; } err = asd_ms_get_connector_map(asd_ha, manuf_sec); if (err) { 
ASD_DPRINTK("ms: couldn't get connector map\n"); goto out2; } out2: kfree(manuf_sec); out: return err; } static int asd_process_ctrla_phy_settings(struct asd_ha_struct *asd_ha, struct asd_ctrla_phy_settings *ps) { int i; for (i = 0; i < ps->num_phys; i++) { struct asd_ctrla_phy_entry *pe = &ps->phy_ent[i]; if (!PHY_ENABLED(asd_ha, i)) continue; if (*(u64 *)pe->sas_addr == 0) { asd_ha->hw_prof.enabled_phys &= ~(1 << i); continue; } /* This is the SAS address which should be sent in IDENTIFY. */ memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, pe->sas_addr, SAS_ADDR_SIZE); asd_ha->hw_prof.phy_desc[i].max_sas_lrate = (pe->sas_link_rates & 0xF0) >> 4; asd_ha->hw_prof.phy_desc[i].min_sas_lrate = (pe->sas_link_rates & 0x0F); asd_ha->hw_prof.phy_desc[i].max_sata_lrate = (pe->sata_link_rates & 0xF0) >> 4; asd_ha->hw_prof.phy_desc[i].min_sata_lrate = (pe->sata_link_rates & 0x0F); asd_ha->hw_prof.phy_desc[i].flags = pe->flags; ASD_DPRINTK("ctrla: phy%d: sas_addr: %llx, sas rate:0x%x-0x%x," " sata rate:0x%x-0x%x, flags:0x%x\n", i, SAS_ADDR(asd_ha->hw_prof.phy_desc[i].sas_addr), asd_ha->hw_prof.phy_desc[i].max_sas_lrate, asd_ha->hw_prof.phy_desc[i].min_sas_lrate, asd_ha->hw_prof.phy_desc[i].max_sata_lrate, asd_ha->hw_prof.phy_desc[i].min_sata_lrate, asd_ha->hw_prof.phy_desc[i].flags); } return 0; } /** * asd_process_ctrl_a_user - process CTRL-A user settings * @asd_ha: pointer to the host adapter structure * @flash_dir: pointer to the flash directory */ static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha, struct asd_flash_dir *flash_dir) { int err, i; u32 offs, size; struct asd_ll_el *el; struct asd_ctrla_phy_settings *ps; struct asd_ctrla_phy_settings dflt_ps; err = asd_find_flash_de(flash_dir, FLASH_DE_CTRL_A_USER, &offs, &size); if (err) { ASD_DPRINTK("couldn't find CTRL-A user settings section\n"); ASD_DPRINTK("Creating default CTRL-A user settings section\n"); dflt_ps.id0 = 'h'; dflt_ps.num_phys = 8; for (i =0; i < ASD_MAX_PHYS; i++) { 
memcpy(dflt_ps.phy_ent[i].sas_addr, asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE); dflt_ps.phy_ent[i].sas_link_rates = 0x98; dflt_ps.phy_ent[i].flags = 0x0; dflt_ps.phy_ent[i].sata_link_rates = 0x0; } size = sizeof(struct asd_ctrla_phy_settings); ps = &dflt_ps; } if (size == 0) goto out; err = -ENOMEM; el = kmalloc(size, GFP_KERNEL); if (!el) { ASD_DPRINTK("no mem for ctrla user settings section\n"); goto out; } err = asd_read_flash_seg(asd_ha, (void *)el, offs, size); if (err) { ASD_DPRINTK("couldn't read ctrla phy settings section\n"); goto out2; } err = -ENOENT; ps = asd_find_ll_by_id(el, 'h', 0xFF); if (!ps) { ASD_DPRINTK("couldn't find ctrla phy settings struct\n"); goto out2; } err = asd_process_ctrla_phy_settings(asd_ha, ps); if (err) { ASD_DPRINTK("couldn't process ctrla phy settings\n"); goto out2; } out2: kfree(el); out: return err; } /** * asd_read_flash - read flash memory * @asd_ha: pointer to the host adapter structure */ int asd_read_flash(struct asd_ha_struct *asd_ha) { int err; struct asd_flash_dir *flash_dir; err = asd_flash_getid(asd_ha); if (err) return err; flash_dir = kmalloc(sizeof(*flash_dir), GFP_KERNEL); if (!flash_dir) return -ENOMEM; err = -ENOENT; if (!asd_find_flash_dir(asd_ha, flash_dir)) { ASD_DPRINTK("couldn't find flash directory\n"); goto out; } if (le32_to_cpu(flash_dir->rev) != 2) { asd_printk("unsupported flash dir version:0x%x\n", le32_to_cpu(flash_dir->rev)); goto out; } err = asd_process_ms(asd_ha, flash_dir); if (err) { ASD_DPRINTK("couldn't process manuf sector settings\n"); goto out; } err = asd_process_ctrl_a_user(asd_ha, flash_dir); if (err) { ASD_DPRINTK("couldn't process CTRL-A user settings\n"); goto out; } out: kfree(flash_dir); return err; } /** * asd_verify_flash_seg - verify data with flash memory * @asd_ha: pointer to the host adapter structure * @src: pointer to the source data to be verified * @dest_offset: offset from flash memory * @bytes_to_verify: total bytes to verify */ int asd_verify_flash_seg(struct 
asd_ha_struct *asd_ha, const void *src, u32 dest_offset, u32 bytes_to_verify) { const u8 *src_buf; u8 flash_char; int err; u32 nv_offset, reg, i; reg = asd_ha->hw_prof.flash.bar; src_buf = NULL; err = FLASH_OK; nv_offset = dest_offset; src_buf = (const u8 *)src; for (i = 0; i < bytes_to_verify; i++) { flash_char = asd_read_reg_byte(asd_ha, reg + nv_offset + i); if (flash_char != src_buf[i]) { err = FAIL_VERIFY; break; } } return err; } /** * asd_write_flash_seg - write data into flash memory * @asd_ha: pointer to the host adapter structure * @src: pointer to the source data to be written * @dest_offset: offset from flash memory * @bytes_to_write: total bytes to write */ int asd_write_flash_seg(struct asd_ha_struct *asd_ha, const void *src, u32 dest_offset, u32 bytes_to_write) { const u8 *src_buf; u32 nv_offset, reg, i; int err; reg = asd_ha->hw_prof.flash.bar; src_buf = NULL; err = asd_check_flash_type(asd_ha); if (err) { ASD_DPRINTK("couldn't find the type of flash. err=%d\n", err); return err; } nv_offset = dest_offset; err = asd_erase_nv_sector(asd_ha, nv_offset, bytes_to_write); if (err) { ASD_DPRINTK("Erase failed at offset:0x%x\n", nv_offset); return err; } err = asd_reset_flash(asd_ha); if (err) { ASD_DPRINTK("couldn't reset flash. 
err=%d\n", err); return err; } src_buf = (const u8 *)src; for (i = 0; i < bytes_to_write; i++) { /* Setup program command sequence */ switch (asd_ha->hw_prof.flash.method) { case FLASH_METHOD_A: { asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA); asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55); asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xA0); asd_write_reg_byte(asd_ha, (reg + nv_offset + i), (*(src_buf + i))); break; } case FLASH_METHOD_B: { asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA); asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55); asd_write_reg_byte(asd_ha, (reg + 0x555), 0xA0); asd_write_reg_byte(asd_ha, (reg + nv_offset + i), (*(src_buf + i))); break; } default: break; } if (asd_chk_write_status(asd_ha, (nv_offset + i), 0) != 0) { ASD_DPRINTK("aicx: Write failed at offset:0x%x\n", reg + nv_offset + i); return FAIL_WRITE_FLASH; } } err = asd_reset_flash(asd_ha); if (err) { ASD_DPRINTK("couldn't reset flash. err=%d\n", err); return err; } return 0; } int asd_chk_write_status(struct asd_ha_struct *asd_ha, u32 sector_addr, u8 erase_flag) { u32 reg; u32 loop_cnt; u8 nv_data1, nv_data2; u8 toggle_bit1; /* * Read from DQ2 requires sector address * while it's dont care for DQ6 */ reg = asd_ha->hw_prof.flash.bar; for (loop_cnt = 0; loop_cnt < 50000; loop_cnt++) { nv_data1 = asd_read_reg_byte(asd_ha, reg); nv_data2 = asd_read_reg_byte(asd_ha, reg); toggle_bit1 = ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6) ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6)); if (toggle_bit1 == 0) { return 0; } else { if (nv_data2 & FLASH_STATUS_BIT_MASK_DQ5) { nv_data1 = asd_read_reg_byte(asd_ha, reg); nv_data2 = asd_read_reg_byte(asd_ha, reg); toggle_bit1 = ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6) ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6)); if (toggle_bit1 == 0) return 0; } } /* * ERASE is a sector-by-sector operation and requires * more time to finish while WRITE is byte-byte-byte * operation and takes lesser time to finish. 
* * For some strange reason a reduced ERASE delay gives different * behaviour across different spirit boards. Hence we set * a optimum balance of 50mus for ERASE which works well * across all boards. */ if (erase_flag) { udelay(FLASH_STATUS_ERASE_DELAY_COUNT); } else { udelay(FLASH_STATUS_WRITE_DELAY_COUNT); } } return -1; } /** * asd_hwi_erase_nv_sector - Erase the flash memory sectors. * @asd_ha: pointer to the host adapter structure * @flash_addr: pointer to offset from flash memory * @size: total bytes to erase. */ int asd_erase_nv_sector(struct asd_ha_struct *asd_ha, u32 flash_addr, u32 size) { u32 reg; u32 sector_addr; reg = asd_ha->hw_prof.flash.bar; /* sector staring address */ sector_addr = flash_addr & FLASH_SECTOR_SIZE_MASK; /* * Erasing an flash sector needs to be done in six consecutive * write cyles. */ while (sector_addr < flash_addr+size) { switch (asd_ha->hw_prof.flash.method) { case FLASH_METHOD_A: asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA); asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55); asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0x80); asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA); asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55); asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30); break; case FLASH_METHOD_B: asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA); asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55); asd_write_reg_byte(asd_ha, (reg + 0x555), 0x80); asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA); asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55); asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30); break; default: break; } if (asd_chk_write_status(asd_ha, sector_addr, 1) != 0) return FAIL_ERASE_FLASH; sector_addr += FLASH_SECTOR_SIZE; } return 0; } int asd_check_flash_type(struct asd_ha_struct *asd_ha) { u8 manuf_id; u8 dev_id; u8 sec_prot; u32 inc; u32 reg; int err; /* get Flash memory base address */ reg = asd_ha->hw_prof.flash.bar; /* Determine flash info */ err = asd_reset_flash(asd_ha); if (err) { ASD_DPRINTK("couldn't reset 
flash. err=%d\n", err); return err; } asd_ha->hw_prof.flash.method = FLASH_METHOD_UNKNOWN; asd_ha->hw_prof.flash.manuf = FLASH_MANUF_ID_UNKNOWN; asd_ha->hw_prof.flash.dev_id = FLASH_DEV_ID_UNKNOWN; /* Get flash info. This would most likely be AMD Am29LV family flash. * First try the sequence for word mode. It is the same as for * 008B (byte mode only), 160B (word mode) and 800D (word mode). */ inc = asd_ha->hw_prof.flash.wide ? 2 : 1; asd_write_reg_byte(asd_ha, reg + 0xAAA, 0xAA); asd_write_reg_byte(asd_ha, reg + 0x555, 0x55); asd_write_reg_byte(asd_ha, reg + 0xAAA, 0x90); manuf_id = asd_read_reg_byte(asd_ha, reg); dev_id = asd_read_reg_byte(asd_ha, reg + inc); sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc); /* Get out of autoselect mode. */ err = asd_reset_flash(asd_ha); if (err) { ASD_DPRINTK("couldn't reset flash. err=%d\n", err); return err; } ASD_DPRINTK("Flash MethodA manuf_id(0x%x) dev_id(0x%x) " "sec_prot(0x%x)\n", manuf_id, dev_id, sec_prot); err = asd_reset_flash(asd_ha); if (err != 0) return err; switch (manuf_id) { case FLASH_MANUF_ID_AMD: switch (sec_prot) { case FLASH_DEV_ID_AM29LV800DT: case FLASH_DEV_ID_AM29LV640MT: case FLASH_DEV_ID_AM29F800B: asd_ha->hw_prof.flash.method = FLASH_METHOD_A; break; default: break; } break; case FLASH_MANUF_ID_ST: switch (sec_prot) { case FLASH_DEV_ID_STM29W800DT: case FLASH_DEV_ID_STM29LV640: asd_ha->hw_prof.flash.method = FLASH_METHOD_A; break; default: break; } break; case FLASH_MANUF_ID_FUJITSU: switch (sec_prot) { case FLASH_DEV_ID_MBM29LV800TE: case FLASH_DEV_ID_MBM29DL800TA: asd_ha->hw_prof.flash.method = FLASH_METHOD_A; break; } break; case FLASH_MANUF_ID_MACRONIX: switch (sec_prot) { case FLASH_DEV_ID_MX29LV800BT: asd_ha->hw_prof.flash.method = FLASH_METHOD_A; break; } break; } if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN) { err = asd_reset_flash(asd_ha); if (err) { ASD_DPRINTK("couldn't reset flash. 
err=%d\n", err); return err; } /* Issue Unlock sequence for AM29LV008BT */ asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA); asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55); asd_write_reg_byte(asd_ha, (reg + 0x555), 0x90); manuf_id = asd_read_reg_byte(asd_ha, reg); dev_id = asd_read_reg_byte(asd_ha, reg + inc); sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc); ASD_DPRINTK("Flash MethodB manuf_id(0x%x) dev_id(0x%x) sec_prot" "(0x%x)\n", manuf_id, dev_id, sec_prot); err = asd_reset_flash(asd_ha); if (err != 0) { ASD_DPRINTK("couldn't reset flash. err=%d\n", err); return err; } switch (manuf_id) { case FLASH_MANUF_ID_AMD: switch (dev_id) { case FLASH_DEV_ID_AM29LV008BT: asd_ha->hw_prof.flash.method = FLASH_METHOD_B; break; default: break; } break; case FLASH_MANUF_ID_ST: switch (dev_id) { case FLASH_DEV_ID_STM29008: asd_ha->hw_prof.flash.method = FLASH_METHOD_B; break; default: break; } break; case FLASH_MANUF_ID_FUJITSU: switch (dev_id) { case FLASH_DEV_ID_MBM29LV008TA: asd_ha->hw_prof.flash.method = FLASH_METHOD_B; break; } break; case FLASH_MANUF_ID_INTEL: switch (dev_id) { case FLASH_DEV_ID_I28LV00TAT: asd_ha->hw_prof.flash.method = FLASH_METHOD_B; break; } break; case FLASH_MANUF_ID_MACRONIX: switch (dev_id) { case FLASH_DEV_ID_I28LV00TAT: asd_ha->hw_prof.flash.method = FLASH_METHOD_B; break; } break; default: return FAIL_FIND_FLASH_ID; } } if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN) return FAIL_FIND_FLASH_ID; asd_ha->hw_prof.flash.manuf = manuf_id; asd_ha->hw_prof.flash.dev_id = dev_id; asd_ha->hw_prof.flash.sec_prot = sec_prot; return 0; }
gpl-2.0
djwong/linux-xfs-dev
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
36
9428
// SPDX-License-Identifier: GPL-2.0 /* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. * * lnet/klnds/o2iblnd/o2iblnd_modparams.c * * Author: Eric Barton <eric@bartonsoftware.com> */ #include "o2iblnd.h" static int service = 987; module_param(service, int, 0444); MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)"); static int cksum; module_param(cksum, int, 0644); MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums"); static int timeout = 50; module_param(timeout, int, 0644); MODULE_PARM_DESC(timeout, "timeout (seconds)"); /* * Number of threads in each scheduler pool which is percpt, * we will estimate reasonable value based on CPUs if it's set to zero. 
*/ static int nscheds; module_param(nscheds, int, 0444); MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool"); /* NB: this value is shared by all CPTs, it can grow at runtime */ static int ntx = 512; module_param(ntx, int, 0444); MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool"); /* NB: this value is shared by all CPTs */ static int credits = 256; module_param(credits, int, 0444); MODULE_PARM_DESC(credits, "# concurrent sends"); static int peer_credits = 8; module_param(peer_credits, int, 0444); MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer"); static int peer_credits_hiw; module_param(peer_credits_hiw, int, 0444); MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits"); static int peer_buffer_credits; module_param(peer_buffer_credits, int, 0444); MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits"); static int peer_timeout = 180; module_param(peer_timeout, int, 0444); MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)"); static char *ipif_name = "ib0"; module_param(ipif_name, charp, 0444); MODULE_PARM_DESC(ipif_name, "IPoIB interface name"); static int retry_count = 5; module_param(retry_count, int, 0644); MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received"); static int rnr_retry_count = 6; module_param(rnr_retry_count, int, 0644); MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions"); static int keepalive = 100; module_param(keepalive, int, 0644); MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive"); static int ib_mtu; module_param(ib_mtu, int, 0444); MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096"); static int concurrent_sends; module_param(concurrent_sends, int, 0444); MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing"); #define IBLND_DEFAULT_MAP_ON_DEMAND IBLND_MAX_RDMA_FRAGS static int map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND; module_param(map_on_demand, 
int, 0444); MODULE_PARM_DESC(map_on_demand, "map on demand"); /* NB: this value is shared by all CPTs, it can grow at runtime */ static int fmr_pool_size = 512; module_param(fmr_pool_size, int, 0444); MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)"); /* NB: this value is shared by all CPTs, it can grow at runtime */ static int fmr_flush_trigger = 384; module_param(fmr_flush_trigger, int, 0444); MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush"); static int fmr_cache = 1; module_param(fmr_cache, int, 0444); MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching"); /* * 0: disable failover * 1: enable failover if necessary * 2: force to failover (for debug) */ static int dev_failover; module_param(dev_failover, int, 0444); MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)"); static int require_privileged_port; module_param(require_privileged_port, int, 0644); MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection"); static int use_privileged_port = 1; module_param(use_privileged_port, int, 0644); MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection"); struct kib_tunables kiblnd_tunables = { .kib_dev_failover = &dev_failover, .kib_service = &service, .kib_cksum = &cksum, .kib_timeout = &timeout, .kib_keepalive = &keepalive, .kib_ntx = &ntx, .kib_default_ipif = &ipif_name, .kib_retry_count = &retry_count, .kib_rnr_retry_count = &rnr_retry_count, .kib_ib_mtu = &ib_mtu, .kib_require_priv_port = &require_privileged_port, .kib_use_priv_port = &use_privileged_port, .kib_nscheds = &nscheds }; static struct lnet_ioctl_config_o2iblnd_tunables default_tunables; /* # messages/RDMAs in-flight */ int kiblnd_msg_queue_size(int version, struct lnet_ni *ni) { if (version == IBLND_MSG_VERSION_1) return IBLND_MSG_QUEUE_SIZE_V1; else if (ni) return ni->ni_peertxcredits; else return peer_credits; } int 
kiblnd_tunables_setup(struct lnet_ni *ni) { struct lnet_ioctl_config_o2iblnd_tunables *tunables; /* * if there was no tunables specified, setup the tunables to be * defaulted */ if (!ni->ni_lnd_tunables) { ni->ni_lnd_tunables = kzalloc(sizeof(*ni->ni_lnd_tunables), GFP_NOFS); if (!ni->ni_lnd_tunables) return -ENOMEM; memcpy(&ni->ni_lnd_tunables->lt_tun_u.lt_o2ib, &default_tunables, sizeof(*tunables)); } tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; /* Current API version */ tunables->lnd_version = 0; if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) { CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n", *kiblnd_tunables.kib_ib_mtu); return -EINVAL; } if (!ni->ni_peertimeout) ni->ni_peertimeout = peer_timeout; if (!ni->ni_maxtxcredits) ni->ni_maxtxcredits = credits; if (!ni->ni_peertxcredits) ni->ni_peertxcredits = peer_credits; if (!ni->ni_peerrtrcredits) ni->ni_peerrtrcredits = peer_buffer_credits; if (ni->ni_peertxcredits < IBLND_CREDITS_DEFAULT) ni->ni_peertxcredits = IBLND_CREDITS_DEFAULT; if (ni->ni_peertxcredits > IBLND_CREDITS_MAX) ni->ni_peertxcredits = IBLND_CREDITS_MAX; if (ni->ni_peertxcredits > credits) ni->ni_peertxcredits = credits; if (!tunables->lnd_peercredits_hiw) tunables->lnd_peercredits_hiw = peer_credits_hiw; if (tunables->lnd_peercredits_hiw < ni->ni_peertxcredits / 2) tunables->lnd_peercredits_hiw = ni->ni_peertxcredits / 2; if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits) tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1; if (tunables->lnd_map_on_demand <= 0 || tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) { /* Use the default */ CWARN("Invalid map_on_demand (%d), expects 1 - %d. 
Using default of %d\n", tunables->lnd_map_on_demand, IBLND_MAX_RDMA_FRAGS, IBLND_DEFAULT_MAP_ON_DEMAND); tunables->lnd_map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND; } if (tunables->lnd_map_on_demand == 1) { /* don't make sense to create map if only one fragment */ tunables->lnd_map_on_demand = 2; } if (!tunables->lnd_concurrent_sends) { if (tunables->lnd_map_on_demand > 0 && tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) { tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2; } else { tunables->lnd_concurrent_sends = ni->ni_peertxcredits; } } if (tunables->lnd_concurrent_sends > ni->ni_peertxcredits * 2) tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2; if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits / 2) tunables->lnd_concurrent_sends = ni->ni_peertxcredits / 2; if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits) { CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n", tunables->lnd_concurrent_sends, ni->ni_peertxcredits); } if (!tunables->lnd_fmr_pool_size) tunables->lnd_fmr_pool_size = fmr_pool_size; if (!tunables->lnd_fmr_flush_trigger) tunables->lnd_fmr_flush_trigger = fmr_flush_trigger; if (!tunables->lnd_fmr_cache) tunables->lnd_fmr_cache = fmr_cache; return 0; } void kiblnd_tunables_init(void) { default_tunables.lnd_version = 0; default_tunables.lnd_peercredits_hiw = peer_credits_hiw, default_tunables.lnd_map_on_demand = map_on_demand; default_tunables.lnd_concurrent_sends = concurrent_sends; default_tunables.lnd_fmr_pool_size = fmr_pool_size; default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger; default_tunables.lnd_fmr_cache = fmr_cache; }
gpl-2.0
olear/R104
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
36
60443
/******************************************************************************* This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. ST Ethernet IPs are built around a Synopsys IP Core. Copyright(C) 2007-2011 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Documentation available at: http://www.stlinux.com Support available at: https://bugzilla.stlinux.com/ *******************************************************************************/ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/if_ether.h> #include <linux/crc32.h> #include <linux/mii.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/prefetch.h> #ifdef CONFIG_STMMAC_DEBUG_FS #include <linux/debugfs.h> #include <linux/seq_file.h> #endif #include "stmmac.h" #define STMMAC_RESOURCE_NAME "stmmaceth" #undef STMMAC_DEBUG /*#define STMMAC_DEBUG*/ #ifdef STMMAC_DEBUG #define DBG(nlevel, klevel, fmt, args...) 
\ ((void)(netif_msg_##nlevel(priv) && \ printk(KERN_##klevel fmt, ## args))) #else #define DBG(nlevel, klevel, fmt, args...) do { } while (0) #endif #undef STMMAC_RX_DEBUG /*#define STMMAC_RX_DEBUG*/ #ifdef STMMAC_RX_DEBUG #define RX_DBG(fmt, args...) printk(fmt, ## args) #else #define RX_DBG(fmt, args...) do { } while (0) #endif #undef STMMAC_XMIT_DEBUG /*#define STMMAC_XMIT_DEBUG*/ #ifdef STMMAC_TX_DEBUG #define TX_DBG(fmt, args...) printk(fmt, ## args) #else #define TX_DBG(fmt, args...) do { } while (0) #endif #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) #define JUMBO_LEN 9000 /* Module parameters */ #define TX_TIMEO 5000 /* default 5 seconds */ static int watchdog = TX_TIMEO; module_param(watchdog, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds"); static int debug = -1; /* -1: default, 0: no output, 16: all */ module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)"); static int phyaddr = -1; module_param(phyaddr, int, S_IRUGO); MODULE_PARM_DESC(phyaddr, "Physical device address"); #define DMA_TX_SIZE 256 static int dma_txsize = DMA_TX_SIZE; module_param(dma_txsize, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list"); #define DMA_RX_SIZE 256 static int dma_rxsize = DMA_RX_SIZE; module_param(dma_rxsize, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list"); static int flow_ctrl = FLOW_OFF; module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); static int pause = PAUSE_TIME; module_param(pause, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pause, "Flow Control Pause Time"); #define TC_DEFAULT 64 static int tc = TC_DEFAULT; module_param(tc, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(tc, "DMA threshold control value"); /* Pay attention to tune this parameter; take care of both * hardware capability and network stabitily/performance impact. 
* Many tests showed that ~4ms latency seems to be good enough. */ #ifdef CONFIG_STMMAC_TIMER #define DEFAULT_PERIODIC_RATE 256 static int tmrate = DEFAULT_PERIODIC_RATE; module_param(tmrate, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)"); #endif #define DMA_BUFFER_SIZE BUF_SIZE_2KiB static int buf_sz = DMA_BUFFER_SIZE; module_param(buf_sz, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); /** * stmmac_verify_args - verify the driver parameters. * Description: it verifies if some wrong parameter is passed to the driver. * Note that wrong parameters are replaced with the default values. */ static void stmmac_verify_args(void) { if (unlikely(watchdog < 0)) watchdog = TX_TIMEO; if (unlikely(dma_rxsize < 0)) dma_rxsize = DMA_RX_SIZE; if (unlikely(dma_txsize < 0)) dma_txsize = DMA_TX_SIZE; if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) buf_sz = DMA_BUFFER_SIZE; if (unlikely(flow_ctrl > 1)) flow_ctrl = FLOW_AUTO; else if (likely(flow_ctrl < 0)) flow_ctrl = FLOW_OFF; if (unlikely((pause < 0) || (pause > 0xffff))) pause = PAUSE_TIME; } #if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) static void print_pkt(unsigned char *buf, int len) { int j; pr_info("len = %d byte, buf addr: 0x%p", len, buf); for (j = 0; j < len; j++) { if ((j % 16) == 0) pr_info("\n %03x:", j); pr_info(" %02x", buf[j]); } pr_info("\n"); } #endif /* minimum number of free TX descriptors required to wake up TX process */ #define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) { return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; } /* On some ST platforms, some HW system configuraton registers have to be * set according to the link speed negotiated. 
*/ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) { struct phy_device *phydev = priv->phydev; if (likely(priv->plat->fix_mac_speed)) priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed); } /** * stmmac_adjust_link * @dev: net device structure * Description: it adjusts the link parameters. */ static void stmmac_adjust_link(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; unsigned long flags; int new_state = 0; unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; if (phydev == NULL) return; DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n", phydev->addr, phydev->link); spin_lock_irqsave(&priv->lock, flags); if (phydev->link) { u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); /* Now we make sure that we can be in full duplex mode. * If not, we operate in half-duplex mode. */ if (phydev->duplex != priv->oldduplex) { new_state = 1; if (!(phydev->duplex)) ctrl &= ~priv->hw->link.duplex; else ctrl |= priv->hw->link.duplex; priv->oldduplex = phydev->duplex; } /* Flow Control operation */ if (phydev->pause) priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex, fc, pause_time); if (phydev->speed != priv->speed) { new_state = 1; switch (phydev->speed) { case 1000: if (likely(priv->plat->has_gmac)) ctrl &= ~priv->hw->link.port; stmmac_hw_fix_mac_speed(priv); break; case 100: case 10: if (priv->plat->has_gmac) { ctrl |= priv->hw->link.port; if (phydev->speed == SPEED_100) { ctrl |= priv->hw->link.speed; } else { ctrl &= ~(priv->hw->link.speed); } } else { ctrl &= ~priv->hw->link.port; } stmmac_hw_fix_mac_speed(priv); break; default: if (netif_msg_link(priv)) pr_warning("%s: Speed (%d) is not 10" " or 100!\n", dev->name, phydev->speed); break; } priv->speed = phydev->speed; } writel(ctrl, priv->ioaddr + MAC_CTRL_REG); if (!priv->oldlink) { new_state = 1; priv->oldlink = 1; } } else if (priv->oldlink) { new_state = 1; priv->oldlink = 0; priv->speed = 0; 
priv->oldduplex = -1; } if (new_state && netif_msg_link(priv)) phy_print_status(phydev); spin_unlock_irqrestore(&priv->lock, flags); DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); } /** * stmmac_init_phy - PHY initialization * @dev: net device structure * Description: it initializes the driver's PHY state, and attaches the PHY * to the mac driver. * Return value: * 0 on success */ static int stmmac_init_phy(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phydev; char phy_id[MII_BUS_ID_SIZE + 3]; char bus_id[MII_BUS_ID_SIZE]; int interface = priv->plat->interface; priv->oldlink = 0; priv->speed = 0; priv->oldduplex = -1; snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id); snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, priv->plat->phy_addr); pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface); if (IS_ERR(phydev)) { pr_err("%s: Could not attach to PHY\n", dev->name); return PTR_ERR(phydev); } /* Stop Advertising 1000BASE Capability if interface is not GMII */ if ((interface == PHY_INTERFACE_MODE_MII) || (interface == PHY_INTERFACE_MODE_RMII)) phydev->advertising &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); /* * Broken HW is sometimes missing the pull-up resistor on the * MDIO line, which results in reads to non-existent devices returning * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent * device as well. * Note: phydev->phy_id is the result of reading the UID PHY registers. 
*/ if (phydev->phy_id == 0) { phy_disconnect(phydev); return -ENODEV; } pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" " Link = %d\n", dev->name, phydev->phy_id, phydev->link); priv->phydev = phydev; return 0; } static inline void stmmac_enable_mac(void __iomem *ioaddr) { u32 value = readl(ioaddr + MAC_CTRL_REG); value |= MAC_RNABLE_RX | MAC_ENABLE_TX; writel(value, ioaddr + MAC_CTRL_REG); } static inline void stmmac_disable_mac(void __iomem *ioaddr) { u32 value = readl(ioaddr + MAC_CTRL_REG); value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); writel(value, ioaddr + MAC_CTRL_REG); } /** * display_ring * @p: pointer to the ring. * @size: size of the ring. * Description: display all the descriptors within the ring. */ static void display_ring(struct dma_desc *p, int size) { struct tmp_s { u64 a; unsigned int b; unsigned int c; }; int i; for (i = 0; i < size; i++) { struct tmp_s *x = (struct tmp_s *)(p + i); pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", i, (unsigned int)virt_to_phys(&p[i]), (unsigned int)(x->a), (unsigned int)((x->a) >> 32), x->b, x->c); pr_info("\n"); } } static int stmmac_set_bfsize(int mtu, int bufsize) { int ret = bufsize; if (mtu >= BUF_SIZE_4KiB) ret = BUF_SIZE_8KiB; else if (mtu >= BUF_SIZE_2KiB) ret = BUF_SIZE_4KiB; else if (mtu >= DMA_BUFFER_SIZE) ret = BUF_SIZE_2KiB; else ret = DMA_BUFFER_SIZE; return ret; } /** * init_dma_desc_rings - init the RX/TX descriptor rings * @dev: net device structure * Description: this function initializes the DMA RX/TX descriptors * and allocates the socket buffers. It suppors the chained and ring * modes. */ static void init_dma_desc_rings(struct net_device *dev) { int i; struct stmmac_priv *priv = netdev_priv(dev); struct sk_buff *skb; unsigned int txsize = priv->dma_tx_size; unsigned int rxsize = priv->dma_rx_size; unsigned int bfsize; int dis_ic = 0; int des3_as_data_buf = 0; /* Set the max buffer size according to the DESC mode * and the MTU. Note that RING mode allows 16KiB bsize. 
*/ bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); if (bfsize == BUF_SIZE_16KiB) des3_as_data_buf = 1; else bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); #ifdef CONFIG_STMMAC_TIMER /* Disable interrupts on completion for the reception if timer is on */ if (likely(priv->tm->enable)) dis_ic = 1; #endif DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", txsize, rxsize, bfsize); priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL); priv->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL); priv->dma_rx = (struct dma_desc *)dma_alloc_coherent(priv->device, rxsize * sizeof(struct dma_desc), &priv->dma_rx_phy, GFP_KERNEL); priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize, GFP_KERNEL); priv->dma_tx = (struct dma_desc *)dma_alloc_coherent(priv->device, txsize * sizeof(struct dma_desc), &priv->dma_tx_phy, GFP_KERNEL); if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) { pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__); return; } DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, " "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", dev->name, priv->dma_rx, priv->dma_tx, (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); /* RX INITIALIZATION */ DBG(probe, INFO, "stmmac: SKB addresses:\n" "skb\t\tskb data\tdma data\n"); for (i = 0; i < rxsize; i++) { struct dma_desc *p = priv->dma_rx + i; skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN, GFP_KERNEL); if (unlikely(skb == NULL)) { pr_err("%s: Rx init fails; skb is NULL\n", __func__); break; } skb_reserve(skb, NET_IP_ALIGN); priv->rx_skbuff[i] = skb; priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, bfsize, DMA_FROM_DEVICE); p->des2 = priv->rx_skbuff_dma[i]; priv->hw->ring->init_desc3(des3_as_data_buf, p); DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); } priv->cur_rx = 0; priv->dirty_rx = (unsigned int)(i - rxsize); priv->dma_buf_sz = bfsize; 
buf_sz = bfsize; /* TX INITIALIZATION */ for (i = 0; i < txsize; i++) { priv->tx_skbuff[i] = NULL; priv->dma_tx[i].des2 = 0; } /* In case of Chained mode this sets the des3 to the next * element in the chain */ priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize); priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize); priv->dirty_tx = 0; priv->cur_tx = 0; /* Clear the Rx/Tx descriptors */ priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic); priv->hw->desc->init_tx_desc(priv->dma_tx, txsize); if (netif_msg_hw(priv)) { pr_info("RX descriptor ring:\n"); display_ring(priv->dma_rx, rxsize); pr_info("TX descriptor ring:\n"); display_ring(priv->dma_tx, txsize); } } static void dma_free_rx_skbufs(struct stmmac_priv *priv) { int i; for (i = 0; i < priv->dma_rx_size; i++) { if (priv->rx_skbuff[i]) { dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], priv->dma_buf_sz, DMA_FROM_DEVICE); dev_kfree_skb_any(priv->rx_skbuff[i]); } priv->rx_skbuff[i] = NULL; } } static void dma_free_tx_skbufs(struct stmmac_priv *priv) { int i; for (i = 0; i < priv->dma_tx_size; i++) { if (priv->tx_skbuff[i] != NULL) { struct dma_desc *p = priv->dma_tx + i; if (p->des2) dma_unmap_single(priv->device, p->des2, priv->hw->desc->get_tx_len(p), DMA_TO_DEVICE); dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; } } } static void free_dma_desc_resources(struct stmmac_priv *priv) { /* Release the DMA TX/RX socket buffers */ dma_free_rx_skbufs(priv); dma_free_tx_skbufs(priv); /* Free the region of consistent memory previously allocated for * the DMA */ dma_free_coherent(priv->device, priv->dma_tx_size * sizeof(struct dma_desc), priv->dma_tx, priv->dma_tx_phy); dma_free_coherent(priv->device, priv->dma_rx_size * sizeof(struct dma_desc), priv->dma_rx, priv->dma_rx_phy); kfree(priv->rx_skbuff_dma); kfree(priv->rx_skbuff); kfree(priv->tx_skbuff); } /** * stmmac_dma_operation_mode - HW DMA operation mode * @priv : pointer to the private device 
structure. * Description: it sets the DMA operation mode: tx/rx DMA thresholds * or Store-And-Forward capability. */ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) { if (likely(priv->plat->force_sf_dma_mode || ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) { /* * In case of GMAC, SF mode can be enabled * to perform the TX COE in HW. This depends on: * 1) TX COE if actually supported * 2) There is no bugged Jumbo frame support * that needs to not insert csum in the TDES. */ priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE); tc = SF_DMA_MODE; } else priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); } /** * stmmac_tx: * @priv: private driver structure * Description: it reclaims resources after transmission completes. */ static void stmmac_tx(struct stmmac_priv *priv) { unsigned int txsize = priv->dma_tx_size; spin_lock(&priv->tx_lock); while (priv->dirty_tx != priv->cur_tx) { int last; unsigned int entry = priv->dirty_tx % txsize; struct sk_buff *skb = priv->tx_skbuff[entry]; struct dma_desc *p = priv->dma_tx + entry; /* Check if the descriptor is owned by the DMA. */ if (priv->hw->desc->get_tx_owner(p)) break; /* Verify tx error by looking at the last segment */ last = priv->hw->desc->get_tx_ls(p); if (likely(last)) { int tx_error = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, p, priv->ioaddr); if (likely(tx_error == 0)) { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; } else priv->dev->stats.tx_errors++; } TX_DBG("%s: curr %d, dirty %d\n", __func__, priv->cur_tx, priv->dirty_tx); if (likely(p->des2)) dma_unmap_single(priv->device, p->des2, priv->hw->desc->get_tx_len(p), DMA_TO_DEVICE); priv->hw->ring->clean_desc3(p); if (likely(skb != NULL)) { /* * If there's room in the queue (limit it to size) * we add this skb back into the pool, * if it's the right size. 
*/ if ((skb_queue_len(&priv->rx_recycle) < priv->dma_rx_size) && skb_recycle_check(skb, priv->dma_buf_sz)) __skb_queue_head(&priv->rx_recycle, skb); else dev_kfree_skb(skb); priv->tx_skbuff[entry] = NULL; } priv->hw->desc->release_tx_desc(p); entry = (++priv->dirty_tx) % txsize; } if (unlikely(netif_queue_stopped(priv->dev) && stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { netif_tx_lock(priv->dev); if (netif_queue_stopped(priv->dev) && stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { TX_DBG("%s: restart transmit\n", __func__); netif_wake_queue(priv->dev); } netif_tx_unlock(priv->dev); } spin_unlock(&priv->tx_lock); } static inline void stmmac_enable_irq(struct stmmac_priv *priv) { #ifdef CONFIG_STMMAC_TIMER if (likely(priv->tm->enable)) priv->tm->timer_start(tmrate); else #endif priv->hw->dma->enable_dma_irq(priv->ioaddr); } static inline void stmmac_disable_irq(struct stmmac_priv *priv) { #ifdef CONFIG_STMMAC_TIMER if (likely(priv->tm->enable)) priv->tm->timer_stop(); else #endif priv->hw->dma->disable_dma_irq(priv->ioaddr); } static int stmmac_has_work(struct stmmac_priv *priv) { unsigned int has_work = 0; int rxret, tx_work = 0; rxret = priv->hw->desc->get_rx_owner(priv->dma_rx + (priv->cur_rx % priv->dma_rx_size)); if (priv->dirty_tx != priv->cur_tx) tx_work = 1; if (likely(!rxret || tx_work)) has_work = 1; return has_work; } static inline void _stmmac_schedule(struct stmmac_priv *priv) { if (likely(stmmac_has_work(priv))) { stmmac_disable_irq(priv); napi_schedule(&priv->napi); } } #ifdef CONFIG_STMMAC_TIMER void stmmac_schedule(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); priv->xstats.sched_timer_n++; _stmmac_schedule(priv); } static void stmmac_no_timer_started(unsigned int x) {; }; static void stmmac_no_timer_stopped(void) {; }; #endif /** * stmmac_tx_err: * @priv: pointer to the private device structure * Description: it cleans the descriptors and restarts the transmission * in case of errors. 
*/ static void stmmac_tx_err(struct stmmac_priv *priv) { netif_stop_queue(priv->dev); priv->hw->dma->stop_tx(priv->ioaddr); dma_free_tx_skbufs(priv); priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); priv->dirty_tx = 0; priv->cur_tx = 0; priv->hw->dma->start_tx(priv->ioaddr); priv->dev->stats.tx_errors++; netif_wake_queue(priv->dev); } static void stmmac_dma_interrupt(struct stmmac_priv *priv) { int status; status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); if (likely(status == handle_tx_rx)) _stmmac_schedule(priv); else if (unlikely(status == tx_hard_error_bump_tc)) { /* Try to bump up the dma threshold on this failure */ if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) { tc += 64; priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); priv->xstats.threshold = tc; } } else if (unlikely(status == tx_hard_error)) stmmac_tx_err(priv); } static void stmmac_mmc_setup(struct stmmac_priv *priv) { unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; /* Mask MMC irq, counters are managed in SW and registers * are cleared on each READ eventually. 
*/ dwmac_mmc_intr_all_mask(priv->ioaddr); if (priv->dma_cap.rmon) { dwmac_mmc_ctrl(priv->ioaddr, mode); memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); } else pr_info(" No MAC Management Counters available"); } static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) { u32 hwid = priv->hw->synopsys_uid; /* Only check valid Synopsys Id because old MAC chips * have no HW registers where get the ID */ if (likely(hwid)) { u32 uid = ((hwid & 0x0000ff00) >> 8); u32 synid = (hwid & 0x000000ff); pr_info("STMMAC - user ID: 0x%x, Synopsys ID: 0x%x\n", uid, synid); return synid; } return 0; } /** * stmmac_selec_desc_mode * @dev : device pointer * Description: select the Enhanced/Alternate or Normal descriptors */ static void stmmac_selec_desc_mode(struct stmmac_priv *priv) { if (priv->plat->enh_desc) { pr_info(" Enhanced/Alternate descriptors\n"); priv->hw->desc = &enh_desc_ops; } else { pr_info(" Normal descriptors\n"); priv->hw->desc = &ndesc_ops; } } /** * stmmac_get_hw_features * @priv : private device pointer * Description: * new GMAC chip generations have a new register to indicate the * presence of the optional feature/functions. * This can be also used to override the value passed through the * platform and necessary for old MAC10/100 and GMAC chips. 
*/ static int stmmac_get_hw_features(struct stmmac_priv *priv) { u32 hw_cap = 0; if (priv->hw->dma->get_hw_feature) { hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr); priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMACADRSEL) >> 5; priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6; priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8; priv->dma_cap.pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; priv->dma_cap.pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; /* MMC */ priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; /* IEEE 1588-2002*/ priv->dma_cap.time_stamp = (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12; /* IEEE 1588-2008*/ priv->dma_cap.atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13; /* 802.3az - Energy-Efficient Ethernet (EEE) */ priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14; priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15; /* TX and RX csum */ priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16; priv->dma_cap.rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17; priv->dma_cap.rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18; priv->dma_cap.rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19; /* TX and RX number of channels */ priv->dma_cap.number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20; priv->dma_cap.number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; /* Alternate (enhanced) DESC mode*/ priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; } return hw_cap; } /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. * Description: * This function is the open entry point of the driver. 
* Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. */ static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); int ret; /* Check that the MAC address is valid. If its not, refuse * to bring the device up. The user must specify an * address using the following linux command: * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ if (!is_valid_ether_addr(dev->dev_addr)) { random_ether_addr(dev->dev_addr); pr_warning("%s: generated random MAC address %pM\n", dev->name, dev->dev_addr); } stmmac_verify_args(); #ifdef CONFIG_STMMAC_TIMER priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); if (unlikely(priv->tm == NULL)) { pr_err("%s: ERROR: timer memory alloc failed\n", __func__); return -ENOMEM; } priv->tm->freq = tmrate; /* Test if the external timer can be actually used. * In case of failure continue without timer. */ if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { pr_warning("stmmaceth: cannot attach the external timer.\n"); priv->tm->freq = 0; priv->tm->timer_start = stmmac_no_timer_started; priv->tm->timer_stop = stmmac_no_timer_stopped; } else priv->tm->enable = 1; #endif ret = stmmac_init_phy(dev); if (unlikely(ret)) { pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); goto open_error; } stmmac_get_synopsys_id(priv); priv->hw_cap_support = stmmac_get_hw_features(priv); if (priv->hw_cap_support) { pr_info(" Support DMA HW capability register"); /* We can override some gmac/dma configuration fields: e.g. * enh_desc, tx_coe (e.g. that are passed through the * platform) with the values from the HW capability * register (if supported). 
*/ priv->plat->enh_desc = priv->dma_cap.enh_desc; priv->plat->tx_coe = priv->dma_cap.tx_coe; priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; /* By default disable wol on magic frame if not supported */ if (!priv->dma_cap.pmt_magic_frame) priv->wolopts &= ~WAKE_MAGIC; } else pr_info(" No HW DMA feature register supported"); /* Select the enhnaced/normal descriptor structures */ stmmac_selec_desc_mode(priv); /* PMT module is not integrated in all the MAC devices. */ if (priv->plat->pmt) { pr_info(" Remote wake-up capable\n"); device_set_wakeup_capable(priv->device, 1); } priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); if (priv->rx_coe) pr_info(" Checksum Offload Engine supported\n"); if (priv->plat->tx_coe) pr_info(" Checksum insertion supported\n"); /* Create and initialize the TX/RX descriptors chains. */ priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); init_dma_desc_rings(dev); /* DMA initialization and SW reset */ ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, priv->dma_tx_phy, priv->dma_rx_phy); if (ret < 0) { pr_err("%s: DMA initialization failed\n", __func__); goto open_error; } /* Copy the MAC addr into the HW */ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); /* If required, perform hw setup of the bus. 
*/ if (priv->plat->bus_setup) priv->plat->bus_setup(priv->ioaddr); /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->ioaddr); netdev_update_features(dev); /* Request the IRQ lines */ ret = request_irq(dev->irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", __func__, dev->irq, ret); goto open_error; } /* Enable the MAC Rx/Tx */ stmmac_enable_mac(priv->ioaddr); /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); /* Extra statistics */ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; stmmac_mmc_setup(priv); /* Start the ball rolling... */ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); #ifdef CONFIG_STMMAC_TIMER priv->tm->timer_start(tmrate); #endif /* Dump DMA/MAC registers */ if (netif_msg_hw(priv)) { priv->hw->mac->dump_regs(priv->ioaddr); priv->hw->dma->dump_regs(priv->ioaddr); } if (priv->phydev) phy_start(priv->phydev); napi_enable(&priv->napi); skb_queue_head_init(&priv->rx_recycle); netif_start_queue(dev); return 0; open_error: #ifdef CONFIG_STMMAC_TIMER kfree(priv->tm); #endif if (priv->phydev) phy_disconnect(priv->phydev); return ret; } /** * stmmac_release - close entry point of the driver * @dev : device pointer. * Description: * This is the stop entry point of the driver. 
*/ static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); /* Stop and disconnect the PHY */ if (priv->phydev) { phy_stop(priv->phydev); phy_disconnect(priv->phydev); priv->phydev = NULL; } netif_stop_queue(dev); #ifdef CONFIG_STMMAC_TIMER /* Stop and release the timer */ stmmac_close_ext_timer(); if (priv->tm != NULL) kfree(priv->tm); #endif napi_disable(&priv->napi); skb_queue_purge(&priv->rx_recycle); /* Free the IRQ lines */ free_irq(dev->irq, dev); /* Stop TX/RX DMA and clear the descriptors */ priv->hw->dma->stop_tx(priv->ioaddr); priv->hw->dma->stop_rx(priv->ioaddr); /* Release and free the Rx/Tx resources */ free_dma_desc_resources(priv); /* Disable the MAC Rx/Tx */ stmmac_disable_mac(priv->ioaddr); netif_carrier_off(dev); return 0; } /** * stmmac_xmit: * @skb : the socket buffer * @dev : device pointer * Description : Tx entry point of the driver. */ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); unsigned int txsize = priv->dma_tx_size; unsigned int entry; int i, csum_insertion = 0; int nfrags = skb_shinfo(skb)->nr_frags; struct dma_desc *desc, *first; unsigned int nopaged_len = skb_headlen(skb); if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); /* This is a hard error, log it. */ pr_err("%s: BUG! Tx Ring full when queue awake\n", __func__); } return NETDEV_TX_BUSY; } spin_lock(&priv->tx_lock); entry = priv->cur_tx % txsize; #ifdef STMMAC_XMIT_DEBUG if ((skb->len > ETH_FRAME_LEN) || nfrags) pr_info("stmmac xmit:\n" "\tskb addr %p - len: %d - nopaged_len: %d\n" "\tn_frags: %d - ip_summed: %d - %s gso\n", skb, skb->len, nopaged_len, nfrags, skb->ip_summed, !skb_is_gso(skb) ? 
"isn't" : "is"); #endif csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); desc = priv->dma_tx + entry; first = desc; #ifdef STMMAC_XMIT_DEBUG if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN)) pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n" "\t\tn_frags: %d, ip_summed: %d\n", skb->len, nopaged_len, nfrags, skb->ip_summed); #endif priv->tx_skbuff[entry] = skb; if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) { entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion); desc = priv->dma_tx + entry; } else { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum_insertion); } for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int len = skb_frag_size(frag); entry = (++priv->cur_tx) % txsize; desc = priv->dma_tx + entry; TX_DBG("\t[entry %d] segment len: %d\n", entry, len); desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); priv->tx_skbuff[entry] = NULL; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion); wmb(); priv->hw->desc->set_tx_owner(desc); wmb(); } /* Interrupt on completition only for the latest segment */ priv->hw->desc->close_tx_desc(desc); #ifdef CONFIG_STMMAC_TIMER /* Clean IC while using timer */ if (likely(priv->tm->enable)) priv->hw->desc->clear_tx_ic(desc); #endif wmb(); /* To avoid raise condition */ priv->hw->desc->set_tx_owner(first); wmb(); priv->cur_tx++; #ifdef STMMAC_XMIT_DEBUG if (netif_msg_pktdata(priv)) { pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, " "first=%p, nfrags=%d\n", (priv->cur_tx % txsize), (priv->dirty_tx % txsize), entry, first, nfrags); display_ring(priv->dma_tx, txsize); pr_info(">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); } #endif if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { TX_DBG("%s: stop transmitted packets\n", __func__); netif_stop_queue(dev); } dev->stats.tx_bytes += skb->len; 
skb_tx_timestamp(skb); priv->hw->dma->enable_dma_transmission(priv->ioaddr); spin_unlock(&priv->tx_lock); return NETDEV_TX_OK; } static inline void stmmac_rx_refill(struct stmmac_priv *priv) { unsigned int rxsize = priv->dma_rx_size; int bfsize = priv->dma_buf_sz; struct dma_desc *p = priv->dma_rx; for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { unsigned int entry = priv->dirty_rx % rxsize; if (likely(priv->rx_skbuff[entry] == NULL)) { struct sk_buff *skb; skb = __skb_dequeue(&priv->rx_recycle); if (skb == NULL) skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); if (unlikely(skb == NULL)) break; priv->rx_skbuff[entry] = skb; priv->rx_skbuff_dma[entry] = dma_map_single(priv->device, skb->data, bfsize, DMA_FROM_DEVICE); (p + entry)->des2 = priv->rx_skbuff_dma[entry]; if (unlikely(priv->plat->has_gmac)) priv->hw->ring->refill_desc3(bfsize, p + entry); RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); } wmb(); priv->hw->desc->set_rx_owner(p + entry); wmb(); } } static int stmmac_rx(struct stmmac_priv *priv, int limit) { unsigned int rxsize = priv->dma_rx_size; unsigned int entry = priv->cur_rx % rxsize; unsigned int next_entry; unsigned int count = 0; struct dma_desc *p = priv->dma_rx + entry; struct dma_desc *p_next; #ifdef STMMAC_RX_DEBUG if (netif_msg_hw(priv)) { pr_debug(">>> stmmac_rx: descriptor ring:\n"); display_ring(priv->dma_rx, rxsize); } #endif count = 0; while (!priv->hw->desc->get_rx_owner(p)) { int status; if (count >= limit) break; count++; next_entry = (++priv->cur_rx) % rxsize; p_next = priv->dma_rx + next_entry; prefetch(p_next); /* read the status of the incoming frame */ status = (priv->hw->desc->rx_status(&priv->dev->stats, &priv->xstats, p)); if (unlikely(status == discard_frame)) priv->dev->stats.rx_errors++; else { struct sk_buff *skb; int frame_len; frame_len = priv->hw->desc->get_rx_frame_len(p); /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 * Type frames (LLC/LLC-SNAP) */ if (unlikely(status != llc_snap)) frame_len -= 
ETH_FCS_LEN; #ifdef STMMAC_RX_DEBUG if (frame_len > ETH_FRAME_LEN) pr_debug("\tRX frame size %d, COE status: %d\n", frame_len, status); if (netif_msg_hw(priv)) pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", p, entry, p->des2); #endif skb = priv->rx_skbuff[entry]; if (unlikely(!skb)) { pr_err("%s: Inconsistent Rx descriptor chain\n", priv->dev->name); priv->dev->stats.rx_dropped++; break; } prefetch(skb->data - NET_IP_ALIGN); priv->rx_skbuff[entry] = NULL; skb_put(skb, frame_len); dma_unmap_single(priv->device, priv->rx_skbuff_dma[entry], priv->dma_buf_sz, DMA_FROM_DEVICE); #ifdef STMMAC_RX_DEBUG if (netif_msg_pktdata(priv)) { pr_info(" frame received (%dbytes)", frame_len); print_pkt(skb->data, frame_len); } #endif skb->protocol = eth_type_trans(skb, priv->dev); if (unlikely(!priv->rx_coe)) { /* No RX COE for old mac10/100 devices */ skb_checksum_none_assert(skb); netif_receive_skb(skb); } else { skb->ip_summed = CHECKSUM_UNNECESSARY; napi_gro_receive(&priv->napi, skb); } priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; } entry = next_entry; p = p_next; /* use prefetched values */ } stmmac_rx_refill(priv); priv->xstats.rx_pkt_n += count; return count; } /** * stmmac_poll - stmmac poll method (NAPI) * @napi : pointer to the napi structure. * @budget : maximum number of packets that the current CPU can receive from * all interfaces. * Description : * This function implements the the reception process. * Also it runs the TX completion thread */ static int stmmac_poll(struct napi_struct *napi, int budget) { struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi); int work_done = 0; priv->xstats.poll_n++; stmmac_tx(priv); work_done = stmmac_rx(priv, budget); if (work_done < budget) { napi_complete(napi); stmmac_enable_irq(priv); } return work_done; } /** * stmmac_tx_timeout * @dev : Pointer to net device structure * Description: this function is called when a packet transmission fails to * complete within a reasonable tmrate. 
The driver will mark the error in the * netdev structure and arrange for the device to be reset to a sane state * in order to transmit a new packet. */ static void stmmac_tx_timeout(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); /* Clear Tx resources and restart transmitting again */ stmmac_tx_err(priv); } /* Configuration changes (passed on by ifconfig) */ static int stmmac_config(struct net_device *dev, struct ifmap *map) { if (dev->flags & IFF_UP) /* can't act on a running interface */ return -EBUSY; /* Don't allow changing the I/O address */ if (map->base_addr != dev->base_addr) { pr_warning("%s: can't change I/O address\n", dev->name); return -EOPNOTSUPP; } /* Don't allow changing the IRQ */ if (map->irq != dev->irq) { pr_warning("%s: can't change IRQ number %d\n", dev->name, dev->irq); return -EOPNOTSUPP; } /* ignore other fields */ return 0; } /** * stmmac_set_rx_mode - entry point for multicast addressing * @dev : pointer to the device structure * Description: * This function is a driver entry point which gets called by the kernel * whenever multicast addresses must be enabled/disabled. * Return value: * void. */ static void stmmac_set_rx_mode(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); spin_lock(&priv->lock); priv->hw->mac->set_filter(dev); spin_unlock(&priv->lock); } /** * stmmac_change_mtu - entry point to change MTU size for the device. * @dev : device pointer. * @new_mtu : the new MTU size for the device. * Description: the Maximum Transfer Unit (MTU) is used by the network layer * to drive packet transmission. Ethernet has an MTU of 1500 octets * (ETH_DATA_LEN). This value can be changed with ifconfig. * Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. 
*/ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { struct stmmac_priv *priv = netdev_priv(dev); int max_mtu; if (netif_running(dev)) { pr_err("%s: must be stopped to change its MTU\n", dev->name); return -EBUSY; } if (priv->plat->enh_desc) max_mtu = JUMBO_LEN; else max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); if ((new_mtu < 46) || (new_mtu > max_mtu)) { pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu); return -EINVAL; } dev->mtu = new_mtu; netdev_update_features(dev); return 0; } static u32 stmmac_fix_features(struct net_device *dev, u32 features) { struct stmmac_priv *priv = netdev_priv(dev); if (!priv->rx_coe) features &= ~NETIF_F_RXCSUM; if (!priv->plat->tx_coe) features &= ~NETIF_F_ALL_CSUM; /* Some GMAC devices have a bugged Jumbo frame support that * needs to have the Tx COE disabled for oversized frames * (due to limited buffer sizes). In this case we disable * the TX csum insertionin the TDES and not use SF. */ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_ALL_CSUM; return features; } static irqreturn_t stmmac_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); if (unlikely(!dev)) { pr_err("%s: invalid dev pointer\n", __func__); return IRQ_NONE; } if (priv->plat->has_gmac) /* To handle GMAC own interrupts */ priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr); stmmac_dma_interrupt(priv); return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER /* Polling receive - used by NETCONSOLE and other diagnostic tools * to allow network I/O with interrupts disabled. */ static void stmmac_poll_controller(struct net_device *dev) { disable_irq(dev->irq); stmmac_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif /** * stmmac_ioctl - Entry point for the Ioctl * @dev: Device pointer. 
* @rq: An IOCTL specefic structure, that can contain a pointer to * a proprietary structure used to pass information to the driver. * @cmd: IOCTL command * Description: * Currently there are no special functionality supported in IOCTL, just the * phy_mii_ioctl(...) can be invoked. */ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct stmmac_priv *priv = netdev_priv(dev); int ret; if (!netif_running(dev)) return -EINVAL; if (!priv->phydev) return -EINVAL; ret = phy_mii_ioctl(priv->phydev, rq, cmd); return ret; } #ifdef CONFIG_STMMAC_DEBUG_FS static struct dentry *stmmac_fs_dir; static struct dentry *stmmac_rings_status; static struct dentry *stmmac_dma_cap; static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) { struct tmp_s { u64 a; unsigned int b; unsigned int c; }; int i; struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); seq_printf(seq, "=======================\n"); seq_printf(seq, " RX descriptor ring\n"); seq_printf(seq, "=======================\n"); for (i = 0; i < priv->dma_rx_size; i++) { struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i); seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", i, (unsigned int)(x->a), (unsigned int)((x->a) >> 32), x->b, x->c); seq_printf(seq, "\n"); } seq_printf(seq, "\n"); seq_printf(seq, "=======================\n"); seq_printf(seq, " TX descriptor ring\n"); seq_printf(seq, "=======================\n"); for (i = 0; i < priv->dma_tx_size; i++) { struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i); seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", i, (unsigned int)(x->a), (unsigned int)((x->a) >> 32), x->b, x->c); seq_printf(seq, "\n"); } return 0; } static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file) { return single_open(file, stmmac_sysfs_ring_read, inode->i_private); } static const struct file_operations stmmac_rings_status_fops = { .owner = THIS_MODULE, .open = stmmac_sysfs_ring_open, .read = 
seq_read, .llseek = seq_lseek, .release = seq_release, }; static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); if (!priv->hw_cap_support) { seq_printf(seq, "DMA HW features not supported\n"); return 0; } seq_printf(seq, "==============================\n"); seq_printf(seq, "\tDMA HW features\n"); seq_printf(seq, "==============================\n"); seq_printf(seq, "\t10/100 Mbps %s\n", (priv->dma_cap.mbps_10_100) ? "Y" : "N"); seq_printf(seq, "\t1000 Mbps %s\n", (priv->dma_cap.mbps_1000) ? "Y" : "N"); seq_printf(seq, "\tHalf duple %s\n", (priv->dma_cap.half_duplex) ? "Y" : "N"); seq_printf(seq, "\tHash Filter: %s\n", (priv->dma_cap.hash_filter) ? "Y" : "N"); seq_printf(seq, "\tMultiple MAC address registers: %s\n", (priv->dma_cap.multi_addr) ? "Y" : "N"); seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n", (priv->dma_cap.pcs) ? "Y" : "N"); seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", (priv->dma_cap.sma_mdio) ? "Y" : "N"); seq_printf(seq, "\tPMT Remote wake up: %s\n", (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); seq_printf(seq, "\tPMT Magic Frame: %s\n", (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); seq_printf(seq, "\tRMON module: %s\n", (priv->dma_cap.rmon) ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", (priv->dma_cap.time_stamp) ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n", (priv->dma_cap.atime_stamp) ? "Y" : "N"); seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n", (priv->dma_cap.eee) ? "Y" : "N"); seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); seq_printf(seq, "\tChecksum Offload in TX: %s\n", (priv->dma_cap.tx_coe) ? "Y" : "N"); seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", (priv->dma_cap.rx_coe_type2) ? 
"Y" : "N"); seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); seq_printf(seq, "\tNumber of Additional RX channel: %d\n", priv->dma_cap.number_rx_channel); seq_printf(seq, "\tNumber of Additional TX channel: %d\n", priv->dma_cap.number_tx_channel); seq_printf(seq, "\tEnhanced descriptors: %s\n", (priv->dma_cap.enh_desc) ? "Y" : "N"); return 0; } static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) { return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); } static const struct file_operations stmmac_dma_cap_fops = { .owner = THIS_MODULE, .open = stmmac_sysfs_dma_cap_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int stmmac_init_fs(struct net_device *dev) { /* Create debugfs entries */ stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { pr_err("ERROR %s, debugfs create directory failed\n", STMMAC_RESOURCE_NAME); return -ENOMEM; } /* Entry to report DMA RX/TX rings */ stmmac_rings_status = debugfs_create_file("descriptors_status", S_IRUGO, stmmac_fs_dir, dev, &stmmac_rings_status_fops); if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { pr_info("ERROR creating stmmac ring debugfs file\n"); debugfs_remove(stmmac_fs_dir); return -ENOMEM; } /* Entry to report the DMA HW features */ stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir, dev, &stmmac_dma_cap_fops); if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) { pr_info("ERROR creating stmmac MMC debugfs file\n"); debugfs_remove(stmmac_rings_status); debugfs_remove(stmmac_fs_dir); return -ENOMEM; } return 0; } static void stmmac_exit_fs(void) { debugfs_remove(stmmac_rings_status); debugfs_remove(stmmac_dma_cap); debugfs_remove(stmmac_fs_dir); } #endif /* CONFIG_STMMAC_DEBUG_FS */ static const struct net_device_ops stmmac_netdev_ops = { .ndo_open = stmmac_open, .ndo_start_xmit = stmmac_xmit, .ndo_stop = stmmac_release, 
.ndo_change_mtu = stmmac_change_mtu, .ndo_fix_features = stmmac_fix_features, .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, .ndo_do_ioctl = stmmac_ioctl, .ndo_set_config = stmmac_config, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = stmmac_poll_controller, #endif .ndo_set_mac_address = eth_mac_addr, }; /** * stmmac_probe - Initialization of the adapter . * @dev : device pointer * Description: The function initializes the network device structure for * the STMMAC driver. It also calls the low level routines * in order to init the HW (i.e. the DMA engine) */ static int stmmac_probe(struct net_device *dev) { int ret = 0; struct stmmac_priv *priv = netdev_priv(dev); ether_setup(dev); dev->netdev_ops = &stmmac_netdev_ops; stmmac_set_ethtool_ops(dev); dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; dev->features |= dev->hw_features | NETIF_F_HIGHDMA; dev->watchdog_timeo = msecs_to_jiffies(watchdog); #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ dev->features |= NETIF_F_HW_VLAN_RX; #endif priv->msg_enable = netif_msg_init(debug, default_msg_level); if (flow_ctrl) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ priv->pause = pause; netif_napi_add(dev, &priv->napi, stmmac_poll, 64); /* Get the MAC address */ priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr, dev->dev_addr, 0); if (!is_valid_ether_addr(dev->dev_addr)) pr_warning("\tno valid MAC address;" "please, use ifconfig or nwhwconfig!\n"); spin_lock_init(&priv->lock); spin_lock_init(&priv->tx_lock); ret = register_netdev(dev); if (ret) { pr_err("%s: ERROR %i registering the device\n", __func__, ret); return -ENODEV; } DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", (dev->features & NETIF_F_IP_CSUM) ? 
"on" : "off"); return ret; } /** * stmmac_mac_device_setup * @dev : device pointer * Description: select and initialise the mac device (mac100 or Gmac). */ static int stmmac_mac_device_setup(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); struct mac_device_info *device; if (priv->plat->has_gmac) { dev->priv_flags |= IFF_UNICAST_FLT; device = dwmac1000_setup(priv->ioaddr); } else { device = dwmac100_setup(priv->ioaddr); } if (!device) return -ENOMEM; priv->hw = device; priv->hw->ring = &ring_mode_ops; if (device_can_wakeup(priv->device)) { priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ enable_irq_wake(priv->wol_irq); } return 0; } /** * stmmac_dvr_probe * @pdev: platform device pointer * Description: the driver is initialized through platform_device. */ static int stmmac_dvr_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; void __iomem *addr = NULL; struct net_device *ndev = NULL; struct stmmac_priv *priv = NULL; struct plat_stmmacenet_data *plat_dat; pr_info("STMMAC driver:\n\tplatform registration... 
"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; pr_info("\tdone!\n"); if (!request_mem_region(res->start, resource_size(res), pdev->name)) { pr_err("%s: ERROR: memory allocation failed" "cannot get the I/O addr 0x%x\n", __func__, (unsigned int)res->start); return -EBUSY; } addr = ioremap(res->start, resource_size(res)); if (!addr) { pr_err("%s: ERROR: memory mapping failed\n", __func__); ret = -ENOMEM; goto out_release_region; } ndev = alloc_etherdev(sizeof(struct stmmac_priv)); if (!ndev) { pr_err("%s: ERROR: allocating the device\n", __func__); ret = -ENOMEM; goto out_unmap; } SET_NETDEV_DEV(ndev, &pdev->dev); /* Get the MAC information */ ndev->irq = platform_get_irq_byname(pdev, "macirq"); if (ndev->irq == -ENXIO) { pr_err("%s: ERROR: MAC IRQ configuration " "information not found\n", __func__); ret = -ENXIO; goto out_free_ndev; } priv = netdev_priv(ndev); priv->device = &(pdev->dev); priv->dev = ndev; plat_dat = pdev->dev.platform_data; priv->plat = plat_dat; priv->ioaddr = addr; /* * On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq * The external wake up irq can be passed through the platform code * named as "eth_wake_irq" * * In case the wake up interrupt is not passed from the platform * so the driver will continue to use the mac irq (ndev->irq) */ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); if (priv->wol_irq == -ENXIO) priv->wol_irq = ndev->irq; platform_set_drvdata(pdev, ndev); /* Set the I/O base addr */ ndev->base_addr = (unsigned long)addr; /* Custom initialisation */ if (priv->plat->init) { ret = priv->plat->init(pdev); if (unlikely(ret)) goto out_free_ndev; } /* MAC HW device detection */ ret = stmmac_mac_device_setup(ndev); if (ret < 0) goto out_plat_exit; /* Network Device Registration */ ret = stmmac_probe(ndev); if (ret < 0) goto out_plat_exit; /* Override with kernel parameters if supplied XXX CRS XXX * this needs to have multiple instances */ if ((phyaddr >= 0) && (phyaddr <= 31)) priv->plat->phy_addr = phyaddr; pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" "\tIO base addr: 0x%p)\n", ndev->name, pdev->name, pdev->id, ndev->irq, addr); /* MDIO bus Registration */ pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id); ret = stmmac_mdio_register(ndev); if (ret < 0) goto out_unregister; pr_debug("registered!\n"); #ifdef CONFIG_STMMAC_DEBUG_FS ret = stmmac_init_fs(ndev); if (ret < 0) pr_warning("\tFailed debugFS registration"); #endif return 0; out_unregister: unregister_netdev(ndev); out_plat_exit: if (priv->plat->exit) priv->plat->exit(pdev); out_free_ndev: free_netdev(ndev); platform_set_drvdata(pdev, NULL); out_unmap: iounmap(addr); out_release_region: release_mem_region(res->start, resource_size(res)); return ret; } /** * stmmac_dvr_remove * @pdev: platform device pointer * Description: this function resets the TX/RX processes, disables the MAC RX/TX * changes the link status, releases the DMA descriptor rings, * unregisters the MDIO bus and unmaps the allocated memory. 
*/ static int stmmac_dvr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); struct resource *res; pr_info("%s:\n\tremoving driver", __func__); priv->hw->dma->stop_rx(priv->ioaddr); priv->hw->dma->stop_tx(priv->ioaddr); stmmac_disable_mac(priv->ioaddr); netif_carrier_off(ndev); stmmac_mdio_unregister(ndev); if (priv->plat->exit) priv->plat->exit(pdev); platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); iounmap((void *)priv->ioaddr); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); #ifdef CONFIG_STMMAC_DEBUG_FS stmmac_exit_fs(); #endif free_netdev(ndev); return 0; } #ifdef CONFIG_PM static int stmmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); int dis_ic = 0; if (!ndev || !netif_running(ndev)) return 0; if (priv->phydev) phy_stop(priv->phydev); spin_lock(&priv->lock); netif_device_detach(ndev); netif_stop_queue(ndev); #ifdef CONFIG_STMMAC_TIMER priv->tm->timer_stop(); if (likely(priv->tm->enable)) dis_ic = 1; #endif napi_disable(&priv->napi); /* Stop TX/RX DMA */ priv->hw->dma->stop_tx(priv->ioaddr); priv->hw->dma->stop_rx(priv->ioaddr); /* Clear the Rx/Tx descriptors */ priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, dis_ic); priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); /* Enable Power down mode by programming the PMT regs */ if (device_may_wakeup(priv->device)) priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); else stmmac_disable_mac(priv->ioaddr); spin_unlock(&priv->lock); return 0; } static int stmmac_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); if (!netif_running(ndev)) return 0; spin_lock(&priv->lock); /* Power Down bit, into the PM register, is cleared * automatically as soon as a magic packet or a Wake-up frame * is 
received. Anyway, it's better to manually clear * this bit because it can generate problems while resuming * from another devices (e.g. serial console). */ if (device_may_wakeup(priv->device)) priv->hw->mac->pmt(priv->ioaddr, 0); netif_device_attach(ndev); /* Enable the MAC and DMA */ stmmac_enable_mac(priv->ioaddr); priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); #ifdef CONFIG_STMMAC_TIMER if (likely(priv->tm->enable)) priv->tm->timer_start(tmrate); #endif napi_enable(&priv->napi); netif_start_queue(ndev); spin_unlock(&priv->lock); if (priv->phydev) phy_start(priv->phydev); return 0; } static int stmmac_freeze(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); if (!ndev || !netif_running(ndev)) return 0; return stmmac_release(ndev); } static int stmmac_restore(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); if (!ndev || !netif_running(ndev)) return 0; return stmmac_open(ndev); } static const struct dev_pm_ops stmmac_pm_ops = { .suspend = stmmac_suspend, .resume = stmmac_resume, .freeze = stmmac_freeze, .thaw = stmmac_restore, .restore = stmmac_restore, }; #else static const struct dev_pm_ops stmmac_pm_ops; #endif /* CONFIG_PM */ static struct platform_driver stmmac_driver = { .probe = stmmac_dvr_probe, .remove = stmmac_dvr_remove, .driver = { .name = STMMAC_RESOURCE_NAME, .owner = THIS_MODULE, .pm = &stmmac_pm_ops, }, }; /** * stmmac_init_module - Entry point for the driver * Description: This function is the entry point for the driver. */ static int __init stmmac_init_module(void) { int ret; ret = platform_driver_register(&stmmac_driver); return ret; } /** * stmmac_cleanup_module - Cleanup routine for the driver * Description: This function is the cleanup routine for the driver. 
*/ static void __exit stmmac_cleanup_module(void) { platform_driver_unregister(&stmmac_driver); } #ifndef MODULE static int __init stmmac_cmdline_opt(char *str) { char *opt; if (!str || !*str) return -EINVAL; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "debug:", 6)) { if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug)) goto err; } else if (!strncmp(opt, "phyaddr:", 8)) { if (strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr)) goto err; } else if (!strncmp(opt, "dma_txsize:", 11)) { if (strict_strtoul(opt + 11, 0, (unsigned long *)&dma_txsize)) goto err; } else if (!strncmp(opt, "dma_rxsize:", 11)) { if (strict_strtoul(opt + 11, 0, (unsigned long *)&dma_rxsize)) goto err; } else if (!strncmp(opt, "buf_sz:", 7)) { if (strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz)) goto err; } else if (!strncmp(opt, "tc:", 3)) { if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc)) goto err; } else if (!strncmp(opt, "watchdog:", 9)) { if (strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog)) goto err; } else if (!strncmp(opt, "flow_ctrl:", 10)) { if (strict_strtoul(opt + 10, 0, (unsigned long *)&flow_ctrl)) goto err; } else if (!strncmp(opt, "pause:", 6)) { if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause)) goto err; #ifdef CONFIG_STMMAC_TIMER } else if (!strncmp(opt, "tmrate:", 7)) { if (strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate)) goto err; #endif } } return 0; err: pr_err("%s: ERROR broken module parameter conversion", __func__); return -EINVAL; } __setup("stmmaceth=", stmmac_cmdline_opt); #endif module_init(stmmac_init_module); module_exit(stmmac_cleanup_module); MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_LICENSE("GPL");
gpl-2.0
xkfz007/binutils-gdb
gdb/testsuite/gdb.base/gcore.c
36
1515
/* Copyright 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/*
 * Test GDB's ability to save and reload a corefile.
 */

/* NOTE(review): this program exists only as a fixture for the gcore
   test; the symbol names and the concrete values stored in each array
   are presumably inspected by the accompanying .exp script after the
   core file is reloaded — keep names and values unchanged.  */

#include <stdlib.h>
#include <string.h>

/* Data in three distinct sections so the corefile captures .data
   (extern_array, static_array), .bss (un_initialized_array) and the
   heap (heap_string).  */
int extern_array[4] = {1, 2, 3, 4};
static int static_array[4] = {5, 6, 7, 8};
static int un_initialized_array[4];
static char *heap_string;

/* Innermost frame; a convenient place for the test to set a
   breakpoint before the core is generated.  */
void
terminal_func ()
{
  return;
}

/* Populates heap and .bss data and a stack-local array, then calls
   terminal_func so all of them are live when the core is taken.  */
void
array_func ()
{
  int local_array[4];
  int i;

  heap_string = (char *) malloc (80);
  strcpy (heap_string, "I'm a little teapot, short and stout...");
  for (i = 0; i < 4; i++)
    {
      un_initialized_array[i] = extern_array[i] + 8;
      local_array[i] = extern_array[i] + 12;
    }
  terminal_func ();
}

/* Recursive factorial; builds a multi-frame backtrace for the
   corefile, then fills in the data via array_func.  Returns value!.  */
int
factorial_func (int value)
{
  if (value > 1)
    {
      value *= factorial_func (value - 1);
    }
  array_func ();
  return (value);
}

int
main()
{
  factorial_func (6);
  return 0;
}
gpl-2.0
martinbrook/tesco-hudl
net/core/sock.c
292
66200
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Generic socket support routines. Memory allocators, socket lock/release * handler for protocols to use and generic option handler. * * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Florian La Roche, <flla@stud.uni-sb.de> * Alan Cox, <A.Cox@swansea.ac.uk> * * Fixes: * Alan Cox : Numerous verify_area() problems * Alan Cox : Connecting on a connecting socket * now returns an error for tcp. * Alan Cox : sock->protocol is set correctly. * and is not sometimes left as 0. * Alan Cox : connect handles icmp errors on a * connect properly. Unfortunately there * is a restart syscall nasty there. I * can't match BSD without hacking the C * library. Ideas urgently sought! * Alan Cox : Disallow bind() to addresses that are * not ours - especially broadcast ones!! * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost) * Alan Cox : sock_wfree/sock_rfree don't destroy sockets, * instead they leave that for the DESTROY timer. * Alan Cox : Clean up error flag in accept * Alan Cox : TCP ack handling is buggy, the DESTROY timer * was buggy. Put a remove_sock() in the handler * for memory when we hit 0. Also altered the timer * code. The ACK stuff can wait and needs major * TCP layer surgery. * Alan Cox : Fixed TCP ack bug, removed remove sock * and fixed timer/inet_bh race. * Alan Cox : Added zapped flag for TCP * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing. * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so... 
* Rick Sladkey : Relaxed UDP rules for matching packets. * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support * Pauline Middelink : identd support * Alan Cox : Fixed connect() taking signals I think. * Alan Cox : SO_LINGER supported * Alan Cox : Error reporting fixes * Anonymous : inet_create tidied up (sk->reuse setting) * Alan Cox : inet sockets don't set sk->type! * Alan Cox : Split socket option code * Alan Cox : Callbacks * Alan Cox : Nagle flag for Charles & Johannes stuff * Alex : Removed restriction on inet fioctl * Alan Cox : Splitting INET from NET core * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt() * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code * Alan Cox : Split IP from generic code * Alan Cox : New kfree_skbmem() * Alan Cox : Make SO_DEBUG superuser only. * Alan Cox : Allow anyone to clear SO_DEBUG * (compatibility fix) * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput. * Alan Cox : Allocator for a socket is settable. * Alan Cox : SO_ERROR includes soft errors. * Alan Cox : Allow NULL arguments on some SO_ opts * Alan Cox : Generic socket allocation to make hooks * easier (suggested by Craig Metz). * Michael Pall : SO_ERROR returns positive errno again * Steve Whitehouse: Added default destructor to free * protocol private data. * Steve Whitehouse: Added various other default routines * common to several socket families. * Chris Evans : Call suser() check last on F_SETOWN * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER. * Andi Kleen : Add sock_kmalloc()/sock_kfree_s() * Andi Kleen : Fix write_space callback * Chris Evans : Security fixes - signedness again * Arnaldo C. Melo : cleanups, use skb_queue_purge * * To Fix: * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/tcp.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/user_namespace.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/netdevice.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/net_namespace.h> #include <net/request_sock.h> #include <net/sock.h> #include <linux/net_tstamp.h> #include <net/xfrm.h> #include <linux/ipsec.h> #include <net/cls_cgroup.h> #include <linux/filter.h> #ifdef CONFIG_INET #include <net/tcp.h> #endif /* * Each address family might have different locking rules, so we have * one slock key per address family: */ static struct lock_class_key af_family_keys[AF_MAX]; static struct lock_class_key af_family_slock_keys[AF_MAX]; /* * Make lock validator output more readable. 
(we pre-construct these * strings build-time, so that runtime initialization of socket * locks is fast): */ static const char *const af_family_key_strings[AF_MAX+1] = { "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" , "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" , "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" , "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" , "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" , "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , "sk_lock-AF_MAX" }; static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" , "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" , "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" , "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" , "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" , "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , "slock-27" , "slock-28" , "slock-AF_CAN" , "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , "slock-AF_MAX" }; static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", 
"clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" , "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" , "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" , "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" , "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" , "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , "clock-27" , "clock-28" , "clock-AF_CAN" , "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , "clock-AF_MAX" }; /* * sk_callback_lock locking rules are per-address-family, * so split the lock classes by using a per-AF key: */ static struct lock_class_key af_callback_keys[AF_MAX]; /* Take into consideration the size of the struct sk_buff overhead in the * determination of these values, since that is non-constant across * platforms. This makes socket queueing behavior and performance * not depend upon such differences. */ #define _SK_MEM_PACKETS 256 #define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256) #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) /* Run time adjustable parameters. 
*/ __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; /* Maximal space eaten by iovec or ancillary data plus some space */ int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); EXPORT_SYMBOL(sysctl_optmem_max); #if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) int net_cls_subsys_id = -1; EXPORT_SYMBOL_GPL(net_cls_subsys_id); #endif static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) { struct timeval tv; if (optlen < sizeof(tv)) return -EINVAL; if (copy_from_user(&tv, optval, sizeof(tv))) return -EFAULT; if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC) return -EDOM; if (tv.tv_sec < 0) { static int warned __read_mostly; *timeo_p = 0; if (warned < 10 && net_ratelimit()) { warned++; printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) " "tries to set negative timeout\n", current->comm, task_pid_nr(current)); } return 0; } *timeo_p = MAX_SCHEDULE_TIMEOUT; if (tv.tv_sec == 0 && tv.tv_usec == 0) return 0; if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1)) *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ); return 0; } static void sock_warn_obsolete_bsdism(const char *name) { static int warned; static char warncomm[TASK_COMM_LEN]; if (strcmp(warncomm, current->comm) && warned < 5) { strcpy(warncomm, current->comm); printk(KERN_WARNING "process `%s' is using obsolete " "%s SO_BSDCOMPAT\n", warncomm, name); warned++; } } static void sock_disable_timestamp(struct sock *sk, int flag) { if (sock_flag(sk, flag)) { sock_reset_flag(sk, flag); if (!sock_flag(sk, SOCK_TIMESTAMP) && !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) { net_disable_timestamp(); } } } int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; int skb_len; unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; /* Cast sk->rcvbuf to unsigned... 
It's pointless, but reduces number of warnings when compiling with -W --ANK */ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned)sk->sk_rcvbuf) { atomic_inc(&sk->sk_drops); return -ENOMEM; } err = sk_filter(sk, skb); if (err) return err; if (!sk_rmem_schedule(sk, skb->truesize)) { atomic_inc(&sk->sk_drops); return -ENOBUFS; } skb->dev = NULL; skb_set_owner_r(skb, sk); /* Cache the SKB length before we tack it onto the receive * queue. Once it is added it no longer belongs to us and * may be freed by other threads of control pulling packets * from the queue. */ skb_len = skb->len; /* we escape from rcu protected region, make sure we dont leak * a norefcounted dst */ skb_dst_force(skb); spin_lock_irqsave(&list->lock, flags); skb->dropcount = atomic_read(&sk->sk_drops); __skb_queue_tail(list, skb); spin_unlock_irqrestore(&list->lock, flags); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb_len); return 0; } EXPORT_SYMBOL(sock_queue_rcv_skb); int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) { int rc = NET_RX_SUCCESS; if (sk_filter(sk, skb)) goto discard_and_relse; skb->dev = NULL; if (sk_rcvqueues_full(sk, skb)) { atomic_inc(&sk->sk_drops); goto discard_and_relse; } if (nested) bh_lock_sock_nested(sk); else bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { /* * trylock + unlock semantics: */ mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); rc = sk_backlog_rcv(sk, skb); mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); } else if (sk_add_backlog(sk, skb)) { bh_unlock_sock(sk); atomic_inc(&sk->sk_drops); goto discard_and_relse; } bh_unlock_sock(sk); out: sock_put(sk); return rc; discard_and_relse: kfree_skb(skb); goto out; } EXPORT_SYMBOL(sk_receive_skb); void sk_reset_txq(struct sock *sk) { sk_tx_queue_clear(sk); } EXPORT_SYMBOL(sk_reset_txq); struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) { struct dst_entry *dst = __sk_dst_get(sk); if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 
sk_tx_queue_clear(sk); rcu_assign_pointer(sk->sk_dst_cache, NULL); dst_release(dst); return NULL; } return dst; } EXPORT_SYMBOL(__sk_dst_check); struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) { struct dst_entry *dst = sk_dst_get(sk); if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { sk_dst_reset(sk); dst_release(dst); return NULL; } return dst; } EXPORT_SYMBOL(sk_dst_check); static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) { int ret = -ENOPROTOOPT; #ifdef CONFIG_NETDEVICES struct net *net = sock_net(sk); char devname[IFNAMSIZ]; int index; /* Sorry... */ ret = -EPERM; if (!capable(CAP_NET_RAW)) goto out; ret = -EINVAL; if (optlen < 0) goto out; /* Bind this socket to a particular device like "eth0", * as specified in the passed interface name. If the * name is "" or the option length is zero the socket * is not bound. */ if (optlen > IFNAMSIZ - 1) optlen = IFNAMSIZ - 1; memset(devname, 0, sizeof(devname)); ret = -EFAULT; if (copy_from_user(devname, optval, optlen)) goto out; index = 0; if (devname[0] != '\0') { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_name_rcu(net, devname); if (dev) index = dev->ifindex; rcu_read_unlock(); ret = -ENODEV; if (!dev) goto out; } lock_sock(sk); sk->sk_bound_dev_if = index; sk_dst_reset(sk); release_sock(sk); ret = 0; out: #endif return ret; } static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) { if (valbool) sock_set_flag(sk, bit); else sock_reset_flag(sk, bit); } /* * This is meant for all protocols to use and covers goings on * at the socket level. Everything here is generic. 
*/ int sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; int val; int valbool; struct linger ling; int ret = 0; /* * Options without arguments */ if (optname == SO_BINDTODEVICE) return sock_bindtodevice(sk, optval, optlen); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; valbool = val ? 1 : 0; lock_sock(sk); switch (optname) { case SO_DEBUG: if (val && !capable(CAP_NET_ADMIN)) ret = -EACCES; else sock_valbool_flag(sk, SOCK_DBG, valbool); break; case SO_REUSEADDR: sk->sk_reuse = valbool; break; case SO_TYPE: case SO_PROTOCOL: case SO_DOMAIN: case SO_ERROR: ret = -ENOPROTOOPT; break; case SO_DONTROUTE: sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); break; case SO_BROADCAST: sock_valbool_flag(sk, SOCK_BROADCAST, valbool); break; case SO_SNDBUF: /* Don't error on this BSD doesn't and if you think about it this is right. Otherwise apps have to play 'guess the biggest size' games. RCVBUF/SNDBUF are treated in BSD as hints */ if (val > sysctl_wmem_max) val = sysctl_wmem_max; set_sndbuf: sk->sk_userlocks |= SOCK_SNDBUF_LOCK; if ((val * 2) < SOCK_MIN_SNDBUF) sk->sk_sndbuf = SOCK_MIN_SNDBUF; else sk->sk_sndbuf = val * 2; /* * Wake up sending tasks if we * upped the value. */ sk->sk_write_space(sk); break; case SO_SNDBUFFORCE: if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } goto set_sndbuf; case SO_RCVBUF: /* Don't error on this BSD doesn't and if you think about it this is right. Otherwise apps have to play 'guess the biggest size' games. RCVBUF/SNDBUF are treated in BSD as hints */ if (val > sysctl_rmem_max) val = sysctl_rmem_max; set_rcvbuf: sk->sk_userlocks |= SOCK_RCVBUF_LOCK; /* * We double it on the way in to account for * "struct sk_buff" etc. overhead. Applications * assume that the SO_RCVBUF setting they make will * allow that much actual data to be received on that * socket. 
* * Applications are unaware that "struct sk_buff" and * other overheads allocate from the receive buffer * during socket buffer allocation. * * And after considering the possible alternatives, * returning the value we actually used in getsockopt * is the most desirable behavior. */ if ((val * 2) < SOCK_MIN_RCVBUF) sk->sk_rcvbuf = SOCK_MIN_RCVBUF; else sk->sk_rcvbuf = val * 2; break; case SO_RCVBUFFORCE: if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } goto set_rcvbuf; case SO_KEEPALIVE: #ifdef CONFIG_INET if (sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM) tcp_set_keepalive(sk, valbool); #endif sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); break; case SO_OOBINLINE: sock_valbool_flag(sk, SOCK_URGINLINE, valbool); break; case SO_NO_CHECK: sk->sk_no_check = valbool; break; case SO_PRIORITY: if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) sk->sk_priority = val; else ret = -EPERM; break; case SO_LINGER: if (optlen < sizeof(ling)) { ret = -EINVAL; /* 1003.1g */ break; } if (copy_from_user(&ling, optval, sizeof(ling))) { ret = -EFAULT; break; } if (!ling.l_onoff) sock_reset_flag(sk, SOCK_LINGER); else { #if (BITS_PER_LONG == 32) if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; else #endif sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; sock_set_flag(sk, SOCK_LINGER); } break; case SO_BSDCOMPAT: sock_warn_obsolete_bsdism("setsockopt"); break; case SO_PASSCRED: if (valbool) set_bit(SOCK_PASSCRED, &sock->flags); else clear_bit(SOCK_PASSCRED, &sock->flags); break; case SO_TIMESTAMP: case SO_TIMESTAMPNS: if (valbool) { if (optname == SO_TIMESTAMP) sock_reset_flag(sk, SOCK_RCVTSTAMPNS); else sock_set_flag(sk, SOCK_RCVTSTAMPNS); sock_set_flag(sk, SOCK_RCVTSTAMP); sock_enable_timestamp(sk, SOCK_TIMESTAMP); } else { sock_reset_flag(sk, SOCK_RCVTSTAMP); sock_reset_flag(sk, SOCK_RCVTSTAMPNS); } break; case SO_TIMESTAMPING: if (val & ~SOF_TIMESTAMPING_MASK) { ret = -EINVAL; break; } 
sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE, val & SOF_TIMESTAMPING_TX_HARDWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE, val & SOF_TIMESTAMPING_TX_SOFTWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE, val & SOF_TIMESTAMPING_RX_HARDWARE); if (val & SOF_TIMESTAMPING_RX_SOFTWARE) sock_enable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE); else sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE, val & SOF_TIMESTAMPING_SOFTWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE, val & SOF_TIMESTAMPING_SYS_HARDWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE, val & SOF_TIMESTAMPING_RAW_HARDWARE); break; case SO_RCVLOWAT: if (val < 0) val = INT_MAX; sk->sk_rcvlowat = val ? : 1; break; case SO_RCVTIMEO: ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); break; case SO_SNDTIMEO: ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); break; case SO_ATTACH_FILTER: ret = -EINVAL; if (optlen == sizeof(struct sock_fprog)) { struct sock_fprog fprog; ret = -EFAULT; if (copy_from_user(&fprog, optval, sizeof(fprog))) break; ret = sk_attach_filter(&fprog, sk); } break; case SO_DETACH_FILTER: ret = sk_detach_filter(sk); break; case SO_PASSSEC: if (valbool) set_bit(SOCK_PASSSEC, &sock->flags); else clear_bit(SOCK_PASSSEC, &sock->flags); break; case SO_MARK: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else sk->sk_mark = val; break; /* We implement the SO_SNDLOWAT etc to not be settable (1003.1g 5.3) */ case SO_RXQ_OVFL: if (valbool) sock_set_flag(sk, SOCK_RXQ_OVFL); else sock_reset_flag(sk, SOCK_RXQ_OVFL); break; default: ret = -ENOPROTOOPT; break; } release_sock(sk); return ret; } EXPORT_SYMBOL(sock_setsockopt); void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred) { ucred->pid = pid_vnr(pid); ucred->uid = ucred->gid = -1; if (cred) { struct user_namespace *current_ns = current_user_ns(); ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid); 
ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid); } } EXPORT_SYMBOL_GPL(cred_to_ucred); int sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; union { int val; struct linger ling; struct timeval tm; } v; int lv = sizeof(int); int len; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; memset(&v, 0, sizeof(v)); switch (optname) { case SO_DEBUG: v.val = sock_flag(sk, SOCK_DBG); break; case SO_DONTROUTE: v.val = sock_flag(sk, SOCK_LOCALROUTE); break; case SO_BROADCAST: v.val = !!sock_flag(sk, SOCK_BROADCAST); break; case SO_SNDBUF: v.val = sk->sk_sndbuf; break; case SO_RCVBUF: v.val = sk->sk_rcvbuf; break; case SO_REUSEADDR: v.val = sk->sk_reuse; break; case SO_KEEPALIVE: v.val = !!sock_flag(sk, SOCK_KEEPOPEN); break; case SO_TYPE: v.val = sk->sk_type; break; case SO_PROTOCOL: v.val = sk->sk_protocol; break; case SO_DOMAIN: v.val = sk->sk_family; break; case SO_ERROR: v.val = -sock_error(sk); if (v.val == 0) v.val = xchg(&sk->sk_err_soft, 0); break; case SO_OOBINLINE: v.val = !!sock_flag(sk, SOCK_URGINLINE); break; case SO_NO_CHECK: v.val = sk->sk_no_check; break; case SO_PRIORITY: v.val = sk->sk_priority; break; case SO_LINGER: lv = sizeof(v.ling); v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER); v.ling.l_linger = sk->sk_lingertime / HZ; break; case SO_BSDCOMPAT: sock_warn_obsolete_bsdism("getsockopt"); break; case SO_TIMESTAMP: v.val = sock_flag(sk, SOCK_RCVTSTAMP) && !sock_flag(sk, SOCK_RCVTSTAMPNS); break; case SO_TIMESTAMPNS: v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); break; case SO_TIMESTAMPING: v.val = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) v.val |= SOF_TIMESTAMPING_TX_HARDWARE; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) v.val |= SOF_TIMESTAMPING_TX_SOFTWARE; if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE)) v.val |= SOF_TIMESTAMPING_RX_HARDWARE; if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) v.val |= SOF_TIMESTAMPING_RX_SOFTWARE; 
if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) v.val |= SOF_TIMESTAMPING_SOFTWARE; if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)) v.val |= SOF_TIMESTAMPING_SYS_HARDWARE; if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) v.val |= SOF_TIMESTAMPING_RAW_HARDWARE; break; case SO_RCVTIMEO: lv = sizeof(struct timeval); if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { v.tm.tv_sec = 0; v.tm.tv_usec = 0; } else { v.tm.tv_sec = sk->sk_rcvtimeo / HZ; v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ; } break; case SO_SNDTIMEO: lv = sizeof(struct timeval); if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { v.tm.tv_sec = 0; v.tm.tv_usec = 0; } else { v.tm.tv_sec = sk->sk_sndtimeo / HZ; v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ; } break; case SO_RCVLOWAT: v.val = sk->sk_rcvlowat; break; case SO_SNDLOWAT: v.val = 1; break; case SO_PASSCRED: v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0; break; case SO_PEERCRED: { struct ucred peercred; if (len > sizeof(peercred)) len = sizeof(peercred); cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); if (copy_to_user(optval, &peercred, len)) return -EFAULT; goto lenout; } case SO_PEERNAME: { char address[128]; if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2)) return -ENOTCONN; if (lv < len) return -EINVAL; if (copy_to_user(optval, address, len)) return -EFAULT; goto lenout; } /* Dubious BSD thing... Probably nobody even uses it, but * the UNIX standard wants it for whatever reason... -DaveM */ case SO_ACCEPTCONN: v.val = sk->sk_state == TCP_LISTEN; break; case SO_PASSSEC: v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 
1 : 0; break; case SO_PEERSEC: return security_socket_getpeersec_stream(sock, optval, optlen, len); case SO_MARK: v.val = sk->sk_mark; break; case SO_RXQ_OVFL: v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); break; default: return -ENOPROTOOPT; } if (len > lv) len = lv; if (copy_to_user(optval, &v, len)) return -EFAULT; lenout: if (put_user(len, optlen)) return -EFAULT; return 0; } /* * Initialize an sk_lock. * * (We also register the sk_lock with the lock validator.) */ static inline void sock_lock_init(struct sock *sk) { sock_lock_init_class_and_name(sk, af_family_slock_key_strings[sk->sk_family], af_family_slock_keys + sk->sk_family, af_family_key_strings[sk->sk_family], af_family_keys + sk->sk_family); } /* * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet, * even temporarly, because of RCU lookups. sk_node should also be left as is. * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end */ static void sock_copy(struct sock *nsk, const struct sock *osk) { #ifdef CONFIG_SECURITY_NETWORK void *sptr = nsk->sk_security; #endif memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end)); #ifdef CONFIG_SECURITY_NETWORK nsk->sk_security = sptr; security_sk_clone(osk, nsk); #endif } /* * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes * un-modified. Special care is taken when initializing object to zero. 
*/ static inline void sk_prot_clear_nulls(struct sock *sk, int size) { if (offsetof(struct sock, sk_node.next) != 0) memset(sk, 0, offsetof(struct sock, sk_node.next)); memset(&sk->sk_node.pprev, 0, size - offsetof(struct sock, sk_node.pprev)); } void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) { unsigned long nulls1, nulls2; nulls1 = offsetof(struct sock, __sk_common.skc_node.next); nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next); if (nulls1 > nulls2) swap(nulls1, nulls2); if (nulls1 != 0) memset((char *)sk, 0, nulls1); memset((char *)sk + nulls1 + sizeof(void *), 0, nulls2 - nulls1 - sizeof(void *)); memset((char *)sk + nulls2 + sizeof(void *), 0, size - nulls2 - sizeof(void *)); } EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls); static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, int family) { struct sock *sk; struct kmem_cache *slab; slab = prot->slab; if (slab != NULL) { sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); if (!sk) return sk; if (priority & __GFP_ZERO) { if (prot->clear_sk) prot->clear_sk(sk, prot->obj_size); else sk_prot_clear_nulls(sk, prot->obj_size); } } else sk = kmalloc(prot->obj_size, priority); if (sk != NULL) { kmemcheck_annotate_bitfield(sk, flags); if (security_sk_alloc(sk, family, priority)) goto out_free; if (!try_module_get(prot->owner)) goto out_free_sec; sk_tx_queue_clear(sk); } return sk; out_free_sec: security_sk_free(sk); out_free: if (slab != NULL) kmem_cache_free(slab, sk); else kfree(sk); return NULL; } static void sk_prot_free(struct proto *prot, struct sock *sk) { struct kmem_cache *slab; struct module *owner; owner = prot->owner; slab = prot->slab; security_sk_free(sk); if (slab != NULL) kmem_cache_free(slab, sk); else kfree(sk); module_put(owner); } #ifdef CONFIG_CGROUPS void sock_update_classid(struct sock *sk) { u32 classid; rcu_read_lock(); /* doing current task, which cannot vanish. 
*/ classid = task_cls_classid(current); rcu_read_unlock(); if (classid && classid != sk->sk_classid) sk->sk_classid = classid; } EXPORT_SYMBOL(sock_update_classid); #endif /** * sk_alloc - All socket objects are allocated here * @net: the applicable net namespace * @family: protocol family * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) * @prot: struct proto associated with this new sock instance */ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot) { struct sock *sk; sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); if (sk) { sk->sk_family = family; /* * See comment in struct sock definition to understand * why we need sk_prot_creator -acme */ sk->sk_prot = sk->sk_prot_creator = prot; sock_lock_init(sk); sock_net_set(sk, get_net(net)); atomic_set(&sk->sk_wmem_alloc, 1); sock_update_classid(sk); } return sk; } EXPORT_SYMBOL(sk_alloc); static void __sk_free(struct sock *sk) { struct sk_filter *filter; if (sk->sk_destruct) sk->sk_destruct(sk); filter = rcu_dereference_check(sk->sk_filter, atomic_read(&sk->sk_wmem_alloc) == 0); if (filter) { sk_filter_uncharge(sk, filter); rcu_assign_pointer(sk->sk_filter, NULL); } sock_disable_timestamp(sk, SOCK_TIMESTAMP); sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE); if (atomic_read(&sk->sk_omem_alloc)) printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", __func__, atomic_read(&sk->sk_omem_alloc)); if (sk->sk_peer_cred) put_cred(sk->sk_peer_cred); put_pid(sk->sk_peer_pid); put_net(sock_net(sk)); sk_prot_free(sk->sk_prot_creator, sk); } void sk_free(struct sock *sk) { /* * We subtract one from sk_wmem_alloc and can know if * some packets are still in some tx queue. * If not null, sock_wfree() will call __sk_free(sk) later */ if (atomic_dec_and_test(&sk->sk_wmem_alloc)) __sk_free(sk); } EXPORT_SYMBOL(sk_free); /* * Last sock_put should drop reference to sk->sk_net. It has already * been dropped in sk_change_net. 
Taking reference to stopping namespace * is not an option. * Take reference to a socket to remove it from hash _alive_ and after that * destroy it in the context of init_net. */ void sk_release_kernel(struct sock *sk) { if (sk == NULL || sk->sk_socket == NULL) return; sock_hold(sk); sock_release(sk->sk_socket); release_net(sock_net(sk)); sock_net_set(sk, get_net(&init_net)); sock_put(sk); } EXPORT_SYMBOL(sk_release_kernel); struct sock *sk_clone(const struct sock *sk, const gfp_t priority) { struct sock *newsk; newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); if (newsk != NULL) { struct sk_filter *filter; sock_copy(newsk, sk); /* SANITY */ get_net(sock_net(newsk)); sk_node_init(&newsk->sk_node); sock_lock_init(newsk); bh_lock_sock(newsk); newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; newsk->sk_backlog.len = 0; atomic_set(&newsk->sk_rmem_alloc, 0); /* * sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */ atomic_set(&newsk->sk_wmem_alloc, 1); atomic_set(&newsk->sk_omem_alloc, 0); skb_queue_head_init(&newsk->sk_receive_queue); skb_queue_head_init(&newsk->sk_write_queue); #ifdef CONFIG_NET_DMA skb_queue_head_init(&newsk->sk_async_wait_queue); #endif spin_lock_init(&newsk->sk_dst_lock); rwlock_init(&newsk->sk_callback_lock); lockdep_set_class_and_name(&newsk->sk_callback_lock, af_callback_keys + newsk->sk_family, af_family_clock_key_strings[newsk->sk_family]); newsk->sk_dst_cache = NULL; newsk->sk_wmem_queued = 0; newsk->sk_forward_alloc = 0; newsk->sk_send_head = NULL; newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; sock_reset_flag(newsk, SOCK_DONE); skb_queue_head_init(&newsk->sk_error_queue); filter = rcu_dereference_protected(newsk->sk_filter, 1); if (filter != NULL) sk_filter_charge(newsk, filter); if (unlikely(xfrm_sk_clone_policy(newsk))) { /* It is still raw copy of parent, so invalidate * destructor and make plain sk_free() */ newsk->sk_destruct = NULL; bh_unlock_sock(newsk); sk_free(newsk); newsk = NULL; goto out; } 
newsk->sk_err = 0; newsk->sk_priority = 0; /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) */ smp_wmb(); atomic_set(&newsk->sk_refcnt, 2); /* * Increment the counter in the same struct proto as the master * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that * is the same as sk->sk_prot->socks, as this field was copied * with memcpy). * * This _changes_ the previous behaviour, where * tcp_create_openreq_child always was incrementing the * equivalent to tcp_prot->socks (inet_sock_nr), so this have * to be taken into account in all callers. -acme */ sk_refcnt_debug_inc(newsk); sk_set_socket(newsk, NULL); newsk->sk_wq = NULL; if (newsk->sk_prot->sockets_allocated) percpu_counter_inc(newsk->sk_prot->sockets_allocated); if (sock_flag(newsk, SOCK_TIMESTAMP) || sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE)) net_enable_timestamp(); } out: return newsk; } EXPORT_SYMBOL_GPL(sk_clone); void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { __sk_dst_set(sk, dst); sk->sk_route_caps = dst->dev->features; if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; sk->sk_route_caps &= ~sk->sk_route_nocaps; if (sk_can_gso(sk)) { if (dst->header_len) { sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk->sk_gso_max_size = dst->dev->gso_max_size; sk->sk_gso_max_segs = dst->dev->gso_max_segs; } } } EXPORT_SYMBOL_GPL(sk_setup_caps); void __init sk_init(void) { if (totalram_pages <= 4096) { sysctl_wmem_max = 32767; sysctl_rmem_max = 32767; sysctl_wmem_default = 32767; sysctl_rmem_default = 32767; } else if (totalram_pages >= 131072) { sysctl_wmem_max = 131071; sysctl_rmem_max = 131071; } } /* * Simple resource managers for sockets. */ /* * Write buffer destructor automatically called from kfree_skb. 
*/ void sock_wfree(struct sk_buff *skb) { struct sock *sk = skb->sk; unsigned int len = skb->truesize; if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { /* * Keep a reference on sk_wmem_alloc, this will be released * after sk_write_space() call */ atomic_sub(len - 1, &sk->sk_wmem_alloc); sk->sk_write_space(sk); len = 1; } /* * if sk_wmem_alloc reaches 0, we must finish what sk_free() * could not do because of in-flight packets */ if (atomic_sub_and_test(len, &sk->sk_wmem_alloc)) __sk_free(sk); } EXPORT_SYMBOL(sock_wfree); /* * Read buffer destructor automatically called from kfree_skb. */ void sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; unsigned int len = skb->truesize; atomic_sub(len, &sk->sk_rmem_alloc); sk_mem_uncharge(sk, len); } EXPORT_SYMBOL(sock_rfree); int sock_i_uid(struct sock *sk) { int uid; read_lock_bh(&sk->sk_callback_lock); uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; read_unlock_bh(&sk->sk_callback_lock); return uid; } EXPORT_SYMBOL(sock_i_uid); unsigned long sock_i_ino(struct sock *sk) { unsigned long ino; read_lock_bh(&sk->sk_callback_lock); ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; read_unlock_bh(&sk->sk_callback_lock); return ino; } EXPORT_SYMBOL(sock_i_ino); /* * Allocate a skb from the socket's send buffer. */ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority) { if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { struct sk_buff *skb = alloc_skb(size, priority); if (skb) { skb_set_owner_w(skb, sk); return skb; } } return NULL; } EXPORT_SYMBOL(sock_wmalloc); /* * Allocate a skb from the socket's receive buffer. 
*/ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority) { if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { struct sk_buff *skb = alloc_skb(size, priority); if (skb) { skb_set_owner_r(skb, sk); return skb; } } return NULL; } /* * Allocate a memory block from the socket's option memory buffer. */ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) { if ((unsigned)size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { void *mem; /* First do the add, to avoid the race if kmalloc * might sleep. */ atomic_add(size, &sk->sk_omem_alloc); mem = kmalloc(size, priority); if (mem) return mem; atomic_sub(size, &sk->sk_omem_alloc); } return NULL; } EXPORT_SYMBOL(sock_kmalloc); /* * Free an option memory block. */ void sock_kfree_s(struct sock *sk, void *mem, int size) { kfree(mem); atomic_sub(size, &sk->sk_omem_alloc); } EXPORT_SYMBOL(sock_kfree_s); /* It is almost wait_for_tcp_memory minus release_sock/lock_sock. I think, these locks should be removed for datagram sockets. 
*/ static long sock_wait_for_wmem(struct sock *sk, long timeo) { DEFINE_WAIT(wait); clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); for (;;) { if (!timeo) break; if (signal_pending(current)) break; set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) break; if (sk->sk_shutdown & SEND_SHUTDOWN) break; if (sk->sk_err) break; timeo = schedule_timeout(timeo); } finish_wait(sk_sleep(sk), &wait); return timeo; } /* * Generic send/receive buffer handlers */ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, unsigned long data_len, int noblock, int *errcode) { struct sk_buff *skb; gfp_t gfp_mask; long timeo; int err; int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; err = -EMSGSIZE; if (npages > MAX_SKB_FRAGS) goto failure; gfp_mask = sk->sk_allocation; if (gfp_mask & __GFP_WAIT) gfp_mask |= __GFP_REPEAT; timeo = sock_sndtimeo(sk, noblock); while (1) { err = sock_error(sk); if (err != 0) goto failure; err = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) goto failure; if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { skb = alloc_skb(header_len, gfp_mask); if (skb) { int i; /* No pages, we're done... */ if (!data_len) break; skb->truesize += data_len; skb_shinfo(skb)->nr_frags = npages; for (i = 0; i < npages; i++) { struct page *page; skb_frag_t *frag; page = alloc_pages(sk->sk_allocation, 0); if (!page) { err = -ENOBUFS; skb_shinfo(skb)->nr_frags = i; kfree_skb(skb); goto failure; } frag = &skb_shinfo(skb)->frags[i]; frag->page = page; frag->page_offset = 0; frag->size = (data_len >= PAGE_SIZE ? PAGE_SIZE : data_len); data_len -= PAGE_SIZE; } /* Full success... 
*/ break; } err = -ENOBUFS; goto failure; } set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); err = -EAGAIN; if (!timeo) goto failure; if (signal_pending(current)) goto interrupted; timeo = sock_wait_for_wmem(sk, timeo); } skb_set_owner_w(skb, sk); return skb; interrupted: err = sock_intr_errno(timeo); failure: *errcode = err; return NULL; } EXPORT_SYMBOL(sock_alloc_send_pskb); struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, int noblock, int *errcode) { return sock_alloc_send_pskb(sk, size, 0, noblock, errcode); } EXPORT_SYMBOL(sock_alloc_send_skb); static void __lock_sock(struct sock *sk) __releases(&sk->sk_lock.slock) __acquires(&sk->sk_lock.slock) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_bh(&sk->sk_lock.slock); schedule(); spin_lock_bh(&sk->sk_lock.slock); if (!sock_owned_by_user(sk)) break; } finish_wait(&sk->sk_lock.wq, &wait); } static void __release_sock(struct sock *sk) __releases(&sk->sk_lock.slock) __acquires(&sk->sk_lock.slock) { struct sk_buff *skb = sk->sk_backlog.head; do { sk->sk_backlog.head = sk->sk_backlog.tail = NULL; bh_unlock_sock(sk); do { struct sk_buff *next = skb->next; WARN_ON_ONCE(skb_dst_is_noref(skb)); skb->next = NULL; sk_backlog_rcv(sk, skb); /* * We are in process context here with softirqs * disabled, use cond_resched_softirq() to preempt. * This is safe to do because we've taken the backlog * queue private: */ cond_resched_softirq(); skb = next; } while (skb != NULL); bh_lock_sock(sk); } while ((skb = sk->sk_backlog.head) != NULL); /* * Doing the zeroing here guarantee we can not loop forever * while a wild producer attempts to flood us. 
*/ sk->sk_backlog.len = 0; } /** * sk_wait_data - wait for data to arrive at sk_receive_queue * @sk: sock to wait on * @timeo: for how long * * Now socket state including sk->sk_err is changed only under lock, * hence we may omit checks after joining wait queue. * We check receive queue before schedule() only as optimization; * it is very likely that release_sock() added new data. */ int sk_wait_data(struct sock *sk, long *timeo) { int rc; DEFINE_WAIT(wait); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); finish_wait(sk_sleep(sk), &wait); return rc; } EXPORT_SYMBOL(sk_wait_data); /** * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated * @sk: socket * @size: memory size to allocate * @kind: allocation type * * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means * rmem allocation. This function assumes that protocols which have * memory_pressure use sk_wmem_queued as write buffer accounting. */ int __sk_mem_schedule(struct sock *sk, int size, int kind) { struct proto *prot = sk->sk_prot; int amt = sk_mem_pages(size); long allocated; sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; allocated = atomic_long_add_return(amt, prot->memory_allocated); /* Under limit. */ if (allocated <= prot->sysctl_mem[0]) { if (prot->memory_pressure && *prot->memory_pressure) *prot->memory_pressure = 0; return 1; } /* Under pressure. */ if (allocated > prot->sysctl_mem[1]) if (prot->enter_memory_pressure) prot->enter_memory_pressure(sk); /* Over hard limit. 
*/ if (allocated > prot->sysctl_mem[2]) goto suppress_allocation; /* guarantee minimum buffer size under pressure */ if (kind == SK_MEM_RECV) { if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) return 1; } else { /* SK_MEM_SEND */ if (sk->sk_type == SOCK_STREAM) { if (sk->sk_wmem_queued < prot->sysctl_wmem[0]) return 1; } else if (atomic_read(&sk->sk_wmem_alloc) < prot->sysctl_wmem[0]) return 1; } if (prot->memory_pressure) { int alloc; if (!*prot->memory_pressure) return 1; alloc = percpu_counter_read_positive(prot->sockets_allocated); if (prot->sysctl_mem[2] > alloc * sk_mem_pages(sk->sk_wmem_queued + atomic_read(&sk->sk_rmem_alloc) + sk->sk_forward_alloc)) return 1; } suppress_allocation: if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { sk_stream_moderate_sndbuf(sk); /* Fail only if socket is _under_ its sndbuf. * In this case we cannot block, so that we have to fail. */ if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) return 1; } /* Alas. Undo changes. */ sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; atomic_long_sub(amt, prot->memory_allocated); return 0; } EXPORT_SYMBOL(__sk_mem_schedule); /** * __sk_reclaim - reclaim memory_allocated * @sk: socket */ void __sk_mem_reclaim(struct sock *sk) { struct proto *prot = sk->sk_prot; atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, prot->memory_allocated); sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; if (prot->memory_pressure && *prot->memory_pressure && (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0])) *prot->memory_pressure = 0; } EXPORT_SYMBOL(__sk_mem_reclaim); /* * Set of default routines for initialising struct proto_ops when * the protocol does not support a particular function. In certain * cases where it makes no sense for a protocol to have a "do nothing" * function, some default processing is provided. 
*/ int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_bind); int sock_no_connect(struct socket *sock, struct sockaddr *saddr, int len, int flags) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_connect); int sock_no_socketpair(struct socket *sock1, struct socket *sock2) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_socketpair); int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_accept); int sock_no_getname(struct socket *sock, struct sockaddr *saddr, int *len, int peer) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_getname); unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt) { return 0; } EXPORT_SYMBOL(sock_no_poll); int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_ioctl); int sock_no_listen(struct socket *sock, int backlog) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_listen); int sock_no_shutdown(struct socket *sock, int how) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_shutdown); int sock_no_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_setsockopt); int sock_no_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_getsockopt); int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_sendmsg); int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len, int flags) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_recvmsg); int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) { /* Mirror missing mmap method error code */ return -ENODEV; } EXPORT_SYMBOL(sock_no_mmap); ssize_t sock_no_sendpage(struct socket 
*sock, struct page *page, int offset, size_t size, int flags) { ssize_t res; struct msghdr msg = {.msg_flags = flags}; struct kvec iov; char *kaddr = kmap(page); iov.iov_base = kaddr + offset; iov.iov_len = size; res = kernel_sendmsg(sock, &msg, &iov, 1, size); kunmap(page); return res; } EXPORT_SYMBOL(sock_no_sendpage); /* * Default Socket Callbacks */ static void sock_def_wakeup(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_all(&wq->wait); rcu_read_unlock(); } static void sock_def_error_report(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_poll(&wq->wait, POLLERR); sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); rcu_read_unlock(); } static void sock_def_readable(struct sock *sk, int len) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } static void sock_def_write_space(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); /* Do not wake up a writer until he can make "significant" * progress. 
--DaveM */ if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | POLLWRNORM | POLLWRBAND); /* Should agree with poll, otherwise some programs break */ if (sock_writeable(sk)) sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } static void sock_def_destruct(struct sock *sk) { kfree(sk->sk_protinfo); } void sk_send_sigurg(struct sock *sk) { if (sk->sk_socket && sk->sk_socket->file) if (send_sigurg(&sk->sk_socket->file->f_owner)) sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); } EXPORT_SYMBOL(sk_send_sigurg); void sk_reset_timer(struct sock *sk, struct timer_list* timer, unsigned long expires) { if (!mod_timer(timer, expires)) sock_hold(sk); } EXPORT_SYMBOL(sk_reset_timer); void sk_stop_timer(struct sock *sk, struct timer_list* timer) { if (timer_pending(timer) && del_timer(timer)) __sock_put(sk); } EXPORT_SYMBOL(sk_stop_timer); void sock_init_data(struct socket *sock, struct sock *sk) { skb_queue_head_init(&sk->sk_receive_queue); skb_queue_head_init(&sk->sk_write_queue); skb_queue_head_init(&sk->sk_error_queue); #ifdef CONFIG_NET_DMA skb_queue_head_init(&sk->sk_async_wait_queue); #endif sk->sk_send_head = NULL; init_timer(&sk->sk_timer); sk->sk_allocation = GFP_KERNEL; sk->sk_rcvbuf = sysctl_rmem_default; sk->sk_sndbuf = sysctl_wmem_default; sk->sk_state = TCP_CLOSE; sk_set_socket(sk, sock); sock_set_flag(sk, SOCK_ZAPPED); if (sock) { sk->sk_type = sock->type; sk->sk_wq = sock->wq; sock->sk = sk; } else sk->sk_wq = NULL; spin_lock_init(&sk->sk_dst_lock); rwlock_init(&sk->sk_callback_lock); lockdep_set_class_and_name(&sk->sk_callback_lock, af_callback_keys + sk->sk_family, af_family_clock_key_strings[sk->sk_family]); sk->sk_state_change = sock_def_wakeup; sk->sk_data_ready = sock_def_readable; sk->sk_write_space = sock_def_write_space; sk->sk_error_report = sock_def_error_report; sk->sk_destruct = sock_def_destruct; sk->sk_sndmsg_page = 
NULL; sk->sk_sndmsg_off = 0; sk->sk_peer_pid = NULL; sk->sk_peer_cred = NULL; sk->sk_write_pending = 0; sk->sk_rcvlowat = 1; sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; sk->sk_stamp = ktime_set(-1L, 0); /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) */ smp_wmb(); atomic_set(&sk->sk_refcnt, 1); atomic_set(&sk->sk_drops, 0); } EXPORT_SYMBOL(sock_init_data); void lock_sock_nested(struct sock *sk, int subclass) { might_sleep(); spin_lock_bh(&sk->sk_lock.slock); if (sk->sk_lock.owned) __lock_sock(sk); sk->sk_lock.owned = 1; spin_unlock(&sk->sk_lock.slock); /* * The sk_lock has mutex_lock() semantics here: */ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); local_bh_enable(); } EXPORT_SYMBOL(lock_sock_nested); void release_sock(struct sock *sk) { /* * The sk_lock has mutex_unlock() semantics: */ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); spin_lock_bh(&sk->sk_lock.slock); if (sk->sk_backlog.tail) __release_sock(sk); sk->sk_lock.owned = 0; if (waitqueue_active(&sk->sk_lock.wq)) wake_up(&sk->sk_lock.wq); spin_unlock_bh(&sk->sk_lock.slock); } EXPORT_SYMBOL(release_sock); /** * lock_sock_fast - fast version of lock_sock * @sk: socket * * This version should be used for very small section, where process wont block * return false if fast path is taken * sk_lock.slock locked, owned = 0, BH disabled * return true if slow path is taken * sk_lock.slock unlocked, owned = 1, BH enabled */ bool lock_sock_fast(struct sock *sk) { might_sleep(); spin_lock_bh(&sk->sk_lock.slock); if (!sk->sk_lock.owned) /* * Note : We must disable BH */ return false; __lock_sock(sk); sk->sk_lock.owned = 1; spin_unlock(&sk->sk_lock.slock); /* * The sk_lock has mutex_lock() semantics here: */ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); local_bh_enable(); return true; } EXPORT_SYMBOL(lock_sock_fast); int sock_get_timestamp(struct sock *sk, struct timeval __user 
*userstamp) { struct timeval tv; if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_enable_timestamp(sk, SOCK_TIMESTAMP); tv = ktime_to_timeval(sk->sk_stamp); if (tv.tv_sec == -1) return -ENOENT; if (tv.tv_sec == 0) { sk->sk_stamp = ktime_get_real(); tv = ktime_to_timeval(sk->sk_stamp); } return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0; } EXPORT_SYMBOL(sock_get_timestamp); int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) { struct timespec ts; if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_enable_timestamp(sk, SOCK_TIMESTAMP); ts = ktime_to_timespec(sk->sk_stamp); if (ts.tv_sec == -1) return -ENOENT; if (ts.tv_sec == 0) { sk->sk_stamp = ktime_get_real(); ts = ktime_to_timespec(sk->sk_stamp); } return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0; } EXPORT_SYMBOL(sock_get_timestampns); void sock_enable_timestamp(struct sock *sk, int flag) { if (!sock_flag(sk, flag)) { sock_set_flag(sk, flag); /* * we just set one of the two flags which require net * time stamping, but time stamping might have been on * already because of the other one */ if (!sock_flag(sk, flag == SOCK_TIMESTAMP ? SOCK_TIMESTAMPING_RX_SOFTWARE : SOCK_TIMESTAMP)) net_enable_timestamp(); } } /* * Get a socket option on an socket. * * FIX: POSIX 1003.1g is very ambiguous here. It states that * asynchronous errors should be reported by getsockopt. We assume * this means if you specify SO_ERROR (otherwise whats the point of it). 
*/ int sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_getsockopt); #ifdef CONFIG_COMPAT int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; if (sk->sk_prot->compat_getsockopt != NULL) return sk->sk_prot->compat_getsockopt(sk, level, optname, optval, optlen); return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(compat_sock_common_getsockopt); #endif int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; int addr_len = 0; int err; err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, flags & ~MSG_DONTWAIT, &addr_len); if (err >= 0) msg->msg_namelen = addr_len; return err; } EXPORT_SYMBOL(sock_common_recvmsg); /* * Set socket options on an inet socket. */ int sock_common_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_setsockopt); #ifdef CONFIG_COMPAT int compat_sock_common_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; if (sk->sk_prot->compat_setsockopt != NULL) return sk->sk_prot->compat_setsockopt(sk, level, optname, optval, optlen); return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(compat_sock_common_setsockopt); #endif void sk_common_release(struct sock *sk) { if (sk->sk_prot->destroy) sk->sk_prot->destroy(sk); /* * Observation: when sock_common_release is called, processes have * no access to socket. But net still has. * Step one, detach it from networking: * * A. 
Remove from hash tables. */ sk->sk_prot->unhash(sk); /* * In this point socket cannot receive new packets, but it is possible * that some packets are in flight because some CPU runs receiver and * did hash table lookup before we unhashed socket. They will achieve * receive queue and will be purged by socket destructor. * * Also we still have packets pending on receive queue and probably, * our own packets waiting in device queues. sock_destroy will drain * receive queue, but transmitted packets will delay socket destruction * until the last reference will be released. */ sock_orphan(sk); xfrm_sk_free_policy(sk); sk_refcnt_debug_release(sk); sock_put(sk); } EXPORT_SYMBOL(sk_common_release); static DEFINE_RWLOCK(proto_list_lock); static LIST_HEAD(proto_list); #ifdef CONFIG_PROC_FS #define PROTO_INUSE_NR 64 /* should be enough for the first time */ struct prot_inuse { int val[PROTO_INUSE_NR]; }; static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); #ifdef CONFIG_NET_NS void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) { __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val); } EXPORT_SYMBOL_GPL(sock_prot_inuse_add); int sock_prot_inuse_get(struct net *net, struct proto *prot) { int cpu, idx = prot->inuse_idx; int res = 0; for_each_possible_cpu(cpu) res += per_cpu_ptr(net->core.inuse, cpu)->val[idx]; return res >= 0 ? res : 0; } EXPORT_SYMBOL_GPL(sock_prot_inuse_get); static int __net_init sock_inuse_init_net(struct net *net) { net->core.inuse = alloc_percpu(struct prot_inuse); return net->core.inuse ? 
0 : -ENOMEM; } static void __net_exit sock_inuse_exit_net(struct net *net) { free_percpu(net->core.inuse); } static struct pernet_operations net_inuse_ops = { .init = sock_inuse_init_net, .exit = sock_inuse_exit_net, }; static __init int net_inuse_init(void) { if (register_pernet_subsys(&net_inuse_ops)) panic("Cannot initialize net inuse counters"); return 0; } core_initcall(net_inuse_init); #else static DEFINE_PER_CPU(struct prot_inuse, prot_inuse); void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) { __this_cpu_add(prot_inuse.val[prot->inuse_idx], val); } EXPORT_SYMBOL_GPL(sock_prot_inuse_add); int sock_prot_inuse_get(struct net *net, struct proto *prot) { int cpu, idx = prot->inuse_idx; int res = 0; for_each_possible_cpu(cpu) res += per_cpu(prot_inuse, cpu).val[idx]; return res >= 0 ? res : 0; } EXPORT_SYMBOL_GPL(sock_prot_inuse_get); #endif static void assign_proto_idx(struct proto *prot) { prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { printk(KERN_ERR "PROTO_INUSE_NR exhausted\n"); return; } set_bit(prot->inuse_idx, proto_inuse_idx); } static void release_proto_idx(struct proto *prot) { if (prot->inuse_idx != PROTO_INUSE_NR - 1) clear_bit(prot->inuse_idx, proto_inuse_idx); } #else static inline void assign_proto_idx(struct proto *prot) { } static inline void release_proto_idx(struct proto *prot) { } #endif int proto_register(struct proto *prot, int alloc_slab) { if (alloc_slab) { prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, SLAB_HWCACHE_ALIGN | prot->slab_flags, NULL); if (prot->slab == NULL) { printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", prot->name); goto out; } if (prot->rsk_prot != NULL) { prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name); if (prot->rsk_prot->slab_name == NULL) goto out_free_sock_slab; prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, prot->rsk_prot->obj_size, 0, 
SLAB_HWCACHE_ALIGN, NULL); if (prot->rsk_prot->slab == NULL) { printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n", prot->name); goto out_free_request_sock_slab_name; } } if (prot->twsk_prot != NULL) { prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name); if (prot->twsk_prot->twsk_slab_name == NULL) goto out_free_request_sock_slab; prot->twsk_prot->twsk_slab = kmem_cache_create(prot->twsk_prot->twsk_slab_name, prot->twsk_prot->twsk_obj_size, 0, SLAB_HWCACHE_ALIGN | prot->slab_flags, NULL); if (prot->twsk_prot->twsk_slab == NULL) goto out_free_timewait_sock_slab_name; } } write_lock(&proto_list_lock); list_add(&prot->node, &proto_list); assign_proto_idx(prot); write_unlock(&proto_list_lock); return 0; out_free_timewait_sock_slab_name: kfree(prot->twsk_prot->twsk_slab_name); out_free_request_sock_slab: if (prot->rsk_prot && prot->rsk_prot->slab) { kmem_cache_destroy(prot->rsk_prot->slab); prot->rsk_prot->slab = NULL; } out_free_request_sock_slab_name: if (prot->rsk_prot) kfree(prot->rsk_prot->slab_name); out_free_sock_slab: kmem_cache_destroy(prot->slab); prot->slab = NULL; out: return -ENOBUFS; } EXPORT_SYMBOL(proto_register); void proto_unregister(struct proto *prot) { write_lock(&proto_list_lock); release_proto_idx(prot); list_del(&prot->node); write_unlock(&proto_list_lock); if (prot->slab != NULL) { kmem_cache_destroy(prot->slab); prot->slab = NULL; } if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) { kmem_cache_destroy(prot->rsk_prot->slab); kfree(prot->rsk_prot->slab_name); prot->rsk_prot->slab = NULL; } if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) { kmem_cache_destroy(prot->twsk_prot->twsk_slab); kfree(prot->twsk_prot->twsk_slab_name); prot->twsk_prot->twsk_slab = NULL; } } EXPORT_SYMBOL(proto_unregister); #ifdef CONFIG_PROC_FS static void *proto_seq_start(struct seq_file *seq, loff_t *pos) __acquires(proto_list_lock) { read_lock(&proto_list_lock); return seq_list_start_head(&proto_list, 
*pos); } static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &proto_list, pos); } static void proto_seq_stop(struct seq_file *seq, void *v) __releases(proto_list_lock) { read_unlock(&proto_list_lock); } static char proto_method_implemented(const void *method) { return method == NULL ? 'n' : 'y'; } static void proto_seq_printf(struct seq_file *seq, struct proto *proto) { seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", proto->name, proto->obj_size, sock_prot_inuse_get(seq_file_net(seq), proto), proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L, proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", proto->max_header, proto->slab == NULL ? "no" : "yes", module_name(proto->owner), proto_method_implemented(proto->close), proto_method_implemented(proto->connect), proto_method_implemented(proto->disconnect), proto_method_implemented(proto->accept), proto_method_implemented(proto->ioctl), proto_method_implemented(proto->init), proto_method_implemented(proto->destroy), proto_method_implemented(proto->shutdown), proto_method_implemented(proto->setsockopt), proto_method_implemented(proto->getsockopt), proto_method_implemented(proto->sendmsg), proto_method_implemented(proto->recvmsg), proto_method_implemented(proto->sendpage), proto_method_implemented(proto->bind), proto_method_implemented(proto->backlog_rcv), proto_method_implemented(proto->hash), proto_method_implemented(proto->unhash), proto_method_implemented(proto->get_port), proto_method_implemented(proto->enter_memory_pressure)); } static int proto_seq_show(struct seq_file *seq, void *v) { if (v == &proto_list) seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", "protocol", "size", "sockets", "memory", "press", "maxhdr", "slab", "module", "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); else 
proto_seq_printf(seq, list_entry(v, struct proto, node)); return 0; } static const struct seq_operations proto_seq_ops = { .start = proto_seq_start, .next = proto_seq_next, .stop = proto_seq_stop, .show = proto_seq_show, }; static int proto_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &proto_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations proto_seq_fops = { .owner = THIS_MODULE, .open = proto_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static __net_init int proto_init_net(struct net *net) { if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops)) return -ENOMEM; return 0; } static __net_exit void proto_exit_net(struct net *net) { proc_net_remove(net, "protocols"); } static __net_initdata struct pernet_operations proto_net_ops = { .init = proto_init_net, .exit = proto_exit_net, }; static int __init proto_init(void) { return register_pernet_subsys(&proto_net_ops); } subsys_initcall(proto_init); #endif /* PROC_FS */
gpl-2.0
endocode/linux
drivers/xen/xenbus/xenbus_comms.c
292
6310
/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/wait.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/err.h> #include <xen/xenbus.h> #include <asm/xen/hypervisor.h> #include <xen/events.h> #include <xen/page.h> #include "xenbus_comms.h" static int xenbus_irq; static DECLARE_WORK(probe_work, xenbus_probe); static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused) { if (unlikely(xenstored_ready == 0)) { xenstored_ready = 1; schedule_work(&probe_work); } wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } /** * xb_write - low level write * @data: buffer to send * @len: length of buffer * * Returns 0 on success, error otherwise. */ int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. 
*/ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ virt_mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ virt_wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ virt_rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ virt_mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /** * xb_init_comms - Set up interrupt handler off store event channel. 
*/ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; if (intf->req_prod != intf->req_cons) pr_err("request ring is not quiescent (%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { pr_warn("response ring is not quiescent (%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); /* breaks kdump */ if (!reset_devices) intf->rsp_cons = intf->rsp_prod; } if (xenbus_irq) { /* Already have an irq; assume we're resuming */ rebind_evtchn_irq(xen_store_evtchn, xenbus_irq); } else { int err; err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err < 0) { pr_err("request irq failed %i\n", err); return err; } xenbus_irq = err; } return 0; } void xb_deinit_comms(void) { unbind_from_irqhandler(xenbus_irq, &xb_waitq); xenbus_irq = 0; }
gpl-2.0
manveru0/kernel_I9001_cfs
kernel/trace/trace_events_filter.c
548
30146
/* * trace_events_filter - generic event filtering * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> */ #include <linux/module.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <linux/perf_event.h> #include <linux/slab.h> #include "trace.h" #include "trace_output.h" enum filter_op_ids { OP_OR, OP_AND, OP_GLOB, OP_NE, OP_EQ, OP_LT, OP_LE, OP_GT, OP_GE, OP_NONE, OP_OPEN_PAREN, }; struct filter_op { int id; char *string; int precedence; }; static struct filter_op filter_ops[] = { { OP_OR, "||", 1 }, { OP_AND, "&&", 2 }, { OP_GLOB, "~", 4 }, { OP_NE, "!=", 4 }, { OP_EQ, "==", 4 }, { OP_LT, "<", 5 }, { OP_LE, "<=", 5 }, { OP_GT, ">", 5 }, { OP_GE, ">=", 5 }, { OP_NONE, "OP_NONE", 0 }, { OP_OPEN_PAREN, "(", 0 }, }; enum { FILT_ERR_NONE, FILT_ERR_INVALID_OP, FILT_ERR_UNBALANCED_PAREN, FILT_ERR_TOO_MANY_OPERANDS, FILT_ERR_OPERAND_TOO_LONG, FILT_ERR_FIELD_NOT_FOUND, FILT_ERR_ILLEGAL_FIELD_OP, FILT_ERR_ILLEGAL_INTVAL, FILT_ERR_BAD_SUBSYS_FILTER, FILT_ERR_TOO_MANY_PREDS, FILT_ERR_MISSING_FIELD, FILT_ERR_INVALID_FILTER, }; static char *err_text[] = { "No error", "Invalid operator", "Unbalanced parens", "Too many operands", "Operand too long", "Field not found", "Illegal operation for field type", "Illegal integer value", "Couldn't find or set field in one of a 
subsystem's events", "Too many terms in predicate expression", "Missing field name and/or value", "Meaningless filter expression", }; struct opstack_op { int op; struct list_head list; }; struct postfix_elt { int op; char *operand; struct list_head list; }; struct filter_parse_state { struct filter_op *ops; struct list_head opstack; struct list_head postfix; int lasterr; int lasterr_pos; struct { char *string; unsigned int cnt; unsigned int tail; } infix; struct { char string[MAX_FILTER_STR_VAL]; int pos; unsigned int tail; } operand; }; #define DEFINE_COMPARISON_PRED(type) \ static int filter_pred_##type(struct filter_pred *pred, void *event, \ int val1, int val2) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ int match = 0; \ \ switch (pred->op) { \ case OP_LT: \ match = (*addr < val); \ break; \ case OP_LE: \ match = (*addr <= val); \ break; \ case OP_GT: \ match = (*addr > val); \ break; \ case OP_GE: \ match = (*addr >= val); \ break; \ default: \ break; \ } \ \ return match; \ } #define DEFINE_EQUALITY_PRED(size) \ static int filter_pred_##size(struct filter_pred *pred, void *event, \ int val1, int val2) \ { \ u##size *addr = (u##size *)(event + pred->offset); \ u##size val = (u##size)pred->val; \ int match; \ \ match = (val == *addr) ^ pred->not; \ \ return match; \ } DEFINE_COMPARISON_PRED(s64); DEFINE_COMPARISON_PRED(u64); DEFINE_COMPARISON_PRED(s32); DEFINE_COMPARISON_PRED(u32); DEFINE_COMPARISON_PRED(s16); DEFINE_COMPARISON_PRED(u16); DEFINE_COMPARISON_PRED(s8); DEFINE_COMPARISON_PRED(u8); DEFINE_EQUALITY_PRED(64); DEFINE_EQUALITY_PRED(32); DEFINE_EQUALITY_PRED(16); DEFINE_EQUALITY_PRED(8); static int filter_pred_and(struct filter_pred *pred __attribute((unused)), void *event __attribute((unused)), int val1, int val2) { return val1 && val2; } static int filter_pred_or(struct filter_pred *pred __attribute((unused)), void *event __attribute((unused)), int val1, int val2) { return val1 || val2; } /* Filter predicate for 
fixed sized arrays of characters */ static int filter_pred_string(struct filter_pred *pred, void *event, int val1, int val2) { char *addr = (char *)(event + pred->offset); int cmp, match; cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len); match = cmp ^ pred->not; return match; } /* Filter predicate for char * pointers */ static int filter_pred_pchar(struct filter_pred *pred, void *event, int val1, int val2) { char **addr = (char **)(event + pred->offset); int cmp, match; int len = strlen(*addr) + 1; /* including tailing '\0' */ cmp = pred->regex.match(*addr, &pred->regex, len); match = cmp ^ pred->not; return match; } /* * Filter predicate for dynamic sized arrays of characters. * These are implemented through a list of strings at the end * of the entry. * Also each of these strings have a field in the entry which * contains its offset from the beginning of the entry. * We have then first to get this field, dereference it * and add it to the address of the entry, and at last we have * the address of the string. 
*/ static int filter_pred_strloc(struct filter_pred *pred, void *event, int val1, int val2) { u32 str_item = *(u32 *)(event + pred->offset); int str_loc = str_item & 0xffff; int str_len = str_item >> 16; char *addr = (char *)(event + str_loc); int cmp, match; cmp = pred->regex.match(addr, &pred->regex, str_len); match = cmp ^ pred->not; return match; } static int filter_pred_none(struct filter_pred *pred, void *event, int val1, int val2) { return 0; } /* * regex_match_foo - Basic regex callbacks * * @str: the string to be searched * @r: the regex structure containing the pattern string * @len: the length of the string to be searched (including '\0') * * Note: * - @str might not be NULL-terminated if it's of type DYN_STRING * or STATIC_STRING */ static int regex_match_full(char *str, struct regex *r, int len) { if (strncmp(str, r->pattern, len) == 0) return 1; return 0; } static int regex_match_front(char *str, struct regex *r, int len) { if (strncmp(str, r->pattern, r->len) == 0) return 1; return 0; } static int regex_match_middle(char *str, struct regex *r, int len) { if (strnstr(str, r->pattern, len)) return 1; return 0; } static int regex_match_end(char *str, struct regex *r, int len) { int strlen = len - 1; if (strlen >= r->len && memcmp(str + strlen - r->len, r->pattern, r->len) == 0) return 1; return 0; } /** * filter_parse_regex - parse a basic regex * @buff: the raw regex * @len: length of the regex * @search: will point to the beginning of the string to compare * @not: tell whether the match will have to be inverted * * This passes in a buffer containing a regex and this function will * set search to point to the search part of the buffer and * return the type of search it is (see enum above). * This does modify buff. * * Returns enum type. * search returns the pointer to use for comparison. * not returns 1 if buff started with a '!' * 0 otherwise. 
*/ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) { int type = MATCH_FULL; int i; if (buff[0] == '!') { *not = 1; buff++; len--; } else *not = 0; *search = buff; for (i = 0; i < len; i++) { if (buff[i] == '*') { if (!i) { *search = buff + 1; type = MATCH_END_ONLY; } else { if (type == MATCH_END_ONLY) type = MATCH_MIDDLE_ONLY; else type = MATCH_FRONT_ONLY; buff[i] = 0; break; } } } return type; } static void filter_build_regex(struct filter_pred *pred) { struct regex *r = &pred->regex; char *search; enum regex_type type = MATCH_FULL; int not = 0; if (pred->op == OP_GLOB) { type = filter_parse_regex(r->pattern, r->len, &search, &not); r->len = strlen(search); memmove(r->pattern, search, r->len+1); } switch (type) { case MATCH_FULL: r->match = regex_match_full; break; case MATCH_FRONT_ONLY: r->match = regex_match_front; break; case MATCH_MIDDLE_ONLY: r->match = regex_match_middle; break; case MATCH_END_ONLY: r->match = regex_match_end; break; } pred->not ^= not; } /* return 1 if event matches, 0 otherwise (discard) */ int filter_match_preds(struct event_filter *filter, void *rec) { int match, top = 0, val1 = 0, val2 = 0; int stack[MAX_FILTER_PRED]; struct filter_pred *pred; int i; for (i = 0; i < filter->n_preds; i++) { pred = filter->preds[i]; if (!pred->pop_n) { match = pred->fn(pred, rec, val1, val2); stack[top++] = match; continue; } if (pred->pop_n > top) { WARN_ON_ONCE(1); return 0; } val1 = stack[--top]; val2 = stack[--top]; match = pred->fn(pred, rec, val1, val2); stack[top++] = match; } return stack[--top]; } EXPORT_SYMBOL_GPL(filter_match_preds); static void parse_error(struct filter_parse_state *ps, int err, int pos) { ps->lasterr = err; ps->lasterr_pos = pos; } static void remove_filter_string(struct event_filter *filter) { kfree(filter->filter_string); filter->filter_string = NULL; } static int replace_filter_string(struct event_filter *filter, char *filter_string) { kfree(filter->filter_string); filter->filter_string = 
kstrdup(filter_string, GFP_KERNEL); if (!filter->filter_string) return -ENOMEM; return 0; } static int append_filter_string(struct event_filter *filter, char *string) { int newlen; char *new_filter_string; BUG_ON(!filter->filter_string); newlen = strlen(filter->filter_string) + strlen(string) + 1; new_filter_string = kmalloc(newlen, GFP_KERNEL); if (!new_filter_string) return -ENOMEM; strcpy(new_filter_string, filter->filter_string); strcat(new_filter_string, string); kfree(filter->filter_string); filter->filter_string = new_filter_string; return 0; } static void append_filter_err(struct filter_parse_state *ps, struct event_filter *filter) { int pos = ps->lasterr_pos; char *buf, *pbuf; buf = (char *)__get_free_page(GFP_TEMPORARY); if (!buf) return; append_filter_string(filter, "\n"); memset(buf, ' ', PAGE_SIZE); if (pos > PAGE_SIZE - 128) pos = 0; buf[pos] = '^'; pbuf = &buf[pos] + 1; sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]); append_filter_string(filter, buf); free_page((unsigned long) buf); } void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) { struct event_filter *filter = call->filter; mutex_lock(&event_mutex); if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else trace_seq_printf(s, "none\n"); mutex_unlock(&event_mutex); } void print_subsystem_event_filter(struct event_subsystem *system, struct trace_seq *s) { struct event_filter *filter = system->filter; mutex_lock(&event_mutex); if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else trace_seq_printf(s, "none\n"); mutex_unlock(&event_mutex); } static struct ftrace_event_field * find_event_field(struct ftrace_event_call *call, char *name) { struct ftrace_event_field *field; struct list_head *head; head = trace_get_fields(call); list_for_each_entry(field, head, link) { if (!strcmp(field->name, name)) return field; } return NULL; } static void filter_free_pred(struct filter_pred *pred) { if 
(!pred) return; kfree(pred->field_name); kfree(pred); } static void filter_clear_pred(struct filter_pred *pred) { kfree(pred->field_name); pred->field_name = NULL; pred->regex.len = 0; } static int filter_set_pred(struct filter_pred *dest, struct filter_pred *src, filter_pred_fn_t fn) { *dest = *src; if (src->field_name) { dest->field_name = kstrdup(src->field_name, GFP_KERNEL); if (!dest->field_name) return -ENOMEM; } dest->fn = fn; return 0; } static void filter_disable_preds(struct ftrace_event_call *call) { struct event_filter *filter = call->filter; int i; call->flags &= ~TRACE_EVENT_FL_FILTERED; filter->n_preds = 0; for (i = 0; i < MAX_FILTER_PRED; i++) filter->preds[i]->fn = filter_pred_none; } static void __free_preds(struct event_filter *filter) { int i; if (!filter) return; for (i = 0; i < MAX_FILTER_PRED; i++) { if (filter->preds[i]) filter_free_pred(filter->preds[i]); } kfree(filter->preds); kfree(filter->filter_string); kfree(filter); } void destroy_preds(struct ftrace_event_call *call) { __free_preds(call->filter); call->filter = NULL; call->flags &= ~TRACE_EVENT_FL_FILTERED; } static struct event_filter *__alloc_preds(void) { struct event_filter *filter; struct filter_pred *pred; int i; filter = kzalloc(sizeof(*filter), GFP_KERNEL); if (!filter) return ERR_PTR(-ENOMEM); filter->n_preds = 0; filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), GFP_KERNEL); if (!filter->preds) goto oom; for (i = 0; i < MAX_FILTER_PRED; i++) { pred = kzalloc(sizeof(*pred), GFP_KERNEL); if (!pred) goto oom; pred->fn = filter_pred_none; filter->preds[i] = pred; } return filter; oom: __free_preds(filter); return ERR_PTR(-ENOMEM); } static int init_preds(struct ftrace_event_call *call) { if (call->filter) return 0; call->flags &= ~TRACE_EVENT_FL_FILTERED; call->filter = __alloc_preds(); if (IS_ERR(call->filter)) return PTR_ERR(call->filter); return 0; } static int init_subsystem_preds(struct event_subsystem *system) { struct ftrace_event_call *call; int err; 
list_for_each_entry(call, &ftrace_events, list) { if (!call->class || !call->class->define_fields) continue; if (strcmp(call->class->system, system->name) != 0) continue; err = init_preds(call); if (err) return err; } return 0; } static void filter_free_subsystem_preds(struct event_subsystem *system) { struct ftrace_event_call *call; list_for_each_entry(call, &ftrace_events, list) { if (!call->class || !call->class->define_fields) continue; if (strcmp(call->class->system, system->name) != 0) continue; filter_disable_preds(call); remove_filter_string(call->filter); } } static int filter_add_pred_fn(struct filter_parse_state *ps, struct ftrace_event_call *call, struct event_filter *filter, struct filter_pred *pred, filter_pred_fn_t fn) { int idx, err; if (filter->n_preds == MAX_FILTER_PRED) { parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); return -ENOSPC; } idx = filter->n_preds; filter_clear_pred(filter->preds[idx]); err = filter_set_pred(filter->preds[idx], pred, fn); if (err) return err; filter->n_preds++; return 0; } int filter_assign_type(const char *type) { if (strstr(type, "__data_loc") && strstr(type, "char")) return FILTER_DYN_STRING; if (strchr(type, '[') && strstr(type, "char")) return FILTER_STATIC_STRING; return FILTER_OTHER; } static bool is_string_field(struct ftrace_event_field *field) { return field->filter_type == FILTER_DYN_STRING || field->filter_type == FILTER_STATIC_STRING || field->filter_type == FILTER_PTR_STRING; } static int is_legal_op(struct ftrace_event_field *field, int op) { if (is_string_field(field) && (op != OP_EQ && op != OP_NE && op != OP_GLOB)) return 0; if (!is_string_field(field) && op == OP_GLOB) return 0; return 1; } static filter_pred_fn_t select_comparison_fn(int op, int field_size, int field_is_signed) { filter_pred_fn_t fn = NULL; switch (field_size) { case 8: if (op == OP_EQ || op == OP_NE) fn = filter_pred_64; else if (field_is_signed) fn = filter_pred_s64; else fn = filter_pred_u64; break; case 4: if (op == OP_EQ || op == 
OP_NE) fn = filter_pred_32; else if (field_is_signed) fn = filter_pred_s32; else fn = filter_pred_u32; break; case 2: if (op == OP_EQ || op == OP_NE) fn = filter_pred_16; else if (field_is_signed) fn = filter_pred_s16; else fn = filter_pred_u16; break; case 1: if (op == OP_EQ || op == OP_NE) fn = filter_pred_8; else if (field_is_signed) fn = filter_pred_s8; else fn = filter_pred_u8; break; } return fn; } static int filter_add_pred(struct filter_parse_state *ps, struct ftrace_event_call *call, struct event_filter *filter, struct filter_pred *pred, bool dry_run) { struct ftrace_event_field *field; filter_pred_fn_t fn; unsigned long long val; int ret; pred->fn = filter_pred_none; if (pred->op == OP_AND) { pred->pop_n = 2; fn = filter_pred_and; goto add_pred_fn; } else if (pred->op == OP_OR) { pred->pop_n = 2; fn = filter_pred_or; goto add_pred_fn; } field = find_event_field(call, pred->field_name); if (!field) { parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0); return -EINVAL; } pred->offset = field->offset; if (!is_legal_op(field, pred->op)) { parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0); return -EINVAL; } if (is_string_field(field)) { filter_build_regex(pred); if (field->filter_type == FILTER_STATIC_STRING) { fn = filter_pred_string; pred->regex.field_len = field->size; } else if (field->filter_type == FILTER_DYN_STRING) fn = filter_pred_strloc; else fn = filter_pred_pchar; } else { if (field->is_signed) ret = strict_strtoll(pred->regex.pattern, 0, &val); else ret = strict_strtoull(pred->regex.pattern, 0, &val); if (ret) { parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); return -EINVAL; } pred->val = val; fn = select_comparison_fn(pred->op, field->size, field->is_signed); if (!fn) { parse_error(ps, FILT_ERR_INVALID_OP, 0); return -EINVAL; } } if (pred->op == OP_NE) pred->not = 1; add_pred_fn: if (!dry_run) return filter_add_pred_fn(ps, call, filter, pred, fn); return 0; } static void parse_init(struct filter_parse_state *ps, struct filter_op *ops, char *infix_string) { 
memset(ps, '\0', sizeof(*ps)); ps->infix.string = infix_string; ps->infix.cnt = strlen(infix_string); ps->ops = ops; INIT_LIST_HEAD(&ps->opstack); INIT_LIST_HEAD(&ps->postfix); } static char infix_next(struct filter_parse_state *ps) { ps->infix.cnt--; return ps->infix.string[ps->infix.tail++]; } static char infix_peek(struct filter_parse_state *ps) { if (ps->infix.tail == strlen(ps->infix.string)) return 0; return ps->infix.string[ps->infix.tail]; } static void infix_advance(struct filter_parse_state *ps) { ps->infix.cnt--; ps->infix.tail++; } static inline int is_precedence_lower(struct filter_parse_state *ps, int a, int b) { return ps->ops[a].precedence < ps->ops[b].precedence; } static inline int is_op_char(struct filter_parse_state *ps, char c) { int i; for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) { if (ps->ops[i].string[0] == c) return 1; } return 0; } static int infix_get_op(struct filter_parse_state *ps, char firstc) { char nextc = infix_peek(ps); char opstr[3]; int i; opstr[0] = firstc; opstr[1] = nextc; opstr[2] = '\0'; for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) { if (!strcmp(opstr, ps->ops[i].string)) { infix_advance(ps); return ps->ops[i].id; } } opstr[1] = '\0'; for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) { if (!strcmp(opstr, ps->ops[i].string)) return ps->ops[i].id; } return OP_NONE; } static inline void clear_operand_string(struct filter_parse_state *ps) { memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL); ps->operand.tail = 0; } static inline int append_operand_char(struct filter_parse_state *ps, char c) { if (ps->operand.tail == MAX_FILTER_STR_VAL - 1) return -EINVAL; ps->operand.string[ps->operand.tail++] = c; return 0; } static int filter_opstack_push(struct filter_parse_state *ps, int op) { struct opstack_op *opstack_op; opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL); if (!opstack_op) return -ENOMEM; opstack_op->op = op; list_add(&opstack_op->list, &ps->opstack); return 0; } static int 
filter_opstack_empty(struct filter_parse_state *ps) { return list_empty(&ps->opstack); } static int filter_opstack_top(struct filter_parse_state *ps) { struct opstack_op *opstack_op; if (filter_opstack_empty(ps)) return OP_NONE; opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list); return opstack_op->op; } static int filter_opstack_pop(struct filter_parse_state *ps) { struct opstack_op *opstack_op; int op; if (filter_opstack_empty(ps)) return OP_NONE; opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list); op = opstack_op->op; list_del(&opstack_op->list); kfree(opstack_op); return op; } static void filter_opstack_clear(struct filter_parse_state *ps) { while (!filter_opstack_empty(ps)) filter_opstack_pop(ps); } static char *curr_operand(struct filter_parse_state *ps) { return ps->operand.string; } static int postfix_append_operand(struct filter_parse_state *ps, char *operand) { struct postfix_elt *elt; elt = kmalloc(sizeof(*elt), GFP_KERNEL); if (!elt) return -ENOMEM; elt->op = OP_NONE; elt->operand = kstrdup(operand, GFP_KERNEL); if (!elt->operand) { kfree(elt); return -ENOMEM; } list_add_tail(&elt->list, &ps->postfix); return 0; } static int postfix_append_op(struct filter_parse_state *ps, int op) { struct postfix_elt *elt; elt = kmalloc(sizeof(*elt), GFP_KERNEL); if (!elt) return -ENOMEM; elt->op = op; elt->operand = NULL; list_add_tail(&elt->list, &ps->postfix); return 0; } static void postfix_clear(struct filter_parse_state *ps) { struct postfix_elt *elt; while (!list_empty(&ps->postfix)) { elt = list_first_entry(&ps->postfix, struct postfix_elt, list); list_del(&elt->list); kfree(elt->operand); kfree(elt); } } static int filter_parse(struct filter_parse_state *ps) { int in_string = 0; int op, top_op; char ch; while ((ch = infix_next(ps))) { if (ch == '"') { in_string ^= 1; continue; } if (in_string) goto parse_operand; if (isspace(ch)) continue; if (is_op_char(ps, ch)) { op = infix_get_op(ps, ch); if (op == OP_NONE) { 
parse_error(ps, FILT_ERR_INVALID_OP, 0); return -EINVAL; } if (strlen(curr_operand(ps))) { postfix_append_operand(ps, curr_operand(ps)); clear_operand_string(ps); } while (!filter_opstack_empty(ps)) { top_op = filter_opstack_top(ps); if (!is_precedence_lower(ps, top_op, op)) { top_op = filter_opstack_pop(ps); postfix_append_op(ps, top_op); continue; } break; } filter_opstack_push(ps, op); continue; } if (ch == '(') { filter_opstack_push(ps, OP_OPEN_PAREN); continue; } if (ch == ')') { if (strlen(curr_operand(ps))) { postfix_append_operand(ps, curr_operand(ps)); clear_operand_string(ps); } top_op = filter_opstack_pop(ps); while (top_op != OP_NONE) { if (top_op == OP_OPEN_PAREN) break; postfix_append_op(ps, top_op); top_op = filter_opstack_pop(ps); } if (top_op == OP_NONE) { parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0); return -EINVAL; } continue; } parse_operand: if (append_operand_char(ps, ch)) { parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0); return -EINVAL; } } if (strlen(curr_operand(ps))) postfix_append_operand(ps, curr_operand(ps)); while (!filter_opstack_empty(ps)) { top_op = filter_opstack_pop(ps); if (top_op == OP_NONE) break; if (top_op == OP_OPEN_PAREN) { parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0); return -EINVAL; } postfix_append_op(ps, top_op); } return 0; } static struct filter_pred *create_pred(int op, char *operand1, char *operand2) { struct filter_pred *pred; pred = kzalloc(sizeof(*pred), GFP_KERNEL); if (!pred) return NULL; pred->field_name = kstrdup(operand1, GFP_KERNEL); if (!pred->field_name) { kfree(pred); return NULL; } strcpy(pred->regex.pattern, operand2); pred->regex.len = strlen(pred->regex.pattern); pred->op = op; return pred; } static struct filter_pred *create_logical_pred(int op) { struct filter_pred *pred; pred = kzalloc(sizeof(*pred), GFP_KERNEL); if (!pred) return NULL; pred->op = op; return pred; } static int check_preds(struct filter_parse_state *ps) { int n_normal_preds = 0, n_logical_preds = 0; struct postfix_elt *elt; 
list_for_each_entry(elt, &ps->postfix, list) { if (elt->op == OP_NONE) continue; if (elt->op == OP_AND || elt->op == OP_OR) { n_logical_preds++; continue; } n_normal_preds++; } if (!n_normal_preds || n_logical_preds >= n_normal_preds) { parse_error(ps, FILT_ERR_INVALID_FILTER, 0); return -EINVAL; } return 0; } static int replace_preds(struct ftrace_event_call *call, struct event_filter *filter, struct filter_parse_state *ps, char *filter_string, bool dry_run) { char *operand1 = NULL, *operand2 = NULL; struct filter_pred *pred; struct postfix_elt *elt; int err; int n_preds = 0; err = check_preds(ps); if (err) return err; list_for_each_entry(elt, &ps->postfix, list) { if (elt->op == OP_NONE) { if (!operand1) operand1 = elt->operand; else if (!operand2) operand2 = elt->operand; else { parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0); return -EINVAL; } continue; } if (n_preds++ == MAX_FILTER_PRED) { parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); return -ENOSPC; } if (elt->op == OP_AND || elt->op == OP_OR) { pred = create_logical_pred(elt->op); goto add_pred; } if (!operand1 || !operand2) { parse_error(ps, FILT_ERR_MISSING_FIELD, 0); return -EINVAL; } pred = create_pred(elt->op, operand1, operand2); add_pred: if (!pred) return -ENOMEM; err = filter_add_pred(ps, call, filter, pred, dry_run); filter_free_pred(pred); if (err) return err; operand1 = operand2 = NULL; } return 0; } static int replace_system_preds(struct event_subsystem *system, struct filter_parse_state *ps, char *filter_string) { struct ftrace_event_call *call; bool fail = true; int err; list_for_each_entry(call, &ftrace_events, list) { struct event_filter *filter = call->filter; if (!call->class || !call->class->define_fields) continue; if (strcmp(call->class->system, system->name) != 0) continue; /* try to see if the filter can be applied */ err = replace_preds(call, filter, ps, filter_string, true); if (err) continue; /* really apply the filter */ filter_disable_preds(call); err = replace_preds(call, filter, 
ps, filter_string, false); if (err) filter_disable_preds(call); else { call->flags |= TRACE_EVENT_FL_FILTERED; replace_filter_string(filter, filter_string); } fail = false; } if (fail) { parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); return -EINVAL; } return 0; } int apply_event_filter(struct ftrace_event_call *call, char *filter_string) { int err; struct filter_parse_state *ps; mutex_lock(&event_mutex); err = init_preds(call); if (err) goto out_unlock; if (!strcmp(strstrip(filter_string), "0")) { filter_disable_preds(call); remove_filter_string(call->filter); goto out_unlock; } err = -ENOMEM; ps = kzalloc(sizeof(*ps), GFP_KERNEL); if (!ps) goto out_unlock; filter_disable_preds(call); replace_filter_string(call->filter, filter_string); parse_init(ps, filter_ops, filter_string); err = filter_parse(ps); if (err) { append_filter_err(ps, call->filter); goto out; } err = replace_preds(call, call->filter, ps, filter_string, false); if (err) append_filter_err(ps, call->filter); else call->flags |= TRACE_EVENT_FL_FILTERED; out: filter_opstack_clear(ps); postfix_clear(ps); kfree(ps); out_unlock: mutex_unlock(&event_mutex); return err; } int apply_subsystem_event_filter(struct event_subsystem *system, char *filter_string) { int err; struct filter_parse_state *ps; mutex_lock(&event_mutex); err = init_subsystem_preds(system); if (err) goto out_unlock; if (!strcmp(strstrip(filter_string), "0")) { filter_free_subsystem_preds(system); remove_filter_string(system->filter); goto out_unlock; } err = -ENOMEM; ps = kzalloc(sizeof(*ps), GFP_KERNEL); if (!ps) goto out_unlock; replace_filter_string(system->filter, filter_string); parse_init(ps, filter_ops, filter_string); err = filter_parse(ps); if (err) { append_filter_err(ps, system->filter); goto out; } err = replace_system_preds(system, ps, filter_string); if (err) append_filter_err(ps, system->filter); out: filter_opstack_clear(ps); postfix_clear(ps); kfree(ps); out_unlock: mutex_unlock(&event_mutex); return err; } #ifdef 
CONFIG_PERF_EVENTS void ftrace_profile_free_filter(struct perf_event *event) { struct event_filter *filter = event->filter; event->filter = NULL; __free_preds(filter); } int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str) { int err; struct event_filter *filter; struct filter_parse_state *ps; struct ftrace_event_call *call = NULL; mutex_lock(&event_mutex); list_for_each_entry(call, &ftrace_events, list) { if (call->event.type == event_id) break; } err = -EINVAL; if (&call->list == &ftrace_events) goto out_unlock; err = -EEXIST; if (event->filter) goto out_unlock; filter = __alloc_preds(); if (IS_ERR(filter)) { err = PTR_ERR(filter); goto out_unlock; } err = -ENOMEM; ps = kzalloc(sizeof(*ps), GFP_KERNEL); if (!ps) goto free_preds; parse_init(ps, filter_ops, filter_str); err = filter_parse(ps); if (err) goto free_ps; err = replace_preds(call, filter, ps, filter_str, false); if (!err) event->filter = filter; free_ps: filter_opstack_clear(ps); postfix_clear(ps); kfree(ps); free_preds: if (err) __free_preds(filter); out_unlock: mutex_unlock(&event_mutex); return err; } #endif /* CONFIG_PERF_EVENTS */
gpl-2.0
MoKee/android_kernel_samsung_smdk4x12
arch/arm/plat-samsung/dev-ts.c
548
1198
/* linux/arch/arm/mach-s3c64xx/dev-ts.c * * Copyright (c) 2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk>, <ben-linux@fluff.org> * * Adapted by Maurus Cuelenaere for s3c64xx * * S3C64XX series device definition for touchscreen device * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/platform_device.h> #include <mach/irqs.h> #include <mach/map.h> #include <plat/devs.h> #include <plat/ts.h> static struct resource s3c_ts_resource[] = { [0] = { .start = SAMSUNG_PA_ADC, .end = SAMSUNG_PA_ADC + SZ_256 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TC, .end = IRQ_TC, .flags = IORESOURCE_IRQ, }, }; struct platform_device s3c_device_ts = { .name = "s3c64xx-ts", .id = 0, .num_resources = ARRAY_SIZE(s3c_ts_resource), .resource = s3c_ts_resource, }; void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd) { s3c_set_platdata(pd, sizeof(struct s3c2410_ts_mach_info), &s3c_device_ts); }
gpl-2.0
telf/error_state_capture_improvement
net/ax25/ax25_uid.c
804
4916
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <net/ip.h>
#include <net/arp.h>

/*
 * Callsign/UID mapper. This is in kernel space for security on multi-amateur
 * machines.
 */

/* Global uid <-> callsign association list, protected by ax25_uid_lock. */
static HLIST_HEAD(ax25_uid_list);
static DEFINE_RWLOCK(ax25_uid_lock);

/* Exported policy flag; read by the rest of the AX.25 stack (not here). */
int ax25_uid_policy;
EXPORT_SYMBOL(ax25_uid_policy);

/**
 * ax25_findbyuid - look up the callsign association for a kernel uid
 * @uid: kernel uid to search for
 *
 * Walks ax25_uid_list under the read lock.  On a match the entry's
 * refcount is bumped (ax25_uid_hold) before it is returned, so the
 * caller owns a reference and must drop it with ax25_uid_put().
 * Returns NULL when no association exists for @uid.
 */
ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
{
	ax25_uid_assoc *ax25_uid, *res = NULL;

	read_lock(&ax25_uid_lock);
	ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
		if (uid_eq(ax25_uid->uid, uid)) {
			/* Take a reference for the caller before unlocking. */
			ax25_uid_hold(ax25_uid);
			res = ax25_uid;
			break;
		}
	}
	read_unlock(&ax25_uid_lock);

	return res;
}
EXPORT_SYMBOL(ax25_findbyuid);

/**
 * ax25_uid_ioctl - handle the SIOCAX25{GET,ADD,DEL}UID ioctls
 * @cmd: ioctl command code
 * @sax: user-supplied callsign/uid pair (already copied into kernel space)
 *
 * GETUID returns the munged uid mapped to the given callsign (or -ENOENT);
 * ADDUID and DELUID require CAP_NET_ADMIN and insert/remove an
 * association.  Returns 0 or a negative errno.
 */
int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
{
	ax25_uid_assoc *ax25_uid;
	ax25_uid_assoc *user;
	/*
	 * NOTE(review): res is unsigned long yet holds -ENOENT and is
	 * returned through an int; relies on the usual truncation to
	 * deliver the negative errno — long-standing quirk, confirm
	 * before "fixing".
	 */
	unsigned long res;

	switch (cmd) {
	case SIOCAX25GETUID:
		res = -ENOENT;
		read_lock(&ax25_uid_lock);
		ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
				/* Translate into the caller's user namespace. */
				res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
				break;
			}
		}
		read_unlock(&ax25_uid_lock);
		return res;

	case SIOCAX25ADDUID:
	{
		kuid_t sax25_kuid;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid);
		if (!uid_valid(sax25_kuid))
			return -EINVAL;
		/* Reject duplicates; drop the reference findbyuid took. */
		user = ax25_findbyuid(sax25_kuid);
		if (user) {
			ax25_uid_put(user);
			return -EEXIST;
		}
		/* uid 0 is reserved as "no mapping". */
		if (sax->sax25_uid == 0)
			return -EINVAL;
		if ((ax25_uid = kmalloc(sizeof(*ax25_uid), GFP_KERNEL)) == NULL)
			return -ENOMEM;

		/* New entry starts with a single (list) reference. */
		atomic_set(&ax25_uid->refcount, 1);
		ax25_uid->uid = sax25_kuid;
		ax25_uid->call = sax->sax25_call;

		write_lock(&ax25_uid_lock);
		hlist_add_head(&ax25_uid->uid_node, &ax25_uid_list);
		write_unlock(&ax25_uid_lock);

		return 0;
	}
	case SIOCAX25DELUID:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* Left NULL when the loop completes without a match. */
		ax25_uid = NULL;
		write_lock(&ax25_uid_lock);
		ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
				break;
		}
		if (ax25_uid == NULL) {
			write_unlock(&ax25_uid_lock);
			return -ENOENT;
		}
		/* Unlink, then drop the list's reference. */
		hlist_del_init(&ax25_uid->uid_node);
		ax25_uid_put(ax25_uid);
		write_unlock(&ax25_uid_lock);

		return 0;

	default:
		return -EINVAL;
	}

	return -EINVAL;	/*NOTREACHED */
}

#ifdef CONFIG_PROC_FS

/*
 * /proc/net/ax25_calls seq_file support.  The read lock is held from
 * ->start to ->stop, covering the whole traversal.
 */
static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(ax25_uid_lock)
{
	read_lock(&ax25_uid_lock);
	return seq_hlist_start_head(&ax25_uid_list, *pos);
}

static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &ax25_uid_list, pos);
}

static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
	__releases(ax25_uid_lock)
{
	read_unlock(&ax25_uid_lock);
}

/* Emit the policy header line, then one "uid callsign" line per entry. */
static int ax25_uid_seq_show(struct seq_file *seq, void *v)
{
	char buf[11];	/* ax2asc() scratch for the formatted callsign */

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
	else {
		struct ax25_uid_assoc *pt;

		pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
		seq_printf(seq, "%6d %s\n",
			from_kuid_munged(seq_user_ns(seq), pt->uid),
			ax2asc(buf, &pt->call));
	}
	return 0;
}

static const struct seq_operations ax25_uid_seqops = {
	.start = ax25_uid_seq_start,
	.next = ax25_uid_seq_next,
	.stop = ax25_uid_seq_stop,
	.show = ax25_uid_seq_show,
};

static int ax25_uid_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ax25_uid_seqops);
}

const struct file_operations ax25_uid_fops = {
	.owner = THIS_MODULE,
	.open = ax25_uid_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif

/*
 * Free all memory associated with UID/Callsign structures.
 */
void __exit ax25_uid_free(void)
{
	ax25_uid_assoc *ax25_uid;

	write_lock(&ax25_uid_lock);
again:
	/* Restart the walk after every unlink: deleting the current node
	 * invalidates the iterator, so goto again re-reads the list head. */
	ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
		hlist_del_init(&ax25_uid->uid_node);
		ax25_uid_put(ax25_uid);
		goto again;
	}
	write_unlock(&ax25_uid_lock);
}
gpl-2.0
KOala888/GB_kernel
linux_kernel_galaxyplayer-master/drivers/acpi/thermal.c
804
40569
/* * acpi_thermal.c - ACPI Thermal Zone Driver ($Revision: 41 $) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This driver fully implements the ACPI thermal policy as described in the * ACPI 2.0 Specification. * * TBD: 1. Implement passive cooling hysteresis. * 2. Enhance passive cooling (CPU) states/limit interface to support * concepts of 'multiple limiters', upper/lower limits, etc. 
* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/dmi.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/jiffies.h> #include <linux/kmod.h> #include <linux/seq_file.h> #include <linux/reboot.h> #include <linux/device.h> #include <asm/uaccess.h> #include <linux/thermal.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #define PREFIX "ACPI: " #define ACPI_THERMAL_CLASS "thermal_zone" #define ACPI_THERMAL_DEVICE_NAME "Thermal Zone" #define ACPI_THERMAL_FILE_STATE "state" #define ACPI_THERMAL_FILE_TEMPERATURE "temperature" #define ACPI_THERMAL_FILE_TRIP_POINTS "trip_points" #define ACPI_THERMAL_FILE_COOLING_MODE "cooling_mode" #define ACPI_THERMAL_FILE_POLLING_FREQ "polling_frequency" #define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80 #define ACPI_THERMAL_NOTIFY_THRESHOLDS 0x81 #define ACPI_THERMAL_NOTIFY_DEVICES 0x82 #define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0 #define ACPI_THERMAL_NOTIFY_HOT 0xF1 #define ACPI_THERMAL_MODE_ACTIVE 0x00 #define ACPI_THERMAL_MAX_ACTIVE 10 #define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65 #define _COMPONENT ACPI_THERMAL_COMPONENT ACPI_MODULE_NAME("thermal"); MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI Thermal Zone Driver"); MODULE_LICENSE("GPL"); static int act; module_param(act, int, 0644); MODULE_PARM_DESC(act, "Disable or override all lowest active trip points."); static int crt; module_param(crt, int, 0644); MODULE_PARM_DESC(crt, "Disable or lower all critical trip points."); static int tzp; module_param(tzp, int, 0444); MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds."); static int nocrt; module_param(nocrt, int, 0); MODULE_PARM_DESC(nocrt, "Set to take no action upon ACPI thermal zone critical trips points."); static int off; module_param(off, int, 0); MODULE_PARM_DESC(off, "Set to disable ACPI thermal support."); static int psv; module_param(psv, int, 0644); MODULE_PARM_DESC(psv, "Disable or override all 
passive trip points."); static int acpi_thermal_add(struct acpi_device *device); static int acpi_thermal_remove(struct acpi_device *device, int type); static int acpi_thermal_resume(struct acpi_device *device); static void acpi_thermal_notify(struct acpi_device *device, u32 event); static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file); static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file); static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file); static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file); static ssize_t acpi_thermal_write_cooling_mode(struct file *, const char __user *, size_t, loff_t *); static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file); static ssize_t acpi_thermal_write_polling(struct file *, const char __user *, size_t, loff_t *); static const struct acpi_device_id thermal_device_ids[] = { {ACPI_THERMAL_HID, 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, thermal_device_ids); static struct acpi_driver acpi_thermal_driver = { .name = "thermal", .class = ACPI_THERMAL_CLASS, .ids = thermal_device_ids, .ops = { .add = acpi_thermal_add, .remove = acpi_thermal_remove, .resume = acpi_thermal_resume, .notify = acpi_thermal_notify, }, }; struct acpi_thermal_state { u8 critical:1; u8 hot:1; u8 passive:1; u8 active:1; u8 reserved:4; int active_index; }; struct acpi_thermal_state_flags { u8 valid:1; u8 enabled:1; u8 reserved:6; }; struct acpi_thermal_critical { struct acpi_thermal_state_flags flags; unsigned long temperature; }; struct acpi_thermal_hot { struct acpi_thermal_state_flags flags; unsigned long temperature; }; struct acpi_thermal_passive { struct acpi_thermal_state_flags flags; unsigned long temperature; unsigned long tc1; unsigned long tc2; unsigned long tsp; struct acpi_handle_list devices; }; struct acpi_thermal_active { struct acpi_thermal_state_flags flags; unsigned long temperature; struct acpi_handle_list devices; }; struct 
acpi_thermal_trips { struct acpi_thermal_critical critical; struct acpi_thermal_hot hot; struct acpi_thermal_passive passive; struct acpi_thermal_active active[ACPI_THERMAL_MAX_ACTIVE]; }; struct acpi_thermal_flags { u8 cooling_mode:1; /* _SCP */ u8 devices:1; /* _TZD */ u8 reserved:6; }; struct acpi_thermal { struct acpi_device * device; acpi_bus_id name; unsigned long temperature; unsigned long last_temperature; unsigned long polling_frequency; volatile u8 zombie; struct acpi_thermal_flags flags; struct acpi_thermal_state state; struct acpi_thermal_trips trips; struct acpi_handle_list devices; struct thermal_zone_device *thermal_zone; int tz_enabled; int kelvin_offset; struct mutex lock; }; static const struct file_operations acpi_thermal_state_fops = { .owner = THIS_MODULE, .open = acpi_thermal_state_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations acpi_thermal_temp_fops = { .owner = THIS_MODULE, .open = acpi_thermal_temp_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations acpi_thermal_trip_fops = { .owner = THIS_MODULE, .open = acpi_thermal_trip_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations acpi_thermal_cooling_fops = { .owner = THIS_MODULE, .open = acpi_thermal_cooling_open_fs, .read = seq_read, .write = acpi_thermal_write_cooling_mode, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations acpi_thermal_polling_fops = { .owner = THIS_MODULE, .open = acpi_thermal_polling_open_fs, .read = seq_read, .write = acpi_thermal_write_polling, .llseek = seq_lseek, .release = single_release, }; /* -------------------------------------------------------------------------- Thermal Zone Management -------------------------------------------------------------------------- */ static int acpi_thermal_get_temperature(struct acpi_thermal *tz) { acpi_status 
status = AE_OK; unsigned long long tmp; if (!tz) return -EINVAL; tz->last_temperature = tz->temperature; status = acpi_evaluate_integer(tz->device->handle, "_TMP", NULL, &tmp); if (ACPI_FAILURE(status)) return -ENODEV; tz->temperature = tmp; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %lu dK\n", tz->temperature)); return 0; } static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz) { acpi_status status = AE_OK; unsigned long long tmp; if (!tz) return -EINVAL; status = acpi_evaluate_integer(tz->device->handle, "_TZP", NULL, &tmp); if (ACPI_FAILURE(status)) return -ENODEV; tz->polling_frequency = tmp; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency is %lu dS\n", tz->polling_frequency)); return 0; } static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds) { if (!tz) return -EINVAL; tz->polling_frequency = seconds * 10; /* Convert value to deci-seconds */ tz->thermal_zone->polling_delay = seconds * 1000; if (tz->tz_enabled) thermal_zone_device_update(tz->thermal_zone); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency set to %lu seconds\n", tz->polling_frequency/10)); return 0; } static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode) { acpi_status status = AE_OK; union acpi_object arg0 = { ACPI_TYPE_INTEGER }; struct acpi_object_list arg_list = { 1, &arg0 }; acpi_handle handle = NULL; if (!tz) return -EINVAL; status = acpi_get_handle(tz->device->handle, "_SCP", &handle); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n")); return -ENODEV; } arg0.integer.value = mode; status = acpi_evaluate_object(handle, NULL, &arg_list, NULL); if (ACPI_FAILURE(status)) return -ENODEV; return 0; } #define ACPI_TRIPS_CRITICAL 0x01 #define ACPI_TRIPS_HOT 0x02 #define ACPI_TRIPS_PASSIVE 0x04 #define ACPI_TRIPS_ACTIVE 0x08 #define ACPI_TRIPS_DEVICES 0x10 #define ACPI_TRIPS_REFRESH_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE) #define ACPI_TRIPS_REFRESH_DEVICES ACPI_TRIPS_DEVICES #define 
ACPI_TRIPS_INIT (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \ ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \ ACPI_TRIPS_DEVICES) /* * This exception is thrown out in two cases: * 1.An invalid trip point becomes invalid or a valid trip point becomes invalid * when re-evaluating the AML code. * 2.TODO: Devices listed in _PSL, _ALx, _TZD may change. * We need to re-bind the cooling devices of a thermal zone when this occurs. */ #define ACPI_THERMAL_TRIPS_EXCEPTION(flags, str) \ do { \ if (flags != ACPI_TRIPS_INIT) \ ACPI_EXCEPTION((AE_INFO, AE_ERROR, \ "ACPI thermal trip point %s changed\n" \ "Please send acpidump to linux-acpi@vger.kernel.org\n", str)); \ } while (0) static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) { acpi_status status = AE_OK; unsigned long long tmp; struct acpi_handle_list devices; int valid = 0; int i; /* Critical Shutdown */ if (flag & ACPI_TRIPS_CRITICAL) { status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL, &tmp); tz->trips.critical.temperature = tmp; /* * Treat freezing temperatures as invalid as well; some * BIOSes return really low values and cause reboots at startup. * Below zero (Celsius) values clearly aren't right for sure.. * ... so lets discard those as invalid. 
*/ if (ACPI_FAILURE(status)) { tz->trips.critical.flags.valid = 0; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No critical threshold\n")); } else if (tmp <= 2732) { printk(KERN_WARNING FW_BUG "Invalid critical threshold " "(%llu)\n", tmp); tz->trips.critical.flags.valid = 0; } else { tz->trips.critical.flags.valid = 1; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found critical threshold [%lu]\n", tz->trips.critical.temperature)); } if (tz->trips.critical.flags.valid == 1) { if (crt == -1) { tz->trips.critical.flags.valid = 0; } else if (crt > 0) { unsigned long crt_k = CELSIUS_TO_KELVIN(crt); /* * Allow override critical threshold */ if (crt_k > tz->trips.critical.temperature) printk(KERN_WARNING PREFIX "Critical threshold %d C\n", crt); tz->trips.critical.temperature = crt_k; } } } /* Critical Sleep (optional) */ if (flag & ACPI_TRIPS_HOT) { status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp); if (ACPI_FAILURE(status)) { tz->trips.hot.flags.valid = 0; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No hot threshold\n")); } else { tz->trips.hot.temperature = tmp; tz->trips.hot.flags.valid = 1; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found hot threshold [%lu]\n", tz->trips.critical.temperature)); } } /* Passive (optional) */ if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.flags.valid) || (flag == ACPI_TRIPS_INIT)) { valid = tz->trips.passive.flags.valid; if (psv == -1) { status = AE_SUPPORT; } else if (psv > 0) { tmp = CELSIUS_TO_KELVIN(psv); status = AE_OK; } else { status = acpi_evaluate_integer(tz->device->handle, "_PSV", NULL, &tmp); } if (ACPI_FAILURE(status)) tz->trips.passive.flags.valid = 0; else { tz->trips.passive.temperature = tmp; tz->trips.passive.flags.valid = 1; if (flag == ACPI_TRIPS_INIT) { status = acpi_evaluate_integer( tz->device->handle, "_TC1", NULL, &tmp); if (ACPI_FAILURE(status)) tz->trips.passive.flags.valid = 0; else tz->trips.passive.tc1 = tmp; status = acpi_evaluate_integer( tz->device->handle, "_TC2", NULL, &tmp); if (ACPI_FAILURE(status)) 
tz->trips.passive.flags.valid = 0; else tz->trips.passive.tc2 = tmp; status = acpi_evaluate_integer( tz->device->handle, "_TSP", NULL, &tmp); if (ACPI_FAILURE(status)) tz->trips.passive.flags.valid = 0; else tz->trips.passive.tsp = tmp; } } } if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) { memset(&devices, 0, sizeof(struct acpi_handle_list)); status = acpi_evaluate_reference(tz->device->handle, "_PSL", NULL, &devices); if (ACPI_FAILURE(status)) { printk(KERN_WARNING PREFIX "Invalid passive threshold\n"); tz->trips.passive.flags.valid = 0; } else tz->trips.passive.flags.valid = 1; if (memcmp(&tz->trips.passive.devices, &devices, sizeof(struct acpi_handle_list))) { memcpy(&tz->trips.passive.devices, &devices, sizeof(struct acpi_handle_list)); ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device"); } } if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) { if (valid != tz->trips.passive.flags.valid) ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state"); } /* Active (optional) */ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { char name[5] = { '_', 'A', 'C', ('0' + i), '\0' }; valid = tz->trips.active[i].flags.valid; if (act == -1) break; /* disable all active trip points */ if ((flag == ACPI_TRIPS_INIT) || ((flag & ACPI_TRIPS_ACTIVE) && tz->trips.active[i].flags.valid)) { status = acpi_evaluate_integer(tz->device->handle, name, NULL, &tmp); if (ACPI_FAILURE(status)) { tz->trips.active[i].flags.valid = 0; if (i == 0) break; if (act <= 0) break; if (i == 1) tz->trips.active[0].temperature = CELSIUS_TO_KELVIN(act); else /* * Don't allow override higher than * the next higher trip point */ tz->trips.active[i - 1].temperature = (tz->trips.active[i - 2].temperature < CELSIUS_TO_KELVIN(act) ? 
tz->trips.active[i - 2].temperature : CELSIUS_TO_KELVIN(act)); break; } else { tz->trips.active[i].temperature = tmp; tz->trips.active[i].flags.valid = 1; } } name[2] = 'L'; if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid ) { memset(&devices, 0, sizeof(struct acpi_handle_list)); status = acpi_evaluate_reference(tz->device->handle, name, NULL, &devices); if (ACPI_FAILURE(status)) { printk(KERN_WARNING PREFIX "Invalid active%d threshold\n", i); tz->trips.active[i].flags.valid = 0; } else tz->trips.active[i].flags.valid = 1; if (memcmp(&tz->trips.active[i].devices, &devices, sizeof(struct acpi_handle_list))) { memcpy(&tz->trips.active[i].devices, &devices, sizeof(struct acpi_handle_list)); ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device"); } } if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES)) if (valid != tz->trips.active[i].flags.valid) ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state"); if (!tz->trips.active[i].flags.valid) break; } if (flag & ACPI_TRIPS_DEVICES) { memset(&devices, 0, sizeof(struct acpi_handle_list)); status = acpi_evaluate_reference(tz->device->handle, "_TZD", NULL, &devices); if (memcmp(&tz->devices, &devices, sizeof(struct acpi_handle_list))) { memcpy(&tz->devices, &devices, sizeof(struct acpi_handle_list)); ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device"); } } return 0; } static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) { int i, valid, ret = acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT); if (ret) return ret; valid = tz->trips.critical.flags.valid | tz->trips.hot.flags.valid | tz->trips.passive.flags.valid; for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) valid |= tz->trips.active[i].flags.valid; if (!valid) { printk(KERN_WARNING FW_BUG "No valid trip found\n"); return -ENODEV; } return 0; } static void acpi_thermal_check(void *data) { struct acpi_thermal *tz = data; thermal_zone_device_update(tz->thermal_zone); } /* sys I/F for generic thermal sysfs support */ #define KELVIN_TO_MILLICELSIUS(t, off) (((t) - (off)) * 
100) static int thermal_get_temp(struct thermal_zone_device *thermal, unsigned long *temp) { struct acpi_thermal *tz = thermal->devdata; int result; if (!tz) return -EINVAL; result = acpi_thermal_get_temperature(tz); if (result) return result; *temp = KELVIN_TO_MILLICELSIUS(tz->temperature, tz->kelvin_offset); return 0; } static const char enabled[] = "kernel"; static const char disabled[] = "user"; static int thermal_get_mode(struct thermal_zone_device *thermal, enum thermal_device_mode *mode) { struct acpi_thermal *tz = thermal->devdata; if (!tz) return -EINVAL; *mode = tz->tz_enabled ? THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED; return 0; } static int thermal_set_mode(struct thermal_zone_device *thermal, enum thermal_device_mode mode) { struct acpi_thermal *tz = thermal->devdata; int enable; if (!tz) return -EINVAL; /* * enable/disable thermal management from ACPI thermal driver */ if (mode == THERMAL_DEVICE_ENABLED) enable = 1; else if (mode == THERMAL_DEVICE_DISABLED) enable = 0; else return -EINVAL; if (enable != tz->tz_enabled) { tz->tz_enabled = enable; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s ACPI thermal control\n", tz->tz_enabled ? 
enabled : disabled)); acpi_thermal_check(tz); } return 0; } static int thermal_get_trip_type(struct thermal_zone_device *thermal, int trip, enum thermal_trip_type *type) { struct acpi_thermal *tz = thermal->devdata; int i; if (!tz || trip < 0) return -EINVAL; if (tz->trips.critical.flags.valid) { if (!trip) { *type = THERMAL_TRIP_CRITICAL; return 0; } trip--; } if (tz->trips.hot.flags.valid) { if (!trip) { *type = THERMAL_TRIP_HOT; return 0; } trip--; } if (tz->trips.passive.flags.valid) { if (!trip) { *type = THERMAL_TRIP_PASSIVE; return 0; } trip--; } for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; i++) { if (!trip) { *type = THERMAL_TRIP_ACTIVE; return 0; } trip--; } return -EINVAL; } static int thermal_get_trip_temp(struct thermal_zone_device *thermal, int trip, unsigned long *temp) { struct acpi_thermal *tz = thermal->devdata; int i; if (!tz || trip < 0) return -EINVAL; if (tz->trips.critical.flags.valid) { if (!trip) { *temp = KELVIN_TO_MILLICELSIUS( tz->trips.critical.temperature, tz->kelvin_offset); return 0; } trip--; } if (tz->trips.hot.flags.valid) { if (!trip) { *temp = KELVIN_TO_MILLICELSIUS( tz->trips.hot.temperature, tz->kelvin_offset); return 0; } trip--; } if (tz->trips.passive.flags.valid) { if (!trip) { *temp = KELVIN_TO_MILLICELSIUS( tz->trips.passive.temperature, tz->kelvin_offset); return 0; } trip--; } for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; i++) { if (!trip) { *temp = KELVIN_TO_MILLICELSIUS( tz->trips.active[i].temperature, tz->kelvin_offset); return 0; } trip--; } return -EINVAL; } static int thermal_get_crit_temp(struct thermal_zone_device *thermal, unsigned long *temperature) { struct acpi_thermal *tz = thermal->devdata; if (tz->trips.critical.flags.valid) { *temperature = KELVIN_TO_MILLICELSIUS( tz->trips.critical.temperature, tz->kelvin_offset); return 0; } else return -EINVAL; } static int thermal_notify(struct thermal_zone_device *thermal, int trip, enum thermal_trip_type 
trip_type) { u8 type = 0; struct acpi_thermal *tz = thermal->devdata; if (trip_type == THERMAL_TRIP_CRITICAL) type = ACPI_THERMAL_NOTIFY_CRITICAL; else if (trip_type == THERMAL_TRIP_HOT) type = ACPI_THERMAL_NOTIFY_HOT; else return 0; acpi_bus_generate_proc_event(tz->device, type, 1); acpi_bus_generate_netlink_event(tz->device->pnp.device_class, dev_name(&tz->device->dev), type, 1); if (trip_type == THERMAL_TRIP_CRITICAL && nocrt) return 1; return 0; } typedef int (*cb)(struct thermal_zone_device *, int, struct thermal_cooling_device *); static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, struct thermal_cooling_device *cdev, cb action) { struct acpi_device *device = cdev->devdata; struct acpi_thermal *tz = thermal->devdata; struct acpi_device *dev; acpi_status status; acpi_handle handle; int i; int j; int trip = -1; int result = 0; if (tz->trips.critical.flags.valid) trip++; if (tz->trips.hot.flags.valid) trip++; if (tz->trips.passive.flags.valid) { trip++; for (i = 0; i < tz->trips.passive.devices.count; i++) { handle = tz->trips.passive.devices.handles[i]; status = acpi_bus_get_device(handle, &dev); if (ACPI_SUCCESS(status) && (dev == device)) { result = action(thermal, trip, cdev); if (result) goto failed; } } } for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { if (!tz->trips.active[i].flags.valid) break; trip++; for (j = 0; j < tz->trips.active[i].devices.count; j++) { handle = tz->trips.active[i].devices.handles[j]; status = acpi_bus_get_device(handle, &dev); if (ACPI_SUCCESS(status) && (dev == device)) { result = action(thermal, trip, cdev); if (result) goto failed; } } } for (i = 0; i < tz->devices.count; i++) { handle = tz->devices.handles[i]; status = acpi_bus_get_device(handle, &dev); if (ACPI_SUCCESS(status) && (dev == device)) { result = action(thermal, -1, cdev); if (result) goto failed; } } failed: return result; } static int acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal, struct thermal_cooling_device 
*cdev) { return acpi_thermal_cooling_device_cb(thermal, cdev, thermal_zone_bind_cooling_device); } static int acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal, struct thermal_cooling_device *cdev) { return acpi_thermal_cooling_device_cb(thermal, cdev, thermal_zone_unbind_cooling_device); } static struct thermal_zone_device_ops acpi_thermal_zone_ops = { .bind = acpi_thermal_bind_cooling_device, .unbind = acpi_thermal_unbind_cooling_device, .get_temp = thermal_get_temp, .get_mode = thermal_get_mode, .set_mode = thermal_set_mode, .get_trip_type = thermal_get_trip_type, .get_trip_temp = thermal_get_trip_temp, .get_crit_temp = thermal_get_crit_temp, .notify = thermal_notify, }; static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz) { int trips = 0; int result; acpi_status status; int i; if (tz->trips.critical.flags.valid) trips++; if (tz->trips.hot.flags.valid) trips++; if (tz->trips.passive.flags.valid) trips++; for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; i++, trips++); if (tz->trips.passive.flags.valid) tz->thermal_zone = thermal_zone_device_register("acpitz", trips, tz, &acpi_thermal_zone_ops, tz->trips.passive.tc1, tz->trips.passive.tc2, tz->trips.passive.tsp*100, tz->polling_frequency*100); else tz->thermal_zone = thermal_zone_device_register("acpitz", trips, tz, &acpi_thermal_zone_ops, 0, 0, 0, tz->polling_frequency*100); if (IS_ERR(tz->thermal_zone)) return -ENODEV; result = sysfs_create_link(&tz->device->dev.kobj, &tz->thermal_zone->device.kobj, "thermal_zone"); if (result) return result; result = sysfs_create_link(&tz->thermal_zone->device.kobj, &tz->device->dev.kobj, "device"); if (result) return result; status = acpi_attach_data(tz->device->handle, acpi_bus_private_data_handler, tz->thermal_zone); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Error attaching device data\n"); return -ENODEV; } tz->tz_enabled = 1; dev_info(&tz->device->dev, "registered as thermal_zone%d\n", 
tz->thermal_zone->id); return 0; } static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) { sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone"); sysfs_remove_link(&tz->thermal_zone->device.kobj, "device"); thermal_zone_device_unregister(tz->thermal_zone); tz->thermal_zone = NULL; acpi_detach_data(tz->device->handle, acpi_bus_private_data_handler); } /* -------------------------------------------------------------------------- FS Interface (/proc) -------------------------------------------------------------------------- */ static struct proc_dir_entry *acpi_thermal_dir; static int acpi_thermal_state_seq_show(struct seq_file *seq, void *offset) { struct acpi_thermal *tz = seq->private; if (!tz) goto end; seq_puts(seq, "state: "); if (!tz->state.critical && !tz->state.hot && !tz->state.passive && !tz->state.active) seq_puts(seq, "ok\n"); else { if (tz->state.critical) seq_puts(seq, "critical "); if (tz->state.hot) seq_puts(seq, "hot "); if (tz->state.passive) seq_puts(seq, "passive "); if (tz->state.active) seq_printf(seq, "active[%d]", tz->state.active_index); seq_puts(seq, "\n"); } end: return 0; } static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_thermal_state_seq_show, PDE(inode)->data); } static int acpi_thermal_temp_seq_show(struct seq_file *seq, void *offset) { int result = 0; struct acpi_thermal *tz = seq->private; if (!tz) goto end; result = acpi_thermal_get_temperature(tz); if (result) goto end; seq_printf(seq, "temperature: %ld C\n", KELVIN_TO_CELSIUS(tz->temperature)); end: return 0; } static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_thermal_temp_seq_show, PDE(inode)->data); } static int acpi_thermal_trip_seq_show(struct seq_file *seq, void *offset) { struct acpi_thermal *tz = seq->private; struct acpi_device *device; acpi_status status; int i = 0; int j = 0; if (!tz) goto end; if (tz->trips.critical.flags.valid) 
seq_printf(seq, "critical (S5): %ld C%s", KELVIN_TO_CELSIUS(tz->trips.critical.temperature), nocrt ? " <disabled>\n" : "\n"); if (tz->trips.hot.flags.valid) seq_printf(seq, "hot (S4): %ld C%s", KELVIN_TO_CELSIUS(tz->trips.hot.temperature), nocrt ? " <disabled>\n" : "\n"); if (tz->trips.passive.flags.valid) { seq_printf(seq, "passive: %ld C: tc1=%lu tc2=%lu tsp=%lu devices=", KELVIN_TO_CELSIUS(tz->trips.passive.temperature), tz->trips.passive.tc1, tz->trips.passive.tc2, tz->trips.passive.tsp); for (j = 0; j < tz->trips.passive.devices.count; j++) { status = acpi_bus_get_device(tz->trips.passive.devices. handles[j], &device); seq_printf(seq, "%4.4s ", status ? "" : acpi_device_bid(device)); } seq_puts(seq, "\n"); } else { seq_printf(seq, "passive (forced):"); if (tz->thermal_zone->forced_passive) seq_printf(seq, " %i C\n", tz->thermal_zone->forced_passive / 1000); else seq_printf(seq, "<not set>\n"); } for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { if (!(tz->trips.active[i].flags.valid)) break; seq_printf(seq, "active[%d]: %ld C: devices=", i, KELVIN_TO_CELSIUS(tz->trips.active[i].temperature)); for (j = 0; j < tz->trips.active[i].devices.count; j++){ status = acpi_bus_get_device(tz->trips.active[i]. devices.handles[j], &device); seq_printf(seq, "%4.4s ", status ? 
"" : acpi_device_bid(device)); } seq_puts(seq, "\n"); } end: return 0; } static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data); } static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset) { struct acpi_thermal *tz = seq->private; if (!tz) goto end; if (!tz->flags.cooling_mode) seq_puts(seq, "<setting not supported>\n"); else seq_puts(seq, "0 - Active; 1 - Passive\n"); end: return 0; } static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_thermal_cooling_seq_show, PDE(inode)->data); } static ssize_t acpi_thermal_write_cooling_mode(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) { struct seq_file *m = file->private_data; struct acpi_thermal *tz = m->private; int result = 0; char mode_string[12] = { '\0' }; if (!tz || (count > sizeof(mode_string) - 1)) return -EINVAL; if (!tz->flags.cooling_mode) return -ENODEV; if (copy_from_user(mode_string, buffer, count)) return -EFAULT; mode_string[count] = '\0'; result = acpi_thermal_set_cooling_mode(tz, simple_strtoul(mode_string, NULL, 0)); if (result) return result; acpi_thermal_check(tz); return count; } static int acpi_thermal_polling_seq_show(struct seq_file *seq, void *offset) { struct acpi_thermal *tz = seq->private; if (!tz) goto end; if (!tz->thermal_zone->polling_delay) { seq_puts(seq, "<polling disabled>\n"); goto end; } seq_printf(seq, "polling frequency: %d seconds\n", (tz->thermal_zone->polling_delay / 1000)); end: return 0; } static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_thermal_polling_seq_show, PDE(inode)->data); } static ssize_t acpi_thermal_write_polling(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) { struct seq_file *m = file->private_data; struct acpi_thermal *tz = m->private; int result = 0; char polling_string[12] 
= { '\0' }; int seconds = 0; if (!tz || (count > sizeof(polling_string) - 1)) return -EINVAL; if (copy_from_user(polling_string, buffer, count)) return -EFAULT; polling_string[count] = '\0'; seconds = simple_strtoul(polling_string, NULL, 0); result = acpi_thermal_set_polling(tz, seconds); if (result) return result; acpi_thermal_check(tz); return count; } static int acpi_thermal_add_fs(struct acpi_device *device) { struct proc_dir_entry *entry = NULL; if (!acpi_device_dir(device)) { acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), acpi_thermal_dir); if (!acpi_device_dir(device)) return -ENODEV; } /* 'state' [R] */ entry = proc_create_data(ACPI_THERMAL_FILE_STATE, S_IRUGO, acpi_device_dir(device), &acpi_thermal_state_fops, acpi_driver_data(device)); if (!entry) return -ENODEV; /* 'temperature' [R] */ entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE, S_IRUGO, acpi_device_dir(device), &acpi_thermal_temp_fops, acpi_driver_data(device)); if (!entry) return -ENODEV; /* 'trip_points' [R] */ entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS, S_IRUGO, acpi_device_dir(device), &acpi_thermal_trip_fops, acpi_driver_data(device)); if (!entry) return -ENODEV; /* 'cooling_mode' [R/W] */ entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE, S_IFREG | S_IRUGO | S_IWUSR, acpi_device_dir(device), &acpi_thermal_cooling_fops, acpi_driver_data(device)); if (!entry) return -ENODEV; /* 'polling_frequency' [R/W] */ entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ, S_IFREG | S_IRUGO | S_IWUSR, acpi_device_dir(device), &acpi_thermal_polling_fops, acpi_driver_data(device)); if (!entry) return -ENODEV; return 0; } static int acpi_thermal_remove_fs(struct acpi_device *device) { if (acpi_device_dir(device)) { remove_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ, acpi_device_dir(device)); remove_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE, acpi_device_dir(device)); remove_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS, acpi_device_dir(device)); 
remove_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE, acpi_device_dir(device)); remove_proc_entry(ACPI_THERMAL_FILE_STATE, acpi_device_dir(device)); remove_proc_entry(acpi_device_bid(device), acpi_thermal_dir); acpi_device_dir(device) = NULL; } return 0; } /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ static void acpi_thermal_notify(struct acpi_device *device, u32 event) { struct acpi_thermal *tz = acpi_driver_data(device); if (!tz) return; switch (event) { case ACPI_THERMAL_NOTIFY_TEMPERATURE: acpi_thermal_check(tz); break; case ACPI_THERMAL_NOTIFY_THRESHOLDS: acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); acpi_thermal_check(tz); acpi_bus_generate_proc_event(device, event, 0); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; case ACPI_THERMAL_NOTIFY_DEVICES: acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); acpi_thermal_check(tz); acpi_bus_generate_proc_event(device, event, 0); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; default: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported event [0x%x]\n", event)); break; } } static int acpi_thermal_get_info(struct acpi_thermal *tz) { int result = 0; if (!tz) return -EINVAL; /* Get temperature [_TMP] (required) */ result = acpi_thermal_get_temperature(tz); if (result) return result; /* Get trip points [_CRT, _PSV, etc.] 
(required) */ result = acpi_thermal_get_trip_points(tz); if (result) return result; /* Set the cooling mode [_SCP] to active cooling (default) */ result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE); if (!result) tz->flags.cooling_mode = 1; /* Get default polling frequency [_TZP] (optional) */ if (tzp) tz->polling_frequency = tzp; else acpi_thermal_get_polling_frequency(tz); return 0; } /* * The exact offset between Kelvin and degree Celsius is 273.15. However ACPI * handles temperature values with a single decimal place. As a consequence, * some implementations use an offset of 273.1 and others use an offset of * 273.2. Try to find out which one is being used, to present the most * accurate and visually appealing number. * * The heuristic below should work for all ACPI thermal zones which have a * critical trip point with a value being a multiple of 0.5 degree Celsius. */ static void acpi_thermal_guess_offset(struct acpi_thermal *tz) { if (tz->trips.critical.flags.valid && (tz->trips.critical.temperature % 5) == 1) tz->kelvin_offset = 2731; else tz->kelvin_offset = 2732; } static int acpi_thermal_add(struct acpi_device *device) { int result = 0; struct acpi_thermal *tz = NULL; if (!device) return -EINVAL; tz = kzalloc(sizeof(struct acpi_thermal), GFP_KERNEL); if (!tz) return -ENOMEM; tz->device = device; strcpy(tz->name, device->pnp.bus_id); strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS); device->driver_data = tz; mutex_init(&tz->lock); result = acpi_thermal_get_info(tz); if (result) goto free_memory; acpi_thermal_guess_offset(tz); result = acpi_thermal_register_thermal_zone(tz); if (result) goto free_memory; result = acpi_thermal_add_fs(device); if (result) goto unregister_thermal_zone; printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature)); goto end; unregister_thermal_zone: 
thermal_zone_device_unregister(tz->thermal_zone); free_memory: kfree(tz); end: return result; } static int acpi_thermal_remove(struct acpi_device *device, int type) { struct acpi_thermal *tz = NULL; if (!device || !acpi_driver_data(device)) return -EINVAL; tz = acpi_driver_data(device); acpi_thermal_remove_fs(device); acpi_thermal_unregister_thermal_zone(tz); mutex_destroy(&tz->lock); kfree(tz); return 0; } static int acpi_thermal_resume(struct acpi_device *device) { struct acpi_thermal *tz = NULL; int i, j, power_state, result; if (!device || !acpi_driver_data(device)) return -EINVAL; tz = acpi_driver_data(device); for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { if (!(&tz->trips.active[i])) break; if (!tz->trips.active[i].flags.valid) break; tz->trips.active[i].flags.enabled = 1; for (j = 0; j < tz->trips.active[i].devices.count; j++) { result = acpi_bus_get_power(tz->trips.active[i].devices. handles[j], &power_state); if (result || (power_state != ACPI_STATE_D0)) { tz->trips.active[i].flags.enabled = 0; break; } } tz->state.active |= tz->trips.active[i].flags.enabled; } acpi_thermal_check(tz); return AE_OK; } static int thermal_act(const struct dmi_system_id *d) { if (act == 0) { printk(KERN_NOTICE "ACPI: %s detected: " "disabling all active thermal trip points\n", d->ident); act = -1; } return 0; } static int thermal_nocrt(const struct dmi_system_id *d) { printk(KERN_NOTICE "ACPI: %s detected: " "disabling all critical thermal trip point actions.\n", d->ident); nocrt = 1; return 0; } static int thermal_tzp(const struct dmi_system_id *d) { if (tzp == 0) { printk(KERN_NOTICE "ACPI: %s detected: " "enabling thermal zone polling\n", d->ident); tzp = 300; /* 300 dS = 30 Seconds */ } return 0; } static int thermal_psv(const struct dmi_system_id *d) { if (psv == 0) { printk(KERN_NOTICE "ACPI: %s detected: " "disabling all passive thermal trip points\n", d->ident); psv = -1; } return 0; } static struct dmi_system_id thermal_dmi_table[] __initdata = { /* * Award BIOS on 
this AOpen makes thermal control almost worthless. * http://bugzilla.kernel.org/show_bug.cgi?id=8842 */ { .callback = thermal_act, .ident = "AOpen i915GMm-HFS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), }, }, { .callback = thermal_psv, .ident = "AOpen i915GMm-HFS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), }, }, { .callback = thermal_tzp, .ident = "AOpen i915GMm-HFS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), }, }, { .callback = thermal_nocrt, .ident = "Gigabyte GA-7ZX", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), DMI_MATCH(DMI_BOARD_NAME, "7ZX"), }, }, {} }; static int __init acpi_thermal_init(void) { int result = 0; dmi_check_system(thermal_dmi_table); if (off) { printk(KERN_NOTICE "ACPI: thermal control disabled\n"); return -ENODEV; } acpi_thermal_dir = proc_mkdir(ACPI_THERMAL_CLASS, acpi_root_dir); if (!acpi_thermal_dir) return -ENODEV; result = acpi_bus_register_driver(&acpi_thermal_driver); if (result < 0) { remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir); return -ENODEV; } return 0; } static void __exit acpi_thermal_exit(void) { acpi_bus_unregister_driver(&acpi_thermal_driver); remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir); return; } module_init(acpi_thermal_init); module_exit(acpi_thermal_exit);
gpl-2.0
sdadier/gb_kernel_2.6.32.9-mtd
Kernel/security/selinux/netlink.c
804
2430
/* * Netlink event notifications for SELinux. * * Author: James Morris <jmorris@redhat.com> * * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/selinux_netlink.h> #include <net/net_namespace.h> static struct sock *selnl; static int selnl_msglen(int msgtype) { int ret = 0; switch (msgtype) { case SELNL_MSG_SETENFORCE: ret = sizeof(struct selnl_msg_setenforce); break; case SELNL_MSG_POLICYLOAD: ret = sizeof(struct selnl_msg_policyload); break; default: BUG(); } return ret; } static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *data) { switch (msgtype) { case SELNL_MSG_SETENFORCE: { struct selnl_msg_setenforce *msg = NLMSG_DATA(nlh); memset(msg, 0, len); msg->val = *((int *)data); break; } case SELNL_MSG_POLICYLOAD: { struct selnl_msg_policyload *msg = NLMSG_DATA(nlh); memset(msg, 0, len); msg->seqno = *((u32 *)data); break; } default: BUG(); } } static void selnl_notify(int msgtype, void *data) { int len; sk_buff_data_t tmp; struct sk_buff *skb; struct nlmsghdr *nlh; len = selnl_msglen(msgtype); skb = alloc_skb(NLMSG_SPACE(len), GFP_USER); if (!skb) goto oom; tmp = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, msgtype, len); selnl_add_payload(nlh, len, msgtype, data); nlh->nlmsg_len = skb->tail - tmp; NETLINK_CB(skb).dst_group = SELNLGRP_AVC; netlink_broadcast(selnl, skb, 0, SELNLGRP_AVC, GFP_USER); out: return; nlmsg_failure: kfree_skb(skb); oom: printk(KERN_ERR "SELinux: OOM in %s\n", __func__); goto out; } void selnl_notify_setenforce(int val) { selnl_notify(SELNL_MSG_SETENFORCE, &val); } void selnl_notify_policyload(u32 seqno) { 
selnl_notify(SELNL_MSG_POLICYLOAD, &seqno); } static int __init selnl_init(void) { selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, SELNLGRP_MAX, NULL, NULL, THIS_MODULE); if (selnl == NULL) panic("SELinux: Cannot create netlink socket."); netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV); return 0; } __initcall(selnl_init);
gpl-2.0
NicholasPace/android_kernel_asus_moorefield
fs/xfs/xfs_alloc.c
2084
73221
/* * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_alloc.h" #include "xfs_extent_busy.h" #include "xfs_error.h" #include "xfs_cksum.h" #include "xfs_trace.h" #include "xfs_buf_item.h" struct workqueue_struct *xfs_alloc_wq; #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b))) #define XFSA_FIXUP_BNO_OK 1 #define XFSA_FIXUP_CNT_OK 2 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *); STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *); STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *); STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *, xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *); /* * Lookup the record equal to [bno, len] in the btree given by cur. 
*/ STATIC int /* error */ xfs_alloc_lookup_eq( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ xfs_extlen_t len, /* length of extent */ int *stat) /* success/failure */ { cur->bc_rec.a.ar_startblock = bno; cur->bc_rec.a.ar_blockcount = len; return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); } /* * Lookup the first record greater than or equal to [bno, len] * in the btree given by cur. */ int /* error */ xfs_alloc_lookup_ge( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ xfs_extlen_t len, /* length of extent */ int *stat) /* success/failure */ { cur->bc_rec.a.ar_startblock = bno; cur->bc_rec.a.ar_blockcount = len; return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); } /* * Lookup the first record less than or equal to [bno, len] * in the btree given by cur. */ int /* error */ xfs_alloc_lookup_le( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ xfs_extlen_t len, /* length of extent */ int *stat) /* success/failure */ { cur->bc_rec.a.ar_startblock = bno; cur->bc_rec.a.ar_blockcount = len; return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat); } /* * Update the record referred to by cur to the value given * by [bno, len]. * This either works (return 0) or gets an EFSCORRUPTED error. */ STATIC int /* error */ xfs_alloc_update( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ xfs_extlen_t len) /* length of extent */ { union xfs_btree_rec rec; rec.alloc.ar_startblock = cpu_to_be32(bno); rec.alloc.ar_blockcount = cpu_to_be32(len); return xfs_btree_update(cur, &rec); } /* * Get the data from the pointed-to record. 
*/ int /* error */ xfs_alloc_get_rec( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t *bno, /* output: starting block of extent */ xfs_extlen_t *len, /* output: length of extent */ int *stat) /* output: success/failure */ { union xfs_btree_rec *rec; int error; error = xfs_btree_get_rec(cur, &rec, stat); if (!error && *stat == 1) { *bno = be32_to_cpu(rec->alloc.ar_startblock); *len = be32_to_cpu(rec->alloc.ar_blockcount); } return error; } /* * Compute aligned version of the found extent. * Takes alignment and min length into account. */ STATIC void xfs_alloc_compute_aligned( xfs_alloc_arg_t *args, /* allocation argument structure */ xfs_agblock_t foundbno, /* starting block in found extent */ xfs_extlen_t foundlen, /* length in found extent */ xfs_agblock_t *resbno, /* result block number */ xfs_extlen_t *reslen) /* result length */ { xfs_agblock_t bno; xfs_extlen_t len; /* Trim busy sections out of found extent */ xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len); if (args->alignment > 1 && len >= args->minlen) { xfs_agblock_t aligned_bno = roundup(bno, args->alignment); xfs_extlen_t diff = aligned_bno - bno; *resbno = aligned_bno; *reslen = diff >= len ? 0 : len - diff; } else { *resbno = bno; *reslen = len; } } /* * Compute best start block and diff for "near" allocations. * freelen >= wantlen already checked by caller. 
*/ STATIC xfs_extlen_t /* difference value (absolute) */ xfs_alloc_compute_diff( xfs_agblock_t wantbno, /* target starting block */ xfs_extlen_t wantlen, /* target length */ xfs_extlen_t alignment, /* target alignment */ xfs_agblock_t freebno, /* freespace's starting block */ xfs_extlen_t freelen, /* freespace's length */ xfs_agblock_t *newbnop) /* result: best start block from free */ { xfs_agblock_t freeend; /* end of freespace extent */ xfs_agblock_t newbno1; /* return block number */ xfs_agblock_t newbno2; /* other new block number */ xfs_extlen_t newlen1=0; /* length with newbno1 */ xfs_extlen_t newlen2=0; /* length with newbno2 */ xfs_agblock_t wantend; /* end of target extent */ ASSERT(freelen >= wantlen); freeend = freebno + freelen; wantend = wantbno + wantlen; if (freebno >= wantbno) { if ((newbno1 = roundup(freebno, alignment)) >= freeend) newbno1 = NULLAGBLOCK; } else if (freeend >= wantend && alignment > 1) { newbno1 = roundup(wantbno, alignment); newbno2 = newbno1 - alignment; if (newbno1 >= freeend) newbno1 = NULLAGBLOCK; else newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1); if (newbno2 < freebno) newbno2 = NULLAGBLOCK; else newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2); if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) { if (newlen1 < newlen2 || (newlen1 == newlen2 && XFS_ABSDIFF(newbno1, wantbno) > XFS_ABSDIFF(newbno2, wantbno))) newbno1 = newbno2; } else if (newbno2 != NULLAGBLOCK) newbno1 = newbno2; } else if (freeend >= wantend) { newbno1 = wantbno; } else if (alignment > 1) { newbno1 = roundup(freeend - wantlen, alignment); if (newbno1 > freeend - wantlen && newbno1 - alignment >= freebno) newbno1 -= alignment; else if (newbno1 >= freeend) newbno1 = NULLAGBLOCK; } else newbno1 = freeend - wantlen; *newbnop = newbno1; return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno); } /* * Fix up the length, based on mod and prod. * len should be k * prod + mod for some k. * If len is too small it is returned unchanged. 
* If len hits maxlen it is left alone. */ STATIC void xfs_alloc_fix_len( xfs_alloc_arg_t *args) /* allocation argument structure */ { xfs_extlen_t k; xfs_extlen_t rlen; ASSERT(args->mod < args->prod); rlen = args->len; ASSERT(rlen >= args->minlen); ASSERT(rlen <= args->maxlen); if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen || (args->mod == 0 && rlen < args->prod)) return; k = rlen % args->prod; if (k == args->mod) return; if (k > args->mod) { if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen) return; } else { if ((int)(rlen = rlen - args->prod - (args->mod - k)) < (int)args->minlen) return; } ASSERT(rlen >= args->minlen); ASSERT(rlen <= args->maxlen); args->len = rlen; } /* * Fix up length if there is too little space left in the a.g. * Return 1 if ok, 0 if too little, should give up. */ STATIC int xfs_alloc_fix_minleft( xfs_alloc_arg_t *args) /* allocation argument structure */ { xfs_agf_t *agf; /* a.g. freelist header */ int diff; /* free space difference */ if (args->minleft == 0) return 1; agf = XFS_BUF_TO_AGF(args->agbp); diff = be32_to_cpu(agf->agf_freeblks) - args->len - args->minleft; if (diff >= 0) return 1; args->len += diff; /* shrink the allocated space */ if (args->len >= args->minlen) return 1; args->agbno = NULLAGBLOCK; return 0; } /* * Update the two btrees, logically removing from freespace the extent * starting at rbno, rlen blocks. The extent is contained within the * actual (current) free extent fbno for flen blocks. * Flags are passed in indicating whether the cursors are set to the * relevant records. */ STATIC int /* error code */ xfs_alloc_fixup_trees( xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */ xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */ xfs_agblock_t fbno, /* starting block of free extent */ xfs_extlen_t flen, /* length of free extent */ xfs_agblock_t rbno, /* starting block of returned extent */ xfs_extlen_t rlen, /* length of returned extent */ int flags) /* flags, XFSA_FIXUP_... 
*/ { int error; /* error code */ int i; /* operation results */ xfs_agblock_t nfbno1; /* first new free startblock */ xfs_agblock_t nfbno2; /* second new free startblock */ xfs_extlen_t nflen1=0; /* first new free length */ xfs_extlen_t nflen2=0; /* second new free length */ /* * Look up the record in the by-size tree if necessary. */ if (flags & XFSA_FIXUP_CNT_OK) { #ifdef DEBUG if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i))) return error; XFS_WANT_CORRUPTED_RETURN( i == 1 && nfbno1 == fbno && nflen1 == flen); #endif } else { if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } /* * Look up the record in the by-block tree if necessary. */ if (flags & XFSA_FIXUP_BNO_OK) { #ifdef DEBUG if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i))) return error; XFS_WANT_CORRUPTED_RETURN( i == 1 && nfbno1 == fbno && nflen1 == flen); #endif } else { if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } #ifdef DEBUG if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) { struct xfs_btree_block *bnoblock; struct xfs_btree_block *cntblock; bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]); cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]); XFS_WANT_CORRUPTED_RETURN( bnoblock->bb_numrecs == cntblock->bb_numrecs); } #endif /* * Deal with all four cases: the allocated record is contained * within the freespace record, so we can have new freespace * at either (or both) end, or no freespace remaining. */ if (rbno == fbno && rlen == flen) nfbno1 = nfbno2 = NULLAGBLOCK; else if (rbno == fbno) { nfbno1 = rbno + rlen; nflen1 = flen - rlen; nfbno2 = NULLAGBLOCK; } else if (rbno + rlen == fbno + flen) { nfbno1 = fbno; nflen1 = flen - rlen; nfbno2 = NULLAGBLOCK; } else { nfbno1 = fbno; nflen1 = rbno - fbno; nfbno2 = rbno + rlen; nflen2 = (fbno + flen) - nfbno2; } /* * Delete the entry from the by-size btree. 
*/ if ((error = xfs_btree_delete(cnt_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); /* * Add new by-size btree entry(s). */ if (nfbno1 != NULLAGBLOCK) { if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 0); if ((error = xfs_btree_insert(cnt_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } if (nfbno2 != NULLAGBLOCK) { if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 0); if ((error = xfs_btree_insert(cnt_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } /* * Fix up the by-block btree entry(s). */ if (nfbno1 == NULLAGBLOCK) { /* * No remaining freespace, just delete the by-block tree entry. */ if ((error = xfs_btree_delete(bno_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } else { /* * Update the by-block entry to start later|be shorter. */ if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1))) return error; } if (nfbno2 != NULLAGBLOCK) { /* * 2 resulting free entries, need to add one. */ if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 0); if ((error = xfs_btree_insert(bno_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } return 0; } static bool xfs_agfl_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp); int i; if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_uuid)) return false; if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC) return false; /* * during growfs operations, the perag is not fully initialised, * so we can't use it for any useful checking. growfs ensures we can't * use it by using uncached buffers that don't have the perag attached * so we can detect and avoid this problem. 
*/ if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno) return false; for (i = 0; i < XFS_AGFL_SIZE(mp); i++) { if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK && be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks) return false; } return true; } static void xfs_agfl_read_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; int agfl_ok = 1; /* * There is no verification of non-crc AGFLs because mkfs does not * initialise the AGFL to zero or NULL. Hence the only valid part of the * AGFL is what the AGF says is active. We can't get to the AGF, so we * can't verify just those entries are valid. */ if (!xfs_sb_version_hascrc(&mp->m_sb)) return; agfl_ok = xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length), offsetof(struct xfs_agfl, agfl_crc)); agfl_ok = agfl_ok && xfs_agfl_verify(bp); if (!agfl_ok) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); xfs_buf_ioerror(bp, EFSCORRUPTED); } } static void xfs_agfl_write_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_buf_log_item *bip = bp->b_fspriv; /* no verification of non-crc AGFLs */ if (!xfs_sb_version_hascrc(&mp->m_sb)) return; if (!xfs_agfl_verify(bp)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); xfs_buf_ioerror(bp, EFSCORRUPTED); return; } if (bip) XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), offsetof(struct xfs_agfl, agfl_crc)); } const struct xfs_buf_ops xfs_agfl_buf_ops = { .verify_read = xfs_agfl_read_verify, .verify_write = xfs_agfl_write_verify, }; /* * Read in the allocation group free block array. 
*/ STATIC int /* error */ xfs_alloc_read_agfl( xfs_mount_t *mp, /* mount point structure */ xfs_trans_t *tp, /* transaction pointer */ xfs_agnumber_t agno, /* allocation group number */ xfs_buf_t **bpp) /* buffer for the ag free block array */ { xfs_buf_t *bp; /* return value */ int error; ASSERT(agno != NULLAGNUMBER); error = xfs_trans_read_buf( mp, tp, mp->m_ddev_targp, XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops); if (error) return error; ASSERT(!xfs_buf_geterror(bp)); xfs_buf_set_ref(bp, XFS_AGFL_REF); *bpp = bp; return 0; } STATIC int xfs_alloc_update_counters( struct xfs_trans *tp, struct xfs_perag *pag, struct xfs_buf *agbp, long len) { struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); pag->pagf_freeblks += len; be32_add_cpu(&agf->agf_freeblks, len); xfs_trans_agblocks_delta(tp, len); if (unlikely(be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))) return EFSCORRUPTED; xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); return 0; } /* * Allocation group level functions. */ /* * Allocate a variable extent in the allocation group agno. * Type and bno are used to determine where in the allocation group the * extent will start. * Extent's length (returned in *len) will be between minlen and maxlen, * and of the form k * prod + mod unless there's nothing that large. * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. */ STATIC int /* error */ xfs_alloc_ag_vextent( xfs_alloc_arg_t *args) /* argument structure for allocation */ { int error=0; ASSERT(args->minlen > 0); ASSERT(args->maxlen > 0); ASSERT(args->minlen <= args->maxlen); ASSERT(args->mod < args->prod); ASSERT(args->alignment > 0); /* * Branch to correct routine based on the type. 
 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || !args->isfl);
	ASSERT(args->agbno % args->alignment == 0);

	/*
	 * Blocks that did not come from the AGFL must be debited from the
	 * per-AG free-space counters; AGFL blocks were already accounted
	 * for when they were put on the freelist.
	 */
	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					      args->agbno, args->len));
	}

	/*
	 * Debit the superblock free-block count unless this allocation is
	 * itself servicing the freelist.  Delayed allocations come out of
	 * the reserved pool instead.
	 */
	if (!args->isfl) {
		xfs_trans_mod_sb(args->tp, args->wasdel ?
				 XFS_TRANS_SB_RES_FDBLOCKS :
				 XFS_TRANS_SB_FDBLOCKS,
				 -((long)(args->len)));
	}

	XFS_STATS_INC(xs_allocx);
	XFS_STATS_ADD(xs_allocb, args->len);
	return error;
}

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of trimmed extent */
	xfs_extlen_t	tlen;	/* length of trimmed extent */
	xfs_agblock_t	tend;	/* end block of trimmed extent */
	int		i;	/* success/failure of operation */

	/* Exact-bno allocation is incompatible with alignment constraints. */
	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args))
		goto not_found;

	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}

/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 *
 * On return, exactly one of the two cursors has been freed and NULLed out:
 * *scur if the already-found extent wins, *gcur if the search finds better.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,	/* extent length */
	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
	xfs_extlen_t		*slena,	/* aligned extent length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment,
						       *sbnoa, *slena, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		/* Step the search cursor one record further in 'dir'. */
		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
	xfs_agblock_t	gtbno;		/* start bno of right side entry */
	xfs_agblock_t	gtbnoa;		/* aligned ... */
	xfs_extlen_t	gtdiff;		/* difference to right side entry */
	xfs_extlen_t	gtlen;		/* length of right side entry */
	xfs_extlen_t	gtlena;		/* aligned ... */
	xfs_agblock_t	gtnew;		/* useful start bno of right side */
	int		error;		/* error code */
	int		i;		/* result code, temporary */
	int		j;		/* result code, temporary */
	xfs_agblock_t	ltbno;		/* start bno of left side entry */
	xfs_agblock_t	ltbnoa;		/* aligned ... */
	xfs_extlen_t	ltdiff;		/* difference to left side entry */
	xfs_extlen_t	ltlen;		/* length of left side entry */
	xfs_extlen_t	ltlena;		/* aligned ... */
	xfs_agblock_t	ltnew;		/* useful start bno of left side */
	xfs_extlen_t	rlen;		/* length of returned extent */
	int		forced = 0;	/* nonzero after a log force retry */
#if defined(DEBUG) && defined(__KERNEL__)
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;	/* set to do first algorithm */

	dofirst = prandom_u32() & 1;
#endif

restart:
	bno_cur_lt = NULL;
	bno_cur_gt = NULL;
	ltlen = 0;
	gtlena = 0;
	ltlena = 0;

	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);

	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti=0;
		xfs_extlen_t	blen=0;
		xfs_agblock_t	bnew=0;

#if defined(DEBUG) && defined(__KERNEL__)
		if (!dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks.  If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena < args->minlen)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, ltbnoa, ltlena, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;
		if (!xfs_alloc_fix_minleft(args)) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_nominleft(args);
			return 0;
		}
		blen = args->len;
		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}
	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree.  When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	/*
	 * Allocate and initialize the cursor for the leftward search.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup <= bno to find the leftward search's starting point.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the rightward
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;
	/*
	 * Increment the cursor, so we will point at the entry just right
	 * of the leftward entry if any, or to the leftmost entry.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}
	/*
	 * Loop going left with the leftward cursor, right with the
	 * rightward cursor, until either both directions give up or
	 * we find an entry at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena >= args->minlen)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, gtbno, gtlen,
						  &gtbnoa, &gtlena);
			if (gtlena >= args->minlen)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, ltbnoa, ltlena, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen,
						&gtbnoa, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, gtbnoa, gtlena, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen,
						&ltbnoa, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

		/*
		 * First failure: force the log to unbusy extents and retry
		 * the whole search once before giving up for good.
		 */
		if (!forced++) {
			trace_xfs_alloc_near_busy(args);
			xfs_log_force(args->mp, XFS_LOG_SYNC);
			goto restart;
		}

		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args)) {
		trace_xfs_alloc_near_nominleft(args);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		return 0;
	}
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
				     ltbnoa, ltlena, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}

/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */
	int		forced = 0;	/* count of log-force retries */

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none or we have busy extents that we cannot allocate from, then
	 * we have to settle for a smaller extent. In the case that there are
	 * no large extents, this will return the last entry in the tree unless
	 * the tree is empty. In the case that there are only busy large
	 * extents, this will return the largest small extent unless there
	 * are no smaller extents available.
	 */
	if (!i || forced > 1) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 * If we are at low space, don't check, or if we fall of
		 * the end of the btree, turn off the busy check and
		 * restart.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Make it unbusy by forcing the log out and
				 * retrying. If we've been here before, forcing
				 * the log isn't making the extents available,
				 * which means they have probably been freed in
				 * this transaction.  In that case, we have to
				 * give up on them and we'll attempt a minlen
				 * allocation the next time around.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				if (!forced++)
					xfs_log_force(args->mp, XFS_LOG_SYNC);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			/*
			 * Records are in decreasing size order going left, so
			 * once raw length drops below the best aligned length
			 * no further record can improve on it.
			 */
			if (flen < bestrlen)
				break;
			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (!forced++) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_log_force(args->mp, XFS_LOG_SYNC);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	if (!xfs_alloc_fix_minleft(args))
		goto out_nominleft;
	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}

/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	/* Step left from the caller's position to the largest extent left. */
	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
					      args->userdata);

			if (args->userdata) {
				/*
				 * Freelist blocks may carry stale btree
				 * content; invalidate the buffer before
				 * handing the block out as user data.
				 */
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);
			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}

/*
 * Free the extent starting at agno/bno for length.
 * Coalesces the freed extent with contiguous left/right neighbors in the
 * by-block btree and keeps the by-size btree in sync.
 */
STATIC int			/* error */
xfs_free_ag_extent(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer for a.g. freelist header */
	xfs_agnumber_t	agno,	/* allocation group number */
	xfs_agblock_t	bno,	/* starting block number */
	xfs_extlen_t	len,	/* length of extent */
	int		isfl)	/* set if is freelist blocks - no sb acctg */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
	int		error;		/* error return value */
	xfs_agblock_t	gtbno;		/* start of right neighbor block */
	xfs_extlen_t	gtlen;		/* length of right neighbor block */
	int		haveleft;	/* have a left neighbor block */
	int		haveright;	/* have a right neighbor block */
	int		i;		/* temp, result code */
	xfs_agblock_t	ltbno;		/* start of left neighbor block */
	xfs_extlen_t	ltlen;		/* length of left neighbor block */
	xfs_mount_t	*mp;		/* mount point struct for filesystem */
	xfs_agblock_t	nbno;		/* new starting block of freespace */
	xfs_extlen_t	nlen;		/* new length of freespace */
	xfs_perag_t	*pag;		/* per allocation group data */

	mp = tp->t_mountp;
	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	cnt_cur = NULL;
	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-block entry for the right block.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(
				i == 1 && xxbno == ltbno && xxlen == ltlen,
				error0);
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size tree.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
	if ((error = xfs_btree_insert(cnt_cur, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;

	/*
	 * Update the freespace totals in the ag and superblock.
	 */
	pag = xfs_perag_get(mp, agno);
	error = xfs_alloc_update_counters(tp, pag, agbp, len);
	xfs_perag_put(pag);
	if (error)
		goto error0;

	if (!isfl)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
	XFS_STATS_INC(xs_freex);
	XFS_STATS_ADD(xs_freeb, len);

	trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);

	return 0;

 error0:
	trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Visible (exported) allocation/free functions.
 * Some of these are used just by xfs_alloc_btree.c and this file.
 */

/*
 * Compute and fill in value of m_ag_maxlevels.
 */
void
xfs_alloc_compute_maxlevels(
	xfs_mount_t	*mp)	/* file system mount structure */
{
	int		level;
	uint		maxblocks;
	uint		maxleafents;
	int		minleafrecs;
	int		minnoderecs;

	/*
	 * Worst case: every other block free, so one record per two
	 * AG blocks; size the tree assuming minimally-filled nodes.
	 */
	maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
	minleafrecs = mp->m_alloc_mnr[0];
	minnoderecs = mp->m_alloc_mnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	mp->m_ag_maxlevels = level;
}

/*
 * Find the length of the longest extent in an AG.
 * The result is reduced by the blocks the freelist may still need to
 * claim; if the longest extent would be fully consumed, returns 1 when
 * any free space remains at all, else 0.
 */
xfs_extlen_t
xfs_alloc_longest_free_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{
	xfs_extlen_t		need, delta = 0;

	need = XFS_MIN_FREELIST_PAG(pag, mp);
	if (need > pag->pagf_flcount)
		delta = need - pag->pagf_flcount;

	if (pag->pagf_longest > delta)
		return pag->pagf_longest - delta;
	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}

/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 */
STATIC int			/* error */
xfs_alloc_fix_freelist(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	int		flags)	/* XFS_ALLOC_FLAG_... */
{
	xfs_buf_t	*agbp;	/* agf buffer pointer */
	xfs_agf_t	*agf;	/* a.g. freespace structure pointer */
	xfs_buf_t	*agflbp;/* agfl buffer pointer */
	xfs_agblock_t	bno;	/* freelist block */
	xfs_extlen_t	delta;	/* new blocks needed in freelist */
	int		error;	/* error result code */
	xfs_extlen_t	longest;/* longest extent in allocation group */
	xfs_mount_t	*mp;	/* file system mount point structure */
	xfs_extlen_t	need;	/* total blocks needed in freelist */
	xfs_perag_t	*pag;	/* per-ag information structure */
	xfs_alloc_arg_t	targs;	/* local allocation arguments */
	xfs_trans_t	*tp;	/* transaction pointer */

	mp = args->mp;

	pag = args->pag;
	tp = args->tp;
	if (!pag->pagf_init) {
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (!pag->pagf_init) {
			/* Trylock read failed to init the pag: punt. */
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			args->agbp = NULL;
			return 0;
		}
	} else
		agbp = NULL;

	/*
	 * If this is a metadata preferred pag and we are user data
	 * then try somewhere else if we are not being asked to
	 * try harder at this point
	 */
	if (pag->pagf_metadata && args->userdata &&
	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
		args->agbp = NULL;
		return 0;
	}

	if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
		/*
		 * If it looks like there isn't a long enough extent, or enough
		 * total blocks, reject it.
		 */
		need = XFS_MIN_FREELIST_PAG(pag, mp);
		longest = xfs_alloc_longest_free_extent(mp, pag);
		if ((args->minlen + args->alignment + args->minalignslop - 1) >
				longest ||
		    ((int)(pag->pagf_freeblks + pag->pagf_flcount -
			   need - args->total) < (int)args->minleft)) {
			if (agbp)
				xfs_trans_brelse(tp, agbp);
			args->agbp = NULL;
			return 0;
		}
	}

	/*
	 * Get the a.g. freespace buffer.
	 * Can fail if we're not blocking on locks, and it's held.
	 */
	if (agbp == NULL) {
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (agbp == NULL) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			args->agbp = NULL;
			return 0;
		}
	}
	/*
	 * Figure out how many blocks we should have in the freelist.
	 */
	agf = XFS_BUF_TO_AGF(agbp);
	need = XFS_MIN_FREELIST(agf, mp);
	/*
	 * If there isn't enough total or single-extent, reject it.
	 * Repeat the on-disk check now that the AGF is locked; the
	 * cached pag values checked above may have been stale.
	 */
	if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
		delta = need > be32_to_cpu(agf->agf_flcount) ?
			(need - be32_to_cpu(agf->agf_flcount)) : 0;
		longest = be32_to_cpu(agf->agf_longest);
		longest = (longest > delta) ? (longest - delta) :
			(be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
		if ((args->minlen + args->alignment + args->minalignslop - 1) >
				longest ||
		    ((int)(be32_to_cpu(agf->agf_freeblks) +
		     be32_to_cpu(agf->agf_flcount) - need - args->total) <
		     (int)args->minleft)) {
			xfs_trans_brelse(tp, agbp);
			args->agbp = NULL;
			return 0;
		}
	}
	/*
	 * Make the freelist shorter if it's too long.
	 */
	while (be32_to_cpu(agf->agf_flcount) > need) {
		xfs_buf_t	*bp;

		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
		if (error)
			return error;
		if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
			return error;
		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Initialize the args structure.
	 */
	memset(&targs, 0, sizeof(targs));
	targs.tp = tp;
	targs.mp = mp;
	targs.agbp = agbp;
	targs.agno = args->agno;
	targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
	targs.type = XFS_ALLOCTYPE_THIS_AG;
	targs.pag = pag;
	if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
		return error;
	/*
	 * Make the freelist longer if it's too short.
	 */
	while (be32_to_cpu(agf->agf_flcount) < need) {
		targs.agbno = 0;
		targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
		/*
		 * Allocate as many blocks as possible at once.
		 */
		if ((error = xfs_alloc_ag_vextent(&targs))) {
			xfs_trans_brelse(tp, agflbp);
			return error;
		}
		/*
		 * Stop if we run out.  Won't happen if callers are obeying
		 * the restrictions correctly.  Can happen for free calls
		 * on a completely full ag.
		 */
		if (targs.agbno == NULLAGBLOCK) {
			if (flags & XFS_ALLOC_FLAG_FREEING)
				break;
			xfs_trans_brelse(tp, agflbp);
			args->agbp = NULL;
			return 0;
		}
		/*
		 * Put each allocated block on the list.
		 */
		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
			error = xfs_alloc_put_freelist(tp, agbp,
							agflbp, bno, 0);
			if (error)
				return error;
		}
	}
	xfs_trans_brelse(tp, agflbp);
	args->agbp = agbp;
	return 0;
}

/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.
 */
int				/* error */
xfs_alloc_get_freelist(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
	int		btreeblk) /* destination is a AGF btree */
{
	xfs_agf_t	*agf;	/* a.g. freespace structure */
	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
	xfs_agblock_t	bno;	/* block number returned */
	__be32		*agfl_bno;
	int		error;
	int		logflags;
	xfs_mount_t	*mp = tp->t_mountp;
	xfs_perag_t	*pag;	/* per allocation group data */

	/*
	 * Freelist is empty, give up.
	 */
	agf = XFS_BUF_TO_AGF(agbp);
	if (!agf->agf_flcount) {
		*bnop = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Read the array of free blocks.
	 */
	error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
				    &agflbp);
	if (error)
		return error;


	/*
	 * Get the block number and update the data structures.
*/ agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp); bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]); be32_add_cpu(&agf->agf_flfirst, 1); xfs_trans_brelse(tp, agflbp); if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) agf->agf_flfirst = 0; pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); be32_add_cpu(&agf->agf_flcount, -1); xfs_trans_agflist_delta(tp, -1); pag->pagf_flcount--; xfs_perag_put(pag); logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT; if (btreeblk) { be32_add_cpu(&agf->agf_btreeblks, 1); pag->pagf_btreeblks++; logflags |= XFS_AGF_BTREEBLKS; } xfs_alloc_log_agf(tp, agbp, logflags); *bnop = bno; return 0; } /* * Log the given fields from the agf structure. */ void xfs_alloc_log_agf( xfs_trans_t *tp, /* transaction pointer */ xfs_buf_t *bp, /* buffer for a.g. freelist header */ int fields) /* mask of fields to be logged (XFS_AGF_...) */ { int first; /* first byte offset */ int last; /* last byte offset */ static const short offsets[] = { offsetof(xfs_agf_t, agf_magicnum), offsetof(xfs_agf_t, agf_versionnum), offsetof(xfs_agf_t, agf_seqno), offsetof(xfs_agf_t, agf_length), offsetof(xfs_agf_t, agf_roots[0]), offsetof(xfs_agf_t, agf_levels[0]), offsetof(xfs_agf_t, agf_flfirst), offsetof(xfs_agf_t, agf_fllast), offsetof(xfs_agf_t, agf_flcount), offsetof(xfs_agf_t, agf_freeblks), offsetof(xfs_agf_t, agf_longest), offsetof(xfs_agf_t, agf_btreeblks), offsetof(xfs_agf_t, agf_uuid), sizeof(xfs_agf_t) }; trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_); xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF); xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); } /* * Interface for inode allocation to force the pag data to be initialized. */ int /* error */ xfs_alloc_pagf_init( xfs_mount_t *mp, /* file system mount structure */ xfs_trans_t *tp, /* transaction pointer */ xfs_agnumber_t agno, /* allocation group number */ int flags) /* XFS_ALLOC_FLAGS_... 
*/ { xfs_buf_t *bp; int error; if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp))) return error; if (bp) xfs_trans_brelse(tp, bp); return 0; } /* * Put the block on the freelist for the allocation group. */ int /* error */ xfs_alloc_put_freelist( xfs_trans_t *tp, /* transaction pointer */ xfs_buf_t *agbp, /* buffer for a.g. freelist header */ xfs_buf_t *agflbp,/* buffer for a.g. free block array */ xfs_agblock_t bno, /* block being freed */ int btreeblk) /* block came from a AGF btree */ { xfs_agf_t *agf; /* a.g. freespace structure */ __be32 *blockp;/* pointer to array entry */ int error; int logflags; xfs_mount_t *mp; /* mount structure */ xfs_perag_t *pag; /* per allocation group data */ __be32 *agfl_bno; int startoff; agf = XFS_BUF_TO_AGF(agbp); mp = tp->t_mountp; if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno), &agflbp))) return error; be32_add_cpu(&agf->agf_fllast, 1); if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) agf->agf_fllast = 0; pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); be32_add_cpu(&agf->agf_flcount, 1); xfs_trans_agflist_delta(tp, 1); pag->pagf_flcount++; logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT; if (btreeblk) { be32_add_cpu(&agf->agf_btreeblks, -1); pag->pagf_btreeblks--; logflags |= XFS_AGF_BTREEBLKS; } xfs_perag_put(pag); xfs_alloc_log_agf(tp, agbp, logflags); ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp); blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)]; *blockp = cpu_to_be32(bno); startoff = (char *)blockp - (char *)agflbp->b_addr; xfs_alloc_log_agf(tp, agbp, logflags); xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF); xfs_trans_log_buf(tp, agflbp, startoff, startoff + sizeof(xfs_agblock_t) - 1); return 0; } static bool xfs_agf_verify( struct xfs_mount *mp, struct xfs_buf *bp) { struct xfs_agf *agf = XFS_BUF_TO_AGF(bp); if (xfs_sb_version_hascrc(&mp->m_sb) && !uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_uuid)) return false; if 
(!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) && XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) && be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) && be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) && be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp))) return false; /* * during growfs operations, the perag is not fully initialised, * so we can't use it for any useful checking. growfs ensures we can't * use it by using uncached buffers that don't have the perag attached * so we can detect and avoid this problem. */ if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno) return false; if (xfs_sb_version_haslazysbcount(&mp->m_sb) && be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length)) return false; return true;; } static void xfs_agf_read_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; int agf_ok = 1; if (xfs_sb_version_hascrc(&mp->m_sb)) agf_ok = xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length), offsetof(struct xfs_agf, agf_crc)); agf_ok = agf_ok && xfs_agf_verify(mp, bp); if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF, XFS_RANDOM_ALLOC_READ_AGF))) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); xfs_buf_ioerror(bp, EFSCORRUPTED); } } static void xfs_agf_write_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_buf_log_item *bip = bp->b_fspriv; if (!xfs_agf_verify(mp, bp)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); xfs_buf_ioerror(bp, EFSCORRUPTED); return; } if (!xfs_sb_version_hascrc(&mp->m_sb)) return; if (bip) XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), offsetof(struct xfs_agf, agf_crc)); } const struct xfs_buf_ops xfs_agf_buf_ops = { .verify_read = xfs_agf_read_verify, .verify_write = xfs_agf_write_verify, }; /* * Read in the allocation group header (free/alloc 
section). */ int /* error */ xfs_read_agf( struct xfs_mount *mp, /* mount point structure */ struct xfs_trans *tp, /* transaction pointer */ xfs_agnumber_t agno, /* allocation group number */ int flags, /* XFS_BUF_ */ struct xfs_buf **bpp) /* buffer for the ag freelist header */ { int error; ASSERT(agno != NULLAGNUMBER); error = xfs_trans_read_buf( mp, tp, mp->m_ddev_targp, XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops); if (error) return error; if (!*bpp) return 0; ASSERT(!(*bpp)->b_error); xfs_buf_set_ref(*bpp, XFS_AGF_REF); return 0; } /* * Read in the allocation group header (free/alloc section). */ int /* error */ xfs_alloc_read_agf( struct xfs_mount *mp, /* mount point structure */ struct xfs_trans *tp, /* transaction pointer */ xfs_agnumber_t agno, /* allocation group number */ int flags, /* XFS_ALLOC_FLAG_... */ struct xfs_buf **bpp) /* buffer for the ag freelist header */ { struct xfs_agf *agf; /* ag freelist header */ struct xfs_perag *pag; /* per allocation group data */ int error; ASSERT(agno != NULLAGNUMBER); error = xfs_read_agf(mp, tp, agno, (flags & XFS_ALLOC_FLAG_TRYLOCK) ? 
XBF_TRYLOCK : 0, bpp); if (error) return error; if (!*bpp) return 0; ASSERT(!(*bpp)->b_error); agf = XFS_BUF_TO_AGF(*bpp); pag = xfs_perag_get(mp, agno); if (!pag->pagf_init) { pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks); pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks); pag->pagf_flcount = be32_to_cpu(agf->agf_flcount); pag->pagf_longest = be32_to_cpu(agf->agf_longest); pag->pagf_levels[XFS_BTNUM_BNOi] = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]); pag->pagf_levels[XFS_BTNUM_CNTi] = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]); spin_lock_init(&pag->pagb_lock); pag->pagb_count = 0; pag->pagb_tree = RB_ROOT; pag->pagf_init = 1; } #ifdef DEBUG else if (!XFS_FORCED_SHUTDOWN(mp)) { ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks)); ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks)); ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount)); ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest)); ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] == be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi])); ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] == be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi])); } #endif xfs_perag_put(pag); return 0; } /* * Allocate an extent (variable-size). * Depending on the allocation type, we either look in a single allocation * group or loop over the allocation groups to find the result. */ int /* error */ xfs_alloc_vextent( xfs_alloc_arg_t *args) /* allocation argument structure */ { xfs_agblock_t agsize; /* allocation group size */ int error; int flags; /* XFS_ALLOC_FLAG_... 
locking flags */ xfs_extlen_t minleft;/* minimum left value, temp copy */ xfs_mount_t *mp; /* mount structure pointer */ xfs_agnumber_t sagno; /* starting allocation group number */ xfs_alloctype_t type; /* input allocation type */ int bump_rotor = 0; int no_min = 0; xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */ mp = args->mp; type = args->otype = args->type; args->agbno = NULLAGBLOCK; /* * Just fix this up, for the case where the last a.g. is shorter * (or there's only one a.g.) and the caller couldn't easily figure * that out (xfs_bmap_alloc). */ agsize = mp->m_sb.sb_agblocks; if (args->maxlen > agsize) args->maxlen = agsize; if (args->alignment == 0) args->alignment = 1; ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount); ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize); ASSERT(args->minlen <= args->maxlen); ASSERT(args->minlen <= agsize); ASSERT(args->mod < args->prod); if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount || XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize || args->minlen > args->maxlen || args->minlen > agsize || args->mod >= args->prod) { args->fsbno = NULLFSBLOCK; trace_xfs_alloc_vextent_badargs(args); return 0; } minleft = args->minleft; switch (type) { case XFS_ALLOCTYPE_THIS_AG: case XFS_ALLOCTYPE_NEAR_BNO: case XFS_ALLOCTYPE_THIS_BNO: /* * These three force us into a single a.g. */ args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); args->pag = xfs_perag_get(mp, args->agno); args->minleft = 0; error = xfs_alloc_fix_freelist(args, 0); args->minleft = minleft; if (error) { trace_xfs_alloc_vextent_nofix(args); goto error0; } if (!args->agbp) { trace_xfs_alloc_vextent_noagbp(args); break; } args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); if ((error = xfs_alloc_ag_vextent(args))) goto error0; break; case XFS_ALLOCTYPE_START_BNO: /* * Try near allocation first, then anywhere-in-ag after * the first a.g. fails. 
*/ if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) && (mp->m_flags & XFS_MOUNT_32BITINODES)) { args->fsbno = XFS_AGB_TO_FSB(mp, ((mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount), 0); bump_rotor = 1; } args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); args->type = XFS_ALLOCTYPE_NEAR_BNO; /* FALLTHROUGH */ case XFS_ALLOCTYPE_ANY_AG: case XFS_ALLOCTYPE_START_AG: case XFS_ALLOCTYPE_FIRST_AG: /* * Rotate through the allocation groups looking for a winner. */ if (type == XFS_ALLOCTYPE_ANY_AG) { /* * Start with the last place we left off. */ args->agno = sagno = (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount; args->type = XFS_ALLOCTYPE_THIS_AG; flags = XFS_ALLOC_FLAG_TRYLOCK; } else if (type == XFS_ALLOCTYPE_FIRST_AG) { /* * Start with allocation group given by bno. */ args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); args->type = XFS_ALLOCTYPE_THIS_AG; sagno = 0; flags = 0; } else { if (type == XFS_ALLOCTYPE_START_AG) args->type = XFS_ALLOCTYPE_THIS_AG; /* * Start with the given allocation group. */ args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno); flags = XFS_ALLOC_FLAG_TRYLOCK; } /* * Loop over allocation groups twice; first time with * trylock set, second time without. */ for (;;) { args->pag = xfs_perag_get(mp, args->agno); if (no_min) args->minleft = 0; error = xfs_alloc_fix_freelist(args, flags); args->minleft = minleft; if (error) { trace_xfs_alloc_vextent_nofix(args); goto error0; } /* * If we get a buffer back then the allocation will fly. */ if (args->agbp) { if ((error = xfs_alloc_ag_vextent(args))) goto error0; break; } trace_xfs_alloc_vextent_loopfailed(args); /* * Didn't work, figure out the next iteration. */ if (args->agno == sagno && type == XFS_ALLOCTYPE_START_BNO) args->type = XFS_ALLOCTYPE_THIS_AG; /* * For the first allocation, we can try any AG to get * space. However, if we already have allocated a * block, we don't want to try AGs whose number is below * sagno. 
Otherwise, we may end up with out-of-order * locking of AGF, which might cause deadlock. */ if (++(args->agno) == mp->m_sb.sb_agcount) { if (args->firstblock != NULLFSBLOCK) args->agno = sagno; else args->agno = 0; } /* * Reached the starting a.g., must either be done * or switch to non-trylock mode. */ if (args->agno == sagno) { if (no_min == 1) { args->agbno = NULLAGBLOCK; trace_xfs_alloc_vextent_allfailed(args); break; } if (flags == 0) { no_min = 1; } else { flags = 0; if (type == XFS_ALLOCTYPE_START_BNO) { args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); args->type = XFS_ALLOCTYPE_NEAR_BNO; } } } xfs_perag_put(args->pag); } if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) { if (args->agno == sagno) mp->m_agfrotor = (mp->m_agfrotor + 1) % (mp->m_sb.sb_agcount * rotorstep); else mp->m_agfrotor = (args->agno * rotorstep + 1) % (mp->m_sb.sb_agcount * rotorstep); } break; default: ASSERT(0); /* NOTREACHED */ } if (args->agbno == NULLAGBLOCK) args->fsbno = NULLFSBLOCK; else { args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno); #ifdef DEBUG ASSERT(args->len >= args->minlen); ASSERT(args->len <= args->maxlen); ASSERT(args->agbno % args->alignment == 0); XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len); #endif } xfs_perag_put(args->pag); return 0; error0: xfs_perag_put(args->pag); return error; } /* * Free an extent. * Just break up the extent address and hand off to xfs_free_ag_extent * after fixing up the freelist. */ int /* error */ xfs_free_extent( xfs_trans_t *tp, /* transaction pointer */ xfs_fsblock_t bno, /* starting block number of extent */ xfs_extlen_t len) /* length of extent */ { xfs_alloc_arg_t args; int error; ASSERT(len != 0); memset(&args, 0, sizeof(xfs_alloc_arg_t)); args.tp = tp; args.mp = tp->t_mountp; /* * validate that the block number is legal - the enables us to detect * and handle a silent filesystem corruption rather than crashing. 
*/ args.agno = XFS_FSB_TO_AGNO(args.mp, bno); if (args.agno >= args.mp->m_sb.sb_agcount) return EFSCORRUPTED; args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); if (args.agbno >= args.mp->m_sb.sb_agblocks) return EFSCORRUPTED; args.pag = xfs_perag_get(args.mp, args.agno); ASSERT(args.pag); error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING); if (error) goto error0; /* validate the extent size is legal now we have the agf locked */ if (args.agbno + len > be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) { error = EFSCORRUPTED; goto error0; } error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); if (!error) xfs_extent_busy_insert(tp, args.agno, args.agbno, len, 0); error0: xfs_perag_put(args.pag); return error; }
gpl-2.0
Dees-Troy/android_kernel_samsung_coreprimelte
drivers/media/i2c/ad9389b.c
2084
38285
/* * Analog Devices AD9389B/AD9889B video encoder driver * * Copyright 2012 Cisco Systems, Inc. and/or its affiliates. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * References (c = chapter, p = page): * REF_01 - Analog Devices, Programming Guide, AD9889B/AD9389B, * HDMI Transitter, Rev. A, October 2010 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/videodev2.h> #include <linux/workqueue.h> #include <linux/v4l2-dv-timings.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-common.h> #include <media/v4l2-ctrls.h> #include <media/ad9389b.h> static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "debug level (0-2)"); MODULE_DESCRIPTION("Analog Devices AD9389B/AD9889B video encoder driver"); MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>"); MODULE_AUTHOR("Martin Bugge <marbugge@cisco.com>"); MODULE_LICENSE("GPL"); #define MASK_AD9389B_EDID_RDY_INT 0x04 #define MASK_AD9389B_MSEN_INT 0x40 #define MASK_AD9389B_HPD_INT 0x80 #define MASK_AD9389B_HPD_DETECT 0x40 #define MASK_AD9389B_MSEN_DETECT 0x20 #define MASK_AD9389B_EDID_RDY 0x10 #define EDID_MAX_RETRIES (8) #define EDID_DELAY 250 #define EDID_MAX_SEGM 8 /* 
********************************************************************** * * Arrays with configuration parameters for the AD9389B * ********************************************************************** */ struct i2c_reg_value { u8 reg; u8 value; }; struct ad9389b_state_edid { /* total number of blocks */ u32 blocks; /* Number of segments read */ u32 segments; u8 data[EDID_MAX_SEGM * 256]; /* Number of EDID read retries left */ unsigned read_retries; }; struct ad9389b_state { struct ad9389b_platform_data pdata; struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler hdl; int chip_revision; /* Is the ad9389b powered on? */ bool power_on; /* Did we receive hotplug and rx-sense signals? */ bool have_monitor; /* timings from s_dv_timings */ struct v4l2_dv_timings dv_timings; /* controls */ struct v4l2_ctrl *hdmi_mode_ctrl; struct v4l2_ctrl *hotplug_ctrl; struct v4l2_ctrl *rx_sense_ctrl; struct v4l2_ctrl *have_edid0_ctrl; struct v4l2_ctrl *rgb_quantization_range_ctrl; struct i2c_client *edid_i2c_client; struct ad9389b_state_edid edid; /* Running counter of the number of detected EDIDs (for debugging) */ unsigned edid_detect_counter; struct workqueue_struct *work_queue; struct delayed_work edid_handler; /* work entry */ }; static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd); static bool ad9389b_check_edid_status(struct v4l2_subdev *sd); static void ad9389b_setup(struct v4l2_subdev *sd); static int ad9389b_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq); static int ad9389b_s_clock_freq(struct v4l2_subdev *sd, u32 freq); static inline struct ad9389b_state *get_ad9389b_state(struct v4l2_subdev *sd) { return container_of(sd, struct ad9389b_state, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct ad9389b_state, hdl)->sd; } /* ------------------------ I2C ----------------------------------------------- */ static int ad9389b_rd(struct v4l2_subdev *sd, u8 reg) { struct 
i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } static int ad9389b_wr(struct v4l2_subdev *sd, u8 reg, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; int i; for (i = 0; i < 3; i++) { ret = i2c_smbus_write_byte_data(client, reg, val); if (ret == 0) return 0; } v4l2_err(sd, "I2C Write Problem\n"); return ret; } /* To set specific bits in the register, a clear-mask is given (to be AND-ed), and then the value-mask (to be OR-ed). */ static inline void ad9389b_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask) { ad9389b_wr(sd, reg, (ad9389b_rd(sd, reg) & clr_mask) | val_mask); } static void ad9389b_edid_rd(struct v4l2_subdev *sd, u16 len, u8 *buf) { struct ad9389b_state *state = get_ad9389b_state(sd); int i; v4l2_dbg(1, debug, sd, "%s:\n", __func__); for (i = 0; i < len; i++) buf[i] = i2c_smbus_read_byte_data(state->edid_i2c_client, i); } static inline bool ad9389b_have_hotplug(struct v4l2_subdev *sd) { return ad9389b_rd(sd, 0x42) & MASK_AD9389B_HPD_DETECT; } static inline bool ad9389b_have_rx_sense(struct v4l2_subdev *sd) { return ad9389b_rd(sd, 0x42) & MASK_AD9389B_MSEN_DETECT; } static void ad9389b_csc_conversion_mode(struct v4l2_subdev *sd, u8 mode) { ad9389b_wr_and_or(sd, 0x17, 0xe7, (mode & 0x3)<<3); ad9389b_wr_and_or(sd, 0x18, 0x9f, (mode & 0x3)<<5); } static void ad9389b_csc_coeff(struct v4l2_subdev *sd, u16 A1, u16 A2, u16 A3, u16 A4, u16 B1, u16 B2, u16 B3, u16 B4, u16 C1, u16 C2, u16 C3, u16 C4) { /* A */ ad9389b_wr_and_or(sd, 0x18, 0xe0, A1>>8); ad9389b_wr(sd, 0x19, A1); ad9389b_wr_and_or(sd, 0x1A, 0xe0, A2>>8); ad9389b_wr(sd, 0x1B, A2); ad9389b_wr_and_or(sd, 0x1c, 0xe0, A3>>8); ad9389b_wr(sd, 0x1d, A3); ad9389b_wr_and_or(sd, 0x1e, 0xe0, A4>>8); ad9389b_wr(sd, 0x1f, A4); /* B */ ad9389b_wr_and_or(sd, 0x20, 0xe0, B1>>8); ad9389b_wr(sd, 0x21, B1); ad9389b_wr_and_or(sd, 0x22, 0xe0, B2>>8); ad9389b_wr(sd, 0x23, B2); ad9389b_wr_and_or(sd, 0x24, 0xe0, B3>>8); ad9389b_wr(sd, 0x25, 
B3); ad9389b_wr_and_or(sd, 0x26, 0xe0, B4>>8); ad9389b_wr(sd, 0x27, B4); /* C */ ad9389b_wr_and_or(sd, 0x28, 0xe0, C1>>8); ad9389b_wr(sd, 0x29, C1); ad9389b_wr_and_or(sd, 0x2A, 0xe0, C2>>8); ad9389b_wr(sd, 0x2B, C2); ad9389b_wr_and_or(sd, 0x2C, 0xe0, C3>>8); ad9389b_wr(sd, 0x2D, C3); ad9389b_wr_and_or(sd, 0x2E, 0xe0, C4>>8); ad9389b_wr(sd, 0x2F, C4); } static void ad9389b_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable) { if (enable) { u8 csc_mode = 0; ad9389b_csc_conversion_mode(sd, csc_mode); ad9389b_csc_coeff(sd, 4096-564, 0, 0, 256, 0, 4096-564, 0, 256, 0, 0, 4096-564, 256); /* enable CSC */ ad9389b_wr_and_or(sd, 0x3b, 0xfe, 0x1); /* AVI infoframe: Limited range RGB (16-235) */ ad9389b_wr_and_or(sd, 0xcd, 0xf9, 0x02); } else { /* disable CSC */ ad9389b_wr_and_or(sd, 0x3b, 0xfe, 0x0); /* AVI infoframe: Full range RGB (0-255) */ ad9389b_wr_and_or(sd, 0xcd, 0xf9, 0x04); } } static void ad9389b_set_IT_content_AVI_InfoFrame(struct v4l2_subdev *sd) { struct ad9389b_state *state = get_ad9389b_state(sd); if (state->dv_timings.bt.standards & V4L2_DV_BT_STD_CEA861) { /* CEA format, not IT */ ad9389b_wr_and_or(sd, 0xcd, 0xbf, 0x00); } else { /* IT format */ ad9389b_wr_and_or(sd, 0xcd, 0xbf, 0x40); } } static int ad9389b_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl) { struct ad9389b_state *state = get_ad9389b_state(sd); switch (ctrl->val) { case V4L2_DV_RGB_RANGE_AUTO: /* automatic */ if (state->dv_timings.bt.standards & V4L2_DV_BT_STD_CEA861) { /* cea format, RGB limited range (16-235) */ ad9389b_csc_rgb_full2limit(sd, true); } else { /* not cea format, RGB full range (0-255) */ ad9389b_csc_rgb_full2limit(sd, false); } break; case V4L2_DV_RGB_RANGE_LIMITED: /* RGB limited range (16-235) */ ad9389b_csc_rgb_full2limit(sd, true); break; case V4L2_DV_RGB_RANGE_FULL: /* RGB full range (0-255) */ ad9389b_csc_rgb_full2limit(sd, false); break; default: return -EINVAL; } return 0; } static void ad9389b_set_manual_pll_gear(struct v4l2_subdev *sd, 
u32 pixelclock) { u8 gear; /* Workaround for TMDS PLL problem * The TMDS PLL in AD9389b change gear when the chip is heated above a * certain temperature. The output is disabled when the PLL change gear * so the monitor has to lock on the signal again. A workaround for * this is to use the manual PLL gears. This is a solution from Analog * Devices that is not documented in the datasheets. * 0x98 [7] = enable manual gearing. 0x98 [6:4] = gear * * The pixel frequency ranges are based on readout of the gear the * automatic gearing selects for different pixel clocks * (read from 0x9e [3:1]). */ if (pixelclock > 140000000) gear = 0xc0; /* 4th gear */ else if (pixelclock > 117000000) gear = 0xb0; /* 3rd gear */ else if (pixelclock > 87000000) gear = 0xa0; /* 2nd gear */ else if (pixelclock > 60000000) gear = 0x90; /* 1st gear */ else gear = 0x80; /* 0th gear */ ad9389b_wr_and_or(sd, 0x98, 0x0f, gear); } /* ------------------------------ CTRL OPS ------------------------------ */ static int ad9389b_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct ad9389b_state *state = get_ad9389b_state(sd); v4l2_dbg(1, debug, sd, "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val); if (state->hdmi_mode_ctrl == ctrl) { /* Set HDMI or DVI-D */ ad9389b_wr_and_or(sd, 0xaf, 0xfd, ctrl->val == V4L2_DV_TX_MODE_HDMI ? 
0x02 : 0x00); return 0; } if (state->rgb_quantization_range_ctrl == ctrl) return ad9389b_set_rgb_quantization_mode(sd, ctrl); return -EINVAL; } static const struct v4l2_ctrl_ops ad9389b_ctrl_ops = { .s_ctrl = ad9389b_s_ctrl, }; /* ---------------------------- CORE OPS ------------------------------------------- */ #ifdef CONFIG_VIDEO_ADV_DEBUG static int ad9389b_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; reg->val = ad9389b_rd(sd, reg->reg & 0xff); reg->size = 1; return 0; } static int ad9389b_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ad9389b_wr(sd, reg->reg & 0xff, reg->val & 0xff); return 0; } #endif static int ad9389b_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_AD9389B, 0); } static int ad9389b_log_status(struct v4l2_subdev *sd) { struct ad9389b_state *state = get_ad9389b_state(sd); struct ad9389b_state_edid *edid = &state->edid; static const char * const states[] = { "in reset", "reading EDID", "idle", "initializing HDCP", "HDCP enabled", "initializing HDCP repeater", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F" }; static const char * const errors[] = { "no error", "bad receiver BKSV", "Ri mismatch", "Pj mismatch", "i2c error", "timed out", "max repeater cascade exceeded", "hash check failed", "too many devices", "9", "A", "B", "C", "D", "E", "F" }; u8 manual_gear; v4l2_info(sd, "chip revision %d\n", state->chip_revision); v4l2_info(sd, "power %s\n", state->power_on ? 
"on" : "off"); v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n", (ad9389b_rd(sd, 0x42) & MASK_AD9389B_HPD_DETECT) ? "detected" : "no", (ad9389b_rd(sd, 0x42) & MASK_AD9389B_MSEN_DETECT) ? "detected" : "no", edid->segments ? "found" : "no", edid->blocks); if (state->have_monitor) { v4l2_info(sd, "%s output %s\n", (ad9389b_rd(sd, 0xaf) & 0x02) ? "HDMI" : "DVI-D", (ad9389b_rd(sd, 0xa1) & 0x3c) ? "disabled" : "enabled"); } v4l2_info(sd, "ad9389b: %s\n", (ad9389b_rd(sd, 0xb8) & 0x40) ? "encrypted" : "no encryption"); v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n", states[ad9389b_rd(sd, 0xc8) & 0xf], errors[ad9389b_rd(sd, 0xc8) >> 4], state->edid_detect_counter, ad9389b_rd(sd, 0x94), ad9389b_rd(sd, 0x96)); manual_gear = ad9389b_rd(sd, 0x98) & 0x80; v4l2_info(sd, "ad9389b: RGB quantization: %s range\n", ad9389b_rd(sd, 0x3b) & 0x01 ? "limited" : "full"); v4l2_info(sd, "ad9389b: %s gear %d\n", manual_gear ? "manual" : "automatic", manual_gear ? ((ad9389b_rd(sd, 0x98) & 0x70) >> 4) : ((ad9389b_rd(sd, 0x9e) & 0x0e) >> 1)); if (state->have_monitor) { if (ad9389b_rd(sd, 0xaf) & 0x02) { /* HDMI only */ u8 manual_cts = ad9389b_rd(sd, 0x0a) & 0x80; u32 N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 | ad9389b_rd(sd, 0x02) << 8 | ad9389b_rd(sd, 0x03); u8 vic_detect = ad9389b_rd(sd, 0x3e) >> 2; u8 vic_sent = ad9389b_rd(sd, 0x3d) & 0x3f; u32 CTS; if (manual_cts) CTS = (ad9389b_rd(sd, 0x07) & 0xf) << 16 | ad9389b_rd(sd, 0x08) << 8 | ad9389b_rd(sd, 0x09); else CTS = (ad9389b_rd(sd, 0x04) & 0xf) << 16 | ad9389b_rd(sd, 0x05) << 8 | ad9389b_rd(sd, 0x06); N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 | ad9389b_rd(sd, 0x02) << 8 | ad9389b_rd(sd, 0x03); v4l2_info(sd, "ad9389b: CTS %s mode: N %d, CTS %d\n", manual_cts ? 
"manual" : "automatic", N, CTS); v4l2_info(sd, "ad9389b: VIC: detected %d, sent %d\n", vic_detect, vic_sent); } } if (state->dv_timings.type == V4L2_DV_BT_656_1120) { struct v4l2_bt_timings *bt = bt = &state->dv_timings.bt; u32 frame_width = bt->width + bt->hfrontporch + bt->hsync + bt->hbackporch; u32 frame_height = bt->height + bt->vfrontporch + bt->vsync + bt->vbackporch; u32 frame_size = frame_width * frame_height; v4l2_info(sd, "timings: %ux%u%s%u (%ux%u). Pix freq. = %u Hz. Polarities = 0x%x\n", bt->width, bt->height, bt->interlaced ? "i" : "p", frame_size > 0 ? (unsigned)bt->pixelclock / frame_size : 0, frame_width, frame_height, (unsigned)bt->pixelclock, bt->polarities); } else { v4l2_info(sd, "no timings set\n"); } return 0; } /* Power up/down ad9389b */ static int ad9389b_s_power(struct v4l2_subdev *sd, int on) { struct ad9389b_state *state = get_ad9389b_state(sd); struct ad9389b_platform_data *pdata = &state->pdata; const int retries = 20; int i; v4l2_dbg(1, debug, sd, "%s: power %s\n", __func__, on ? "on" : "off"); state->power_on = on; if (!on) { /* Power down */ ad9389b_wr_and_or(sd, 0x41, 0xbf, 0x40); return true; } /* Power up */ /* The ad9389b does not always come up immediately. Retry multiple times. */ for (i = 0; i < retries; i++) { ad9389b_wr_and_or(sd, 0x41, 0xbf, 0x0); if ((ad9389b_rd(sd, 0x41) & 0x40) == 0) break; ad9389b_wr_and_or(sd, 0x41, 0xbf, 0x40); msleep(10); } if (i == retries) { v4l2_dbg(1, debug, sd, "failed to powerup the ad9389b\n"); ad9389b_s_power(sd, 0); return false; } if (i > 1) v4l2_dbg(1, debug, sd, "needed %d retries to powerup the ad9389b\n", i); /* Select chip: AD9389B */ ad9389b_wr_and_or(sd, 0xba, 0xef, 0x10); /* Reserved registers that must be set according to REF_01 p. 
11*/ ad9389b_wr_and_or(sd, 0x98, 0xf0, 0x07); ad9389b_wr(sd, 0x9c, 0x38); ad9389b_wr_and_or(sd, 0x9d, 0xfc, 0x01); /* Differential output drive strength */ if (pdata->diff_data_drive_strength > 0) ad9389b_wr(sd, 0xa2, pdata->diff_data_drive_strength); else ad9389b_wr(sd, 0xa2, 0x87); if (pdata->diff_clk_drive_strength > 0) ad9389b_wr(sd, 0xa3, pdata->diff_clk_drive_strength); else ad9389b_wr(sd, 0xa3, 0x87); ad9389b_wr(sd, 0x0a, 0x01); ad9389b_wr(sd, 0xbb, 0xff); /* Set number of attempts to read the EDID */ ad9389b_wr(sd, 0xc9, 0xf); return true; } /* Enable interrupts */ static void ad9389b_set_isr(struct v4l2_subdev *sd, bool enable) { u8 irqs = MASK_AD9389B_HPD_INT | MASK_AD9389B_MSEN_INT; u8 irqs_rd; int retries = 100; /* The datasheet says that the EDID ready interrupt should be disabled if there is no hotplug. */ if (!enable) irqs = 0; else if (ad9389b_have_hotplug(sd)) irqs |= MASK_AD9389B_EDID_RDY_INT; /* * This i2c write can fail (approx. 1 in 1000 writes). But it * is essential that this register is correct, so retry it * multiple times. * * Note that the i2c write does not report an error, but the readback * clearly shows the wrong value. 
*/ do { ad9389b_wr(sd, 0x94, irqs); irqs_rd = ad9389b_rd(sd, 0x94); } while (retries-- && irqs_rd != irqs); if (irqs_rd != irqs) v4l2_err(sd, "Could not set interrupts: hw failure?\n"); } /* Interrupt handler */ static int ad9389b_isr(struct v4l2_subdev *sd, u32 status, bool *handled) { u8 irq_status; /* disable interrupts to prevent a race condition */ ad9389b_set_isr(sd, false); irq_status = ad9389b_rd(sd, 0x96); /* clear detected interrupts */ ad9389b_wr(sd, 0x96, irq_status); if (irq_status & (MASK_AD9389B_HPD_INT | MASK_AD9389B_MSEN_INT)) ad9389b_check_monitor_present_status(sd); if (irq_status & MASK_AD9389B_EDID_RDY_INT) ad9389b_check_edid_status(sd); /* enable interrupts */ ad9389b_set_isr(sd, true); *handled = true; return 0; } static const struct v4l2_subdev_core_ops ad9389b_core_ops = { .log_status = ad9389b_log_status, .g_chip_ident = ad9389b_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = ad9389b_g_register, .s_register = ad9389b_s_register, #endif .s_power = ad9389b_s_power, .interrupt_service_routine = ad9389b_isr, }; /* ------------------------------ PAD OPS ------------------------------ */ static int ad9389b_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) { struct ad9389b_state *state = get_ad9389b_state(sd); if (edid->pad != 0) return -EINVAL; if (edid->blocks == 0 || edid->blocks > 256) return -EINVAL; if (!edid->edid) return -EINVAL; if (!state->edid.segments) { v4l2_dbg(1, debug, sd, "EDID segment 0 not found\n"); return -ENODATA; } if (edid->start_block >= state->edid.segments * 2) return -E2BIG; if (edid->blocks + edid->start_block >= state->edid.segments * 2) edid->blocks = state->edid.segments * 2 - edid->start_block; memcpy(edid->edid, &state->edid.data[edid->start_block * 128], 128 * edid->blocks); return 0; } static const struct v4l2_subdev_pad_ops ad9389b_pad_ops = { .get_edid = ad9389b_get_edid, }; /* ------------------------------ VIDEO OPS ------------------------------ */ /* Enable/disable ad9389b output 
*/ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable) { struct ad9389b_state *state = get_ad9389b_state(sd); v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis")); ad9389b_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c)); if (enable) { ad9389b_check_monitor_present_status(sd); } else { ad9389b_s_power(sd, 0); state->have_monitor = false; } return 0; } static const struct v4l2_dv_timings ad9389b_timings[] = { V4L2_DV_BT_CEA_720X480P59_94, V4L2_DV_BT_CEA_720X576P50, V4L2_DV_BT_CEA_1280X720P24, V4L2_DV_BT_CEA_1280X720P25, V4L2_DV_BT_CEA_1280X720P30, V4L2_DV_BT_CEA_1280X720P50, V4L2_DV_BT_CEA_1280X720P60, V4L2_DV_BT_CEA_1920X1080P24, V4L2_DV_BT_CEA_1920X1080P25, V4L2_DV_BT_CEA_1920X1080P30, V4L2_DV_BT_CEA_1920X1080P50, V4L2_DV_BT_CEA_1920X1080P60, V4L2_DV_BT_DMT_640X350P85, V4L2_DV_BT_DMT_640X400P85, V4L2_DV_BT_DMT_720X400P85, V4L2_DV_BT_DMT_640X480P60, V4L2_DV_BT_DMT_640X480P72, V4L2_DV_BT_DMT_640X480P75, V4L2_DV_BT_DMT_640X480P85, V4L2_DV_BT_DMT_800X600P56, V4L2_DV_BT_DMT_800X600P60, V4L2_DV_BT_DMT_800X600P72, V4L2_DV_BT_DMT_800X600P75, V4L2_DV_BT_DMT_800X600P85, V4L2_DV_BT_DMT_848X480P60, V4L2_DV_BT_DMT_1024X768P60, V4L2_DV_BT_DMT_1024X768P70, V4L2_DV_BT_DMT_1024X768P75, V4L2_DV_BT_DMT_1024X768P85, V4L2_DV_BT_DMT_1152X864P75, V4L2_DV_BT_DMT_1280X768P60_RB, V4L2_DV_BT_DMT_1280X768P60, V4L2_DV_BT_DMT_1280X768P75, V4L2_DV_BT_DMT_1280X768P85, V4L2_DV_BT_DMT_1280X800P60_RB, V4L2_DV_BT_DMT_1280X800P60, V4L2_DV_BT_DMT_1280X800P75, V4L2_DV_BT_DMT_1280X800P85, V4L2_DV_BT_DMT_1280X960P60, V4L2_DV_BT_DMT_1280X960P85, V4L2_DV_BT_DMT_1280X1024P60, V4L2_DV_BT_DMT_1280X1024P75, V4L2_DV_BT_DMT_1280X1024P85, V4L2_DV_BT_DMT_1360X768P60, V4L2_DV_BT_DMT_1400X1050P60_RB, V4L2_DV_BT_DMT_1400X1050P60, V4L2_DV_BT_DMT_1400X1050P75, V4L2_DV_BT_DMT_1400X1050P85, V4L2_DV_BT_DMT_1440X900P60_RB, V4L2_DV_BT_DMT_1440X900P60, V4L2_DV_BT_DMT_1600X1200P60, V4L2_DV_BT_DMT_1680X1050P60_RB, V4L2_DV_BT_DMT_1680X1050P60, V4L2_DV_BT_DMT_1792X1344P60, 
V4L2_DV_BT_DMT_1856X1392P60, V4L2_DV_BT_DMT_1920X1200P60_RB, V4L2_DV_BT_DMT_1366X768P60, V4L2_DV_BT_DMT_1920X1080P60, {}, }; static int ad9389b_s_dv_timings(struct v4l2_subdev *sd, struct v4l2_dv_timings *timings) { struct ad9389b_state *state = get_ad9389b_state(sd); int i; v4l2_dbg(1, debug, sd, "%s:\n", __func__); /* quick sanity check */ if (timings->type != V4L2_DV_BT_656_1120) return -EINVAL; if (timings->bt.interlaced) return -EINVAL; if (timings->bt.pixelclock < 27000000 || timings->bt.pixelclock > 170000000) return -EINVAL; /* Fill the optional fields .standards and .flags in struct v4l2_dv_timings if the format is listed in ad9389b_timings[] */ for (i = 0; ad9389b_timings[i].bt.width; i++) { if (v4l_match_dv_timings(timings, &ad9389b_timings[i], 0)) { *timings = ad9389b_timings[i]; break; } } timings->bt.flags &= ~V4L2_DV_FL_REDUCED_FPS; /* save timings */ state->dv_timings = *timings; /* update quantization range based on new dv_timings */ ad9389b_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl); /* update PLL gear based on new dv_timings */ if (state->pdata.tmds_pll_gear == AD9389B_TMDS_PLL_GEAR_SEMI_AUTOMATIC) ad9389b_set_manual_pll_gear(sd, (u32)timings->bt.pixelclock); /* update AVI infoframe */ ad9389b_set_IT_content_AVI_InfoFrame(sd); return 0; } static int ad9389b_g_dv_timings(struct v4l2_subdev *sd, struct v4l2_dv_timings *timings) { struct ad9389b_state *state = get_ad9389b_state(sd); v4l2_dbg(1, debug, sd, "%s:\n", __func__); if (!timings) return -EINVAL; *timings = state->dv_timings; return 0; } static int ad9389b_enum_dv_timings(struct v4l2_subdev *sd, struct v4l2_enum_dv_timings *timings) { if (timings->index >= ARRAY_SIZE(ad9389b_timings)) return -EINVAL; memset(timings->reserved, 0, sizeof(timings->reserved)); timings->timings = ad9389b_timings[timings->index]; return 0; } static int ad9389b_dv_timings_cap(struct v4l2_subdev *sd, struct v4l2_dv_timings_cap *cap) { cap->type = V4L2_DV_BT_656_1120; cap->bt.max_width = 1920; 
cap->bt.max_height = 1200; cap->bt.min_pixelclock = 27000000; cap->bt.max_pixelclock = 170000000; cap->bt.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT; cap->bt.capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM; return 0; } static const struct v4l2_subdev_video_ops ad9389b_video_ops = { .s_stream = ad9389b_s_stream, .s_dv_timings = ad9389b_s_dv_timings, .g_dv_timings = ad9389b_g_dv_timings, .enum_dv_timings = ad9389b_enum_dv_timings, .dv_timings_cap = ad9389b_dv_timings_cap, }; static int ad9389b_s_audio_stream(struct v4l2_subdev *sd, int enable) { v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis")); if (enable) ad9389b_wr_and_or(sd, 0x45, 0x3f, 0x80); else ad9389b_wr_and_or(sd, 0x45, 0x3f, 0x40); return 0; } static int ad9389b_s_clock_freq(struct v4l2_subdev *sd, u32 freq) { u32 N; switch (freq) { case 32000: N = 4096; break; case 44100: N = 6272; break; case 48000: N = 6144; break; case 88200: N = 12544; break; case 96000: N = 12288; break; case 176400: N = 25088; break; case 192000: N = 24576; break; default: return -EINVAL; } /* Set N (used with CTS to regenerate the audio clock) */ ad9389b_wr(sd, 0x01, (N >> 16) & 0xf); ad9389b_wr(sd, 0x02, (N >> 8) & 0xff); ad9389b_wr(sd, 0x03, N & 0xff); return 0; } static int ad9389b_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq) { u32 i2s_sf; switch (freq) { case 32000: i2s_sf = 0x30; break; case 44100: i2s_sf = 0x00; break; case 48000: i2s_sf = 0x20; break; case 88200: i2s_sf = 0x80; break; case 96000: i2s_sf = 0xa0; break; case 176400: i2s_sf = 0xc0; break; case 192000: i2s_sf = 0xe0; break; default: return -EINVAL; } /* Set sampling frequency for I2S audio to 48 kHz */ ad9389b_wr_and_or(sd, 0x15, 0xf, i2s_sf); return 0; } static int ad9389b_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { /* TODO based on input/output/config */ /* TODO See datasheet "Programmers guide" p. 
39-40 */ /* Only 2 channels in use for application */ ad9389b_wr_and_or(sd, 0x50, 0x1f, 0x20); /* Speaker mapping */ ad9389b_wr(sd, 0x51, 0x00); /* TODO Where should this be placed? */ /* 16 bit audio word length */ ad9389b_wr_and_or(sd, 0x14, 0xf0, 0x02); return 0; } static const struct v4l2_subdev_audio_ops ad9389b_audio_ops = { .s_stream = ad9389b_s_audio_stream, .s_clock_freq = ad9389b_s_clock_freq, .s_i2s_clock_freq = ad9389b_s_i2s_clock_freq, .s_routing = ad9389b_s_routing, }; /* --------------------- SUBDEV OPS --------------------------------------- */ static const struct v4l2_subdev_ops ad9389b_ops = { .core = &ad9389b_core_ops, .video = &ad9389b_video_ops, .audio = &ad9389b_audio_ops, .pad = &ad9389b_pad_ops, }; /* ----------------------------------------------------------------------- */ static void ad9389b_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, u8 *buf) { int i, j; if (debug < lvl) return; v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment); for (i = 0; i < 256; i += 16) { u8 b[128]; u8 *bp = b; if (i == 128) v4l2_dbg(lvl, debug, sd, "\n"); for (j = i; j < i + 16; j++) { sprintf(bp, "0x%02x, ", buf[j]); bp += 6; } bp[0] = '\0'; v4l2_dbg(lvl, debug, sd, "%s\n", b); } } static void ad9389b_edid_handler(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct ad9389b_state *state = container_of(dwork, struct ad9389b_state, edid_handler); struct v4l2_subdev *sd = &state->sd; struct ad9389b_edid_detect ed; v4l2_dbg(1, debug, sd, "%s:\n", __func__); if (ad9389b_check_edid_status(sd)) { /* Return if we received the EDID. */ return; } if (ad9389b_have_hotplug(sd)) { /* We must retry reading the EDID several times, it is possible * that initially the EDID couldn't be read due to i2c errors * (DVI connectors are particularly prone to this problem). 
*/ if (state->edid.read_retries) { state->edid.read_retries--; /* EDID read failed, trigger a retry */ ad9389b_wr(sd, 0xc9, 0xf); queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY); return; } } /* We failed to read the EDID, so send an event for this. */ ed.present = false; ed.segment = ad9389b_rd(sd, 0xc4); v4l2_subdev_notify(sd, AD9389B_EDID_DETECT, (void *)&ed); v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__); } static void ad9389b_audio_setup(struct v4l2_subdev *sd) { v4l2_dbg(1, debug, sd, "%s\n", __func__); ad9389b_s_i2s_clock_freq(sd, 48000); ad9389b_s_clock_freq(sd, 48000); ad9389b_s_routing(sd, 0, 0, 0); } /* Initial setup of AD9389b */ /* Configure hdmi transmitter. */ static void ad9389b_setup(struct v4l2_subdev *sd) { struct ad9389b_state *state = get_ad9389b_state(sd); v4l2_dbg(1, debug, sd, "%s\n", __func__); /* Input format: RGB 4:4:4 */ ad9389b_wr_and_or(sd, 0x15, 0xf1, 0x0); /* Output format: RGB 4:4:4 */ ad9389b_wr_and_or(sd, 0x16, 0x3f, 0x0); /* CSC fixed point: +/-2, 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */ ad9389b_wr_and_or(sd, 0x17, 0xe1, 0x0e); /* Disable pixel repetition and CSC */ ad9389b_wr_and_or(sd, 0x3b, 0x9e, 0x0); /* Output format: RGB 4:4:4, Active Format Information is valid. */ ad9389b_wr_and_or(sd, 0x45, 0xc7, 0x08); /* Underscanned */ ad9389b_wr_and_or(sd, 0x46, 0x3f, 0x80); /* Setup video format */ ad9389b_wr(sd, 0x3c, 0x0); /* Active format aspect ratio: same as picure. 
*/ ad9389b_wr(sd, 0x47, 0x80); /* No encryption */ ad9389b_wr_and_or(sd, 0xaf, 0xef, 0x0); /* Positive clk edge capture for input video clock */ ad9389b_wr_and_or(sd, 0xba, 0x1f, 0x60); ad9389b_audio_setup(sd); v4l2_ctrl_handler_setup(&state->hdl); ad9389b_set_IT_content_AVI_InfoFrame(sd); } static void ad9389b_notify_monitor_detect(struct v4l2_subdev *sd) { struct ad9389b_monitor_detect mdt; struct ad9389b_state *state = get_ad9389b_state(sd); mdt.present = state->have_monitor; v4l2_subdev_notify(sd, AD9389B_MONITOR_DETECT, (void *)&mdt); } static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd) { struct ad9389b_state *state = get_ad9389b_state(sd); /* read hotplug and rx-sense state */ u8 status = ad9389b_rd(sd, 0x42); v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n", __func__, status, status & MASK_AD9389B_HPD_DETECT ? ", hotplug" : "", status & MASK_AD9389B_MSEN_DETECT ? ", rx-sense" : ""); if ((status & MASK_AD9389B_HPD_DETECT) && ((status & MASK_AD9389B_MSEN_DETECT) || state->edid.segments)) { v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__); if (!state->have_monitor) { v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__); state->have_monitor = true; ad9389b_set_isr(sd, true); if (!ad9389b_s_power(sd, true)) { v4l2_dbg(1, debug, sd, "%s: monitor detected, powerup failed\n", __func__); return; } ad9389b_setup(sd); ad9389b_notify_monitor_detect(sd); state->edid.read_retries = EDID_MAX_RETRIES; queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY); } } else if (status & MASK_AD9389B_HPD_DETECT) { v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__); state->edid.read_retries = EDID_MAX_RETRIES; queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY); } else if (!(status & MASK_AD9389B_HPD_DETECT)) { v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__); if (state->have_monitor) { v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__); state->have_monitor = 
false;
			ad9389b_notify_monitor_detect(sd);
		}
		/* No hotplug: power down and drop the cached EDID. */
		ad9389b_s_power(sd, false);
		memset(&state->edid, 0, sizeof(struct ad9389b_state_edid));
	}

	/* update read only ctrls */
	v4l2_ctrl_s_ctrl(state->hotplug_ctrl, ad9389b_have_hotplug(sd) ? 0x1 : 0x0);
	v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, ad9389b_have_rx_sense(sd) ? 0x1 : 0x0);
	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
}

/*
 * Verify one 128-byte EDID block: all 128 bytes (127 data bytes plus the
 * checksum byte) must sum to 0 modulo 256.
 */
static bool edid_block_verify_crc(u8 *edid_block)
{
	int i;
	u8 sum = 0;

	for (i = 0; i < 127; i++)
		sum += *(edid_block + i);
	return ((255 - sum + 1) == edid_block[127]);
}

/*
 * Verify one cached 256-byte EDID segment.  The first 128-byte block is
 * always checked; the second one only if the total block count says the
 * segment contains two valid blocks.
 */
static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
{
	struct ad9389b_state *state = get_ad9389b_state(sd);
	u32 blocks = state->edid.blocks;
	u8 *data = state->edid.data;

	if (edid_block_verify_crc(&data[segment * 256])) {
		if ((segment + 1) * 2 <= blocks)
			return edid_block_verify_crc(&data[segment * 256 + 128]);
		return true;
	}
	return false;
}

/*
 * Read, cache and verify the EDID segment the chip reports as ready
 * (reg 0xc5/0xc4).  Returns false and forces a re-read (via a power
 * cycle) on checksum failure.  Continues in the block after this one:
 * requests the next segment or notifies completion.
 */
static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
{
	struct ad9389b_state *state = get_ad9389b_state(sd);
	struct ad9389b_edid_detect ed;
	int segment;
	u8 edidRdy = ad9389b_rd(sd, 0xc5);

	v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
		 __func__, EDID_MAX_RETRIES - state->edid.read_retries);

	if (!(edidRdy & MASK_AD9389B_EDID_RDY))
		return false;

	segment = ad9389b_rd(sd, 0xc4);
	if (segment >= EDID_MAX_SEGM) {
		v4l2_err(sd, "edid segment number too big\n");
		return false;
	}
	v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
	ad9389b_edid_rd(sd, 256, &state->edid.data[segment * 256]);
	ad9389b_dbg_dump_edid(2, debug, sd, segment,
			      &state->edid.data[segment * 256]);
	if (segment == 0) {
		/* byte 0x7e of block 0 holds the number of extension blocks */
		state->edid.blocks = state->edid.data[0x7e] + 1;
		v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n",
			 __func__, state->edid.blocks);
	}
	if (!edid_segment_verify_crc(sd, segment)) {
		/* edid crc error, force reread of edid segment */
		ad9389b_s_power(sd, false);
		ad9389b_s_power(sd, true);
		return false;
	}
	/* one more segment read ok */
	state->edid.segments
= segment + 1; if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) { /* Request next EDID segment */ v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments); ad9389b_wr(sd, 0xc9, 0xf); ad9389b_wr(sd, 0xc4, state->edid.segments); state->edid.read_retries = EDID_MAX_RETRIES; queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY); return false; } /* report when we have all segments but report only for segment 0 */ ed.present = true; ed.segment = 0; v4l2_subdev_notify(sd, AD9389B_EDID_DETECT, (void *)&ed); state->edid_detect_counter++; v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0); return ed.present; } /* ----------------------------------------------------------------------- */ static void ad9389b_init_setup(struct v4l2_subdev *sd) { struct ad9389b_state *state = get_ad9389b_state(sd); struct ad9389b_state_edid *edid = &state->edid; v4l2_dbg(1, debug, sd, "%s\n", __func__); /* clear all interrupts */ ad9389b_wr(sd, 0x96, 0xff); memset(edid, 0, sizeof(struct ad9389b_state_edid)); state->have_monitor = false; ad9389b_set_isr(sd, false); } static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct v4l2_dv_timings dv1080p60 = V4L2_DV_BT_CEA_1920X1080P60; struct ad9389b_state *state; struct ad9389b_platform_data *pdata = client->dev.platform_data; struct v4l2_ctrl_handler *hdl; struct v4l2_subdev *sd; int err = -EIO; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_dbg(1, debug, client, "detecting ad9389b client on address 0x%x\n", client->addr << 1); state = kzalloc(sizeof(struct ad9389b_state), GFP_KERNEL); if (!state) return -ENOMEM; /* Platform data */ if (pdata == NULL) { v4l_err(client, "No platform data!\n"); err = -ENODEV; goto err_free; } memcpy(&state->pdata, pdata, sizeof(state->pdata)); sd = &state->sd; v4l2_i2c_subdev_init(sd, client, 
&ad9389b_ops); sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; hdl = &state->hdl; v4l2_ctrl_handler_init(hdl, 5); /* private controls */ state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &ad9389b_ctrl_ops, V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI, 0, V4L2_DV_TX_MODE_DVI_D); state->hdmi_mode_ctrl->is_private = true; state->hotplug_ctrl = v4l2_ctrl_new_std(hdl, NULL, V4L2_CID_DV_TX_HOTPLUG, 0, 1, 0, 0); state->hotplug_ctrl->is_private = true; state->rx_sense_ctrl = v4l2_ctrl_new_std(hdl, NULL, V4L2_CID_DV_TX_RXSENSE, 0, 1, 0, 0); state->rx_sense_ctrl->is_private = true; state->have_edid0_ctrl = v4l2_ctrl_new_std(hdl, NULL, V4L2_CID_DV_TX_EDID_PRESENT, 0, 1, 0, 0); state->have_edid0_ctrl->is_private = true; state->rgb_quantization_range_ctrl = v4l2_ctrl_new_std_menu(hdl, &ad9389b_ctrl_ops, V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL, 0, V4L2_DV_RGB_RANGE_AUTO); state->rgb_quantization_range_ctrl->is_private = true; sd->ctrl_handler = hdl; if (hdl->error) { err = hdl->error; goto err_hdl; } state->pad.flags = MEDIA_PAD_FL_SINK; err = media_entity_init(&sd->entity, 1, &state->pad, 0); if (err) goto err_hdl; state->chip_revision = ad9389b_rd(sd, 0x0); if (state->chip_revision != 2) { v4l2_err(sd, "chip_revision %d != 2\n", state->chip_revision); err = -EIO; goto err_entity; } v4l2_dbg(1, debug, sd, "reg 0x41 0x%x, chip version (reg 0x00) 0x%x\n", ad9389b_rd(sd, 0x41), state->chip_revision); state->edid_i2c_client = i2c_new_dummy(client->adapter, (0x7e>>1)); if (state->edid_i2c_client == NULL) { v4l2_err(sd, "failed to register edid i2c client\n"); goto err_entity; } state->work_queue = create_singlethread_workqueue(sd->name); if (state->work_queue == NULL) { v4l2_err(sd, "could not create workqueue\n"); goto err_unreg; } INIT_DELAYED_WORK(&state->edid_handler, ad9389b_edid_handler); state->dv_timings = dv1080p60; ad9389b_init_setup(sd); ad9389b_set_isr(sd, true); v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name, client->addr << 1, client->adapter->name); return 0; 
err_unreg: i2c_unregister_device(state->edid_i2c_client); err_entity: media_entity_cleanup(&sd->entity); err_hdl: v4l2_ctrl_handler_free(&state->hdl); err_free: kfree(state); return err; } /* ----------------------------------------------------------------------- */ static int ad9389b_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ad9389b_state *state = get_ad9389b_state(sd); state->chip_revision = -1; v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name, client->addr << 1, client->adapter->name); ad9389b_s_stream(sd, false); ad9389b_s_audio_stream(sd, false); ad9389b_init_setup(sd); cancel_delayed_work(&state->edid_handler); i2c_unregister_device(state->edid_i2c_client); destroy_workqueue(state->work_queue); v4l2_device_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); kfree(get_ad9389b_state(sd)); return 0; } /* ----------------------------------------------------------------------- */ static struct i2c_device_id ad9389b_id[] = { { "ad9389b", V4L2_IDENT_AD9389B }, { "ad9889b", V4L2_IDENT_AD9389B }, { } }; MODULE_DEVICE_TABLE(i2c, ad9389b_id); static struct i2c_driver ad9389b_driver = { .driver = { .owner = THIS_MODULE, .name = "ad9389b", }, .probe = ad9389b_probe, .remove = ad9389b_remove, .id_table = ad9389b_id, }; module_i2c_driver(ad9389b_driver);
gpl-2.0
rabeeh/linux-linaro-stable-mx6-unmaintained-will-be-deleted
drivers/power/abx500_chargalg.c
2084
62113
/* * Copyright (C) ST-Ericsson SA 2012 * Copyright (c) 2012 Sony Mobile Communications AB * * Charging algorithm driver for abx500 variants * * License Terms: GNU General Public License v2 * Authors: * Johan Palsson <johan.palsson@stericsson.com> * Karl Komierowski <karl.komierowski@stericsson.com> * Arun R Murthy <arun.murthy@stericsson.com> * Author: Imre Sunyi <imre.sunyi@sonymobile.com> */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/completion.h> #include <linux/workqueue.h> #include <linux/kobject.h> #include <linux/of.h> #include <linux/mfd/core.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/mfd/abx500/ux500_chargalg.h> #include <linux/mfd/abx500/ab8500-bm.h> #include <linux/notifier.h> /* Watchdog kick interval */ #define CHG_WD_INTERVAL (6 * HZ) /* End-of-charge criteria counter */ #define EOC_COND_CNT 10 /* One hour expressed in seconds */ #define ONE_HOUR_IN_SECONDS 3600 /* Five minutes expressed in seconds */ #define FIVE_MINUTES_IN_SECONDS 300 /* Plus margin for the low battery threshold */ #define BAT_PLUS_MARGIN (100) #define CHARGALG_CURR_STEP_LOW 0 #define CHARGALG_CURR_STEP_HIGH 100 #define to_abx500_chargalg_device_info(x) container_of((x), \ struct abx500_chargalg, chargalg_psy); enum abx500_chargers { NO_CHG, AC_CHG, USB_CHG, }; struct abx500_chargalg_charger_info { enum abx500_chargers conn_chg; enum abx500_chargers prev_conn_chg; enum abx500_chargers online_chg; enum abx500_chargers prev_online_chg; enum abx500_chargers charger_type; bool usb_chg_ok; bool ac_chg_ok; int usb_volt; int usb_curr; int ac_volt; int ac_curr; int usb_vset; int usb_iset; int ac_vset; int ac_iset; }; struct abx500_chargalg_suspension_status { bool suspended_change; bool ac_suspended; bool usb_suspended; }; struct 
abx500_chargalg_current_step_status { bool curr_step_change; int curr_step; }; struct abx500_chargalg_battery_data { int temp; int volt; int avg_curr; int inst_curr; int percent; }; enum abx500_chargalg_states { STATE_HANDHELD_INIT, STATE_HANDHELD, STATE_CHG_NOT_OK_INIT, STATE_CHG_NOT_OK, STATE_HW_TEMP_PROTECT_INIT, STATE_HW_TEMP_PROTECT, STATE_NORMAL_INIT, STATE_USB_PP_PRE_CHARGE, STATE_NORMAL, STATE_WAIT_FOR_RECHARGE_INIT, STATE_WAIT_FOR_RECHARGE, STATE_MAINTENANCE_A_INIT, STATE_MAINTENANCE_A, STATE_MAINTENANCE_B_INIT, STATE_MAINTENANCE_B, STATE_TEMP_UNDEROVER_INIT, STATE_TEMP_UNDEROVER, STATE_TEMP_LOWHIGH_INIT, STATE_TEMP_LOWHIGH, STATE_SUSPENDED_INIT, STATE_SUSPENDED, STATE_OVV_PROTECT_INIT, STATE_OVV_PROTECT, STATE_SAFETY_TIMER_EXPIRED_INIT, STATE_SAFETY_TIMER_EXPIRED, STATE_BATT_REMOVED_INIT, STATE_BATT_REMOVED, STATE_WD_EXPIRED_INIT, STATE_WD_EXPIRED, }; static const char *states[] = { "HANDHELD_INIT", "HANDHELD", "CHG_NOT_OK_INIT", "CHG_NOT_OK", "HW_TEMP_PROTECT_INIT", "HW_TEMP_PROTECT", "NORMAL_INIT", "USB_PP_PRE_CHARGE", "NORMAL", "WAIT_FOR_RECHARGE_INIT", "WAIT_FOR_RECHARGE", "MAINTENANCE_A_INIT", "MAINTENANCE_A", "MAINTENANCE_B_INIT", "MAINTENANCE_B", "TEMP_UNDEROVER_INIT", "TEMP_UNDEROVER", "TEMP_LOWHIGH_INIT", "TEMP_LOWHIGH", "SUSPENDED_INIT", "SUSPENDED", "OVV_PROTECT_INIT", "OVV_PROTECT", "SAFETY_TIMER_EXPIRED_INIT", "SAFETY_TIMER_EXPIRED", "BATT_REMOVED_INIT", "BATT_REMOVED", "WD_EXPIRED_INIT", "WD_EXPIRED", }; struct abx500_chargalg_events { bool batt_unknown; bool mainextchnotok; bool batt_ovv; bool batt_rem; bool btemp_underover; bool btemp_lowhigh; bool main_thermal_prot; bool usb_thermal_prot; bool main_ovv; bool vbus_ovv; bool usbchargernotok; bool safety_timer_expired; bool maintenance_timer_expired; bool ac_wd_expired; bool usb_wd_expired; bool ac_cv_active; bool usb_cv_active; bool vbus_collapsed; }; /** * struct abx500_charge_curr_maximization - Charger maximization parameters * @original_iset: the non optimized/maximised charger current 
* @current_iset: the charging current used at this moment * @test_delta_i: the delta between the current we want to charge and the current that is really going into the battery * @condition_cnt: number of iterations needed before a new charger current is set * @max_current: maximum charger current * @wait_cnt: to avoid too fast current step down in case of charger * voltage collapse, we insert this delay between step * down * @level: tells in how many steps the charging current has been increased */ struct abx500_charge_curr_maximization { int original_iset; int current_iset; int test_delta_i; int condition_cnt; int max_current; int wait_cnt; u8 level; }; enum maxim_ret { MAXIM_RET_NOACTION, MAXIM_RET_CHANGE, MAXIM_RET_IBAT_TOO_HIGH, }; /** * struct abx500_chargalg - abx500 Charging algorithm device information * @dev: pointer to the structure device * @charge_status: battery operating status * @eoc_cnt: counter used to determine end-of_charge * @maintenance_chg: indicate if maintenance charge is active * @t_hyst_norm temperature hysteresis when the temperature has been * over or under normal limits * @t_hyst_lowhigh temperature hysteresis when the temperature has been * over or under the high or low limits * @charge_state: current state of the charging algorithm * @ccm charging current maximization parameters * @chg_info: information about connected charger types * @batt_data: data of the battery * @susp_status: current charger suspension status * @bm: Platform specific battery management information * @curr_status: Current step status for over-current protection * @parent: pointer to the struct abx500 * @chargalg_psy: structure that holds the battery properties exposed by * the charging algorithm * @events: structure for information about events triggered * @chargalg_wq: work queue for running the charging algorithm * @chargalg_periodic_work: work to run the charging algorithm periodically * @chargalg_wd_work: work to kick the charger watchdog periodically * 
@chargalg_work:	work to run the charging algorithm instantly
 * @safety_timer:	charging safety timer
 * @maintenance_timer:	maintenance charging timer
 * @chargalg_kobject:	structure of type kobject
 */
struct abx500_chargalg {
	struct device *dev;
	int charge_status;
	int eoc_cnt;
	bool maintenance_chg;
	int t_hyst_norm;
	int t_hyst_lowhigh;
	enum abx500_chargalg_states charge_state;
	struct abx500_charge_curr_maximization ccm;
	struct abx500_chargalg_charger_info chg_info;
	struct abx500_chargalg_battery_data batt_data;
	struct abx500_chargalg_suspension_status susp_status;
	struct ab8500 *parent;
	struct abx500_chargalg_current_step_status curr_status;
	struct abx500_bm_data *bm;
	struct power_supply chargalg_psy;
	struct ux500_charger *ac_chg;
	struct ux500_charger *usb_chg;
	struct abx500_chargalg_events events;
	struct workqueue_struct *chargalg_wq;
	struct delayed_work chargalg_periodic_work;
	struct delayed_work chargalg_wd_work;
	struct work_struct chargalg_work;
	struct hrtimer safety_timer;
	struct hrtimer maintenance_timer;
	struct kobject chargalg_kobject;
};

/*External charger prepare notifier*/
BLOCKING_NOTIFIER_HEAD(charger_notifier_list);

/* Main battery properties */
static enum power_supply_property abx500_chargalg_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_HEALTH,
};

/* show/store callbacks backing the sysfs attributes of this driver */
struct abx500_chargalg_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct abx500_chargalg *, char *);
	ssize_t (*store)(struct abx500_chargalg *, const char *, size_t);
};

/**
 * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
 * @timer:	pointer to the hrtimer structure
 *
 * This function gets called when the safety timer for the charger
 * expires
 */
static enum hrtimer_restart
abx500_chargalg_safety_timer_expired(struct hrtimer *timer)
{
	struct abx500_chargalg *di = container_of(timer,
		struct abx500_chargalg, safety_timer);
	dev_err(di->dev, "Safety timer expired\n");
	di->events.safety_timer_expired = true;

	/* Trigger execution of the algorithm instantly */
	queue_work(di->chargalg_wq, &di->chargalg_work);

	return HRTIMER_NORESTART;
}

/**
 * abx500_chargalg_maintenance_timer_expired() - Expiration of
 * the maintenance timer
 * @timer:	pointer to the timer structure
 *
 * This function gets called when the maintenance timer
 * expires
 */
static enum hrtimer_restart
abx500_chargalg_maintenance_timer_expired(struct hrtimer *timer)
{
	struct abx500_chargalg *di = container_of(timer,
		struct abx500_chargalg, maintenance_timer);
	dev_dbg(di->dev, "Maintenance timer expired\n");
	di->events.maintenance_timer_expired = true;

	/* Trigger execution of the algorithm instantly */
	queue_work(di->chargalg_wq, &di->chargalg_work);

	return HRTIMER_NORESTART;
}

/**
 * abx500_chargalg_state_to() - Change charge state
 * @di:		pointer to the abx500_chargalg structure
 * @state:	new state to enter
 *
 * This function gets called when a charge state change should occur
 */
static void abx500_chargalg_state_to(struct abx500_chargalg *di,
	enum abx500_chargalg_states state)
{
	dev_dbg(di->dev,
		"State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
		di->charge_state == state ?
		"NO" : "YES",
		di->charge_state,
		states[di->charge_state],
		state,
		states[state]);

	di->charge_state = state;
}

/* Re-check that the active charger is still enabled while in a charging
 * state; in non-charging states this is a no-op returning 0. */
static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
{
	/* Only meaningful while actively charging */
	switch (di->charge_state) {
	case STATE_NORMAL:
	case STATE_MAINTENANCE_A:
	case STATE_MAINTENANCE_B:
		break;
	default:
		return 0;
	}

	if (di->chg_info.charger_type & USB_CHG) {
		return di->usb_chg->ops.check_enable(di->usb_chg,
			di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
			di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
	} else if ((di->chg_info.charger_type & AC_CHG) &&
		   !(di->ac_chg->external)) {
		/* External AC chargers are managed by their own driver */
		return di->ac_chg->ops.check_enable(di->ac_chg,
			di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
			di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
	}
	return 0;
}

/**
 * abx500_chargalg_check_charger_connection() - Check charger connection change
 * @di:		pointer to the abx500_chargalg structure
 *
 * This function will check if there is a change in the charger connection
 * and change charge state accordingly. AC has precedence over USB.
 */
static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
{
	if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
		di->susp_status.suspended_change) {
		/*
		 * Charger state changed or suspension
		 * has changed since last update
		 */
		if ((di->chg_info.conn_chg & AC_CHG) &&
			!di->susp_status.ac_suspended) {
			dev_dbg(di->dev, "Charging source is AC\n");
			if (di->chg_info.charger_type != AC_CHG) {
				di->chg_info.charger_type = AC_CHG;
				abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
			}
		} else if ((di->chg_info.conn_chg & USB_CHG) &&
			!di->susp_status.usb_suspended) {
			dev_dbg(di->dev, "Charging source is USB\n");
			di->chg_info.charger_type = USB_CHG;
			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
		} else if (di->chg_info.conn_chg &&
			(di->susp_status.ac_suspended ||
			di->susp_status.usb_suspended)) {
			/* Connected but administratively suspended */
			dev_dbg(di->dev, "Charging is suspended\n");
			di->chg_info.charger_type = NO_CHG;
			abx500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
		} else {
			dev_dbg(di->dev, "Charging source is OFF\n");
			di->chg_info.charger_type = NO_CHG;
			abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
		}
		di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
		di->susp_status.suspended_change = false;
	}
	return di->chg_info.conn_chg;
}

/**
 * abx500_chargalg_check_current_step_status() - Check charging current
 * step status.
 * @di:		pointer to the abx500_chargalg structure
 *
 * This function will check if there is a change in the charging current step
 * and change charge state accordingly.
 */
static void abx500_chargalg_check_current_step_status
		(struct abx500_chargalg *di)
{
	if (di->curr_status.curr_step_change)
		abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
	di->curr_status.curr_step_change = false;
}

/**
 * abx500_chargalg_start_safety_timer() - Start charging safety timer
 * @di:		pointer to the abx500_chargalg structure
 *
 * The safety timer is used to avoid overcharging of old or bad batteries.
 * There are different timers for AC and USB
 */
static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
{
	/* Charger-dependent expiration time in hours*/
	int timer_expiration = 0;

	switch (di->chg_info.charger_type) {
	case AC_CHG:
		timer_expiration = di->bm->main_safety_tmr_h;
		break;

	case USB_CHG:
		timer_expiration = di->bm->usb_safety_tmr_h;
		break;

	default:
		/* NOTE(review): timer_expiration stays 0 here, so the timer
		 * is armed with a ~0h expiry — confirm this is intended. */
		dev_err(di->dev, "Unknown charger to charge from\n");
		break;
	}

	di->events.safety_timer_expired = false;
	/* Slack of five minutes allows the timer to be coalesced */
	hrtimer_set_expires_range(&di->safety_timer,
		ktime_set(timer_expiration * ONE_HOUR_IN_SECONDS, 0),
		ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
	hrtimer_start_expires(&di->safety_timer, HRTIMER_MODE_REL);
}

/**
 * abx500_chargalg_stop_safety_timer() - Stop charging safety timer
 * @di:		pointer to the abx500_chargalg structure
 *
 * The safety timer is stopped whenever the NORMAL state is exited
 */
static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
{
	if (hrtimer_try_to_cancel(&di->safety_timer) >= 0)
		di->events.safety_timer_expired = false;
}

/**
 * abx500_chargalg_start_maintenance_timer() - Start charging maintenance timer
 * @di:		pointer to the abx500_chargalg structure
 * @duration:	duration of the maintenance timer in hours
 *
 * The maintenance timer is used to maintain the charge in the battery once
 * the battery is considered full.
 * These timers are chosen to match the
 * discharge curve of the battery
 */
static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
	int duration)
{
	hrtimer_set_expires_range(&di->maintenance_timer,
		ktime_set(duration * ONE_HOUR_IN_SECONDS, 0),
		ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
	di->events.maintenance_timer_expired = false;
	hrtimer_start_expires(&di->maintenance_timer, HRTIMER_MODE_REL);
}

/**
 * abx500_chargalg_stop_maintenance_timer() - Stop maintenance timer
 * @di:		pointer to the abx500_chargalg structure
 *
 * The maintenance timer is stopped whenever maintenance ends or when another
 * state is entered
 */
static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
{
	if (hrtimer_try_to_cancel(&di->maintenance_timer) >= 0)
		di->events.maintenance_timer_expired = false;
}

/**
 * abx500_chargalg_kick_watchdog() - Kick charger watchdog
 * @di:		pointer to the abx500_chargalg structure
 *
 * The charger watchdog have to be kicked periodically whenever the charger is
 * on, else the ABB will reset the system
 */
static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
{
	/* Check if charger exists and kick watchdog if charging */
	if (di->ac_chg && di->ac_chg->ops.kick_wd &&
	    di->chg_info.online_chg & AC_CHG) {
		/*
		 * If AB charger watchdog expired, pm2xxx charging
		 * gets disabled. To be safe, kick both AB charger watchdog
		 * and pm2xxx watchdog.
		 */
		if (di->ac_chg->external &&
		    di->usb_chg && di->usb_chg->ops.kick_wd)
			di->usb_chg->ops.kick_wd(di->usb_chg);

		return di->ac_chg->ops.kick_wd(di->ac_chg);
	} else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
			di->chg_info.online_chg & USB_CHG)
		return di->usb_chg->ops.kick_wd(di->usb_chg);

	/* No online charger to kick */
	return -ENXIO;
}

/**
 * abx500_chargalg_ac_en() - Turn on/off the AC charger
 * @di:		pointer to the abx500_chargalg structure
 * @enable:	charger on/off
 * @vset:	requested charger output voltage
 * @iset:	requested charger output current
 *
 * The AC charger will be turned on/off with the requested charge voltage and
 * current
 */
static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
	int vset, int iset)
{
	/* One-shot latch: the external-charger prepare notifier is only
	 * fired the first time an external AC charger is enabled. */
	static int abx500_chargalg_ex_ac_enable_toggle;

	if (!di->ac_chg || !di->ac_chg->ops.enable)
		return -ENXIO;

	/* Select maximum of what both the charger and the battery supports */
	if (di->ac_chg->max_out_volt)
		vset = min(vset, di->ac_chg->max_out_volt);
	if (di->ac_chg->max_out_curr)
		iset = min(iset, di->ac_chg->max_out_curr);

	di->chg_info.ac_iset = iset;
	di->chg_info.ac_vset = vset;

	/* Enable external charger */
	if (enable && di->ac_chg->external &&
	    !abx500_chargalg_ex_ac_enable_toggle) {
		blocking_notifier_call_chain(&charger_notifier_list,
					     0, di->dev);
		abx500_chargalg_ex_ac_enable_toggle++;
	}

	return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
}

/**
 * abx500_chargalg_usb_en() - Turn on/off the USB charger
 * @di:		pointer to the abx500_chargalg structure
 * @enable:	charger on/off
 * @vset:	requested charger output voltage
 * @iset:	requested charger output current
 *
 * The USB charger will be turned on/off with the requested charge voltage and
 * current
 */
static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
	int vset, int iset)
{
	if (!di->usb_chg || !di->usb_chg->ops.enable)
		return -ENXIO;

	/* Select maximum of what both the charger and the battery supports */
	if (di->usb_chg->max_out_volt)
		vset = min(vset, di->usb_chg->max_out_volt);
	if (di->usb_chg->max_out_curr)
		iset = min(iset, di->usb_chg->max_out_curr);

	di->chg_info.usb_iset = iset;
	di->chg_info.usb_vset = vset;

	return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
}

/**
 * ab8540_chargalg_usb_pp_en() - Enable/ disable USB power path
 * @di:		pointer to the abx500_chargalg structure
 * @enable:	power path enable/disable
 *
 * The USB power path will be enable/ disable
 */
static int ab8540_chargalg_usb_pp_en(struct abx500_chargalg *di, bool enable)
{
	if (!di->usb_chg || !di->usb_chg->ops.pp_enable)
		return -ENXIO;

	return di->usb_chg->ops.pp_enable(di->usb_chg, enable);
}

/**
 * ab8540_chargalg_usb_pre_chg_en() - Enable/ disable USB pre-charge
 * @di:		pointer to the abx500_chargalg structure
 * @enable:	USB pre-charge enable/disable
 *
 * The USB pre-charge will be enable/ disable
 */
static int ab8540_chargalg_usb_pre_chg_en(struct abx500_chargalg *di,
					  bool enable)
{
	if (!di->usb_chg || !di->usb_chg->ops.pre_chg_enable)
		return -ENXIO;

	return di->usb_chg->ops.pre_chg_enable(di->usb_chg, enable);
}

/**
 * abx500_chargalg_update_chg_curr() - Update charger current
 * @di:		pointer to the abx500_chargalg structure
 * @iset:	requested charger output current
 *
 * The charger output current will be updated for the charger
 * that is currently in use
 */
static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
		int iset)
{
	/* Check if charger exists and update current if charging */
	if (di->ac_chg && di->ac_chg->ops.update_curr &&
			di->chg_info.charger_type & AC_CHG) {
		/*
		 * Select maximum of what both the charger
		 * and the battery supports
		 */
		if (di->ac_chg->max_out_curr)
			iset = min(iset, di->ac_chg->max_out_curr);

		di->chg_info.ac_iset = iset;

		return di->ac_chg->ops.update_curr(di->ac_chg, iset);
	} else if (di->usb_chg && di->usb_chg->ops.update_curr &&
			di->chg_info.charger_type & USB_CHG) {
		/*
		 * Select maximum of what both the charger
		 * and the battery supports
		 */
		if (di->usb_chg->max_out_curr)
			iset = min(iset, di->usb_chg->max_out_curr);

		di->chg_info.usb_iset = iset;

		return di->usb_chg->ops.update_curr(di->usb_chg, iset);
	}

	return -ENXIO;
}

/**
 * abx500_chargalg_stop_charging() - Stop charging
 * @di:		pointer to the abx500_chargalg structure
 *
 * This function is called from any state where charging should be stopped.
 * All charging is disabled and all status parameters and timers are changed
 * accordingly
 */
static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
{
	abx500_chargalg_ac_en(di, false, 0, 0);
	abx500_chargalg_usb_en(di, false, 0, 0);
	abx500_chargalg_stop_safety_timer(di);
	abx500_chargalg_stop_maintenance_timer(di);
	di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
	di->maintenance_chg = false;
	cancel_delayed_work(&di->chargalg_wd_work);
	power_supply_changed(&di->chargalg_psy);
}

/**
 * abx500_chargalg_hold_charging() - Pauses charging
 * @di:		pointer to the abx500_chargalg structure
 *
 * This function is called in the case where maintenance charging has been
 * disabled and instead a battery voltage mode is entered to check when the
 * battery voltage has reached a certain recharge voltage
 */
static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
{
	abx500_chargalg_ac_en(di, false, 0, 0);
	abx500_chargalg_usb_en(di, false, 0, 0);
	abx500_chargalg_stop_safety_timer(di);
	abx500_chargalg_stop_maintenance_timer(di);
	/* Status stays CHARGING (unlike stop_charging) while we wait for
	 * the battery to drop to the recharge voltage. */
	di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
	di->maintenance_chg = false;
	cancel_delayed_work(&di->chargalg_wd_work);
	power_supply_changed(&di->chargalg_psy);
}

/**
 * abx500_chargalg_start_charging() - Start the charger
 * @di:		pointer to the abx500_chargalg structure
 * @vset:	requested charger output voltage
 * @iset:	requested charger output current
 *
 * A charger will be enabled depending on the requested charger type that was
 * detected previously.
 */
static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
	int vset, int iset)
{
	switch (di->chg_info.charger_type) {
	case AC_CHG:
		dev_dbg(di->dev,
			"AC parameters: Vset %d, Ich %d\n", vset, iset);
		abx500_chargalg_usb_en(di, false, 0, 0);
		abx500_chargalg_ac_en(di, true, vset, iset);
		break;

	case USB_CHG:
		dev_dbg(di->dev,
			"USB parameters: Vset %d, Ich %d\n", vset, iset);
		abx500_chargalg_ac_en(di, false, 0, 0);
		abx500_chargalg_usb_en(di, true, vset, iset);
		break;

	default:
		dev_err(di->dev, "Unknown charger to charge from\n");
		break;
	}
}

/**
 * abx500_chargalg_check_temp() - Check battery temperature ranges
 * @di:		pointer to the abx500_chargalg structure
 *
 * The battery temperature is checked against the predefined limits and the
 * charge state is changed accordingly
 */
static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
{
	if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
		di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
		/* Temp OK! */
		di->events.btemp_underover = false;
		di->events.btemp_lowhigh = false;
		di->t_hyst_norm = 0;
		di->t_hyst_lowhigh = 0;
	} else {
		/* Outside normal window: classify as minor (low/high) or
		 * major (under/over) excursion, applying hysteresis. */
		if (((di->batt_data.temp >= di->bm->temp_high) &&
			(di->batt_data.temp <
				(di->bm->temp_over - di->t_hyst_lowhigh))) ||
			((di->batt_data.temp >
				(di->bm->temp_under + di->t_hyst_lowhigh)) &&
			(di->batt_data.temp <= di->bm->temp_low))) {
			/* TEMP minor!!!!! */
			di->events.btemp_underover = false;
			di->events.btemp_lowhigh = true;
			di->t_hyst_norm = di->bm->temp_hysteresis;
			di->t_hyst_lowhigh = 0;
		} else if (di->batt_data.temp <= di->bm->temp_under ||
			di->batt_data.temp >= di->bm->temp_over) {
			/* TEMP major!!!!!
			 */
			di->events.btemp_underover = true;
			di->events.btemp_lowhigh = false;
			di->t_hyst_norm = 0;
			di->t_hyst_lowhigh = di->bm->temp_hysteresis;
		} else {
			/* Within hysteresis */
			dev_dbg(di->dev, "Within hysteresis limit temp: %d "
				"hyst_lowhigh %d, hyst normal %d\n",
				di->batt_data.temp, di->t_hyst_lowhigh,
				di->t_hyst_norm);
		}
	}
}

/**
 * abx500_chargalg_check_charger_voltage() - Check charger voltage
 * @di:		pointer to the abx500_chargalg structure
 *
 * Charger voltage is checked against maximum limit
 */
static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
{
	if (di->chg_info.usb_volt > di->bm->chg_params->usb_volt_max)
		di->chg_info.usb_chg_ok = false;
	else
		di->chg_info.usb_chg_ok = true;

	if (di->chg_info.ac_volt > di->bm->chg_params->ac_volt_max)
		di->chg_info.ac_chg_ok = false;
	else
		di->chg_info.ac_chg_ok = true;
}

/**
 * abx500_chargalg_end_of_charge() - Check if end-of-charge criteria is fulfilled
 * @di:		pointer to the abx500_chargalg structure
 *
 * End-of-charge criteria is fulfilled when the battery voltage is above a
 * certain limit and the battery current is below a certain limit for a
 * predefined number of consecutive seconds.
 * If true, the battery is full
 */
static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
{
	if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
		di->charge_state == STATE_NORMAL &&
		!di->maintenance_chg && (di->batt_data.volt >=
		di->bm->bat_type[di->bm->batt_id].termination_vol ||
		di->events.usb_cv_active || di->events.ac_cv_active) &&
		di->batt_data.avg_curr <
		di->bm->bat_type[di->bm->batt_id].termination_curr &&
		di->batt_data.avg_curr > 0) {
		/* EOC condition must hold for EOC_COND_CNT iterations */
		if (++di->eoc_cnt >= EOC_COND_CNT) {
			di->eoc_cnt = 0;
			if ((di->chg_info.charger_type & USB_CHG) &&
			   (di->usb_chg->power_path))
				ab8540_chargalg_usb_pp_en(di, true);
			di->charge_status = POWER_SUPPLY_STATUS_FULL;
			di->maintenance_chg = true;
			dev_dbg(di->dev, "EOC reached!\n");
			power_supply_changed(&di->chargalg_psy);
		} else {
			dev_dbg(di->dev,
				" EOC limit reached for the %d"
				" time, out of %d before EOC\n",
				di->eoc_cnt,
				EOC_COND_CNT);
		}
	} else {
		di->eoc_cnt = 0;
	}
}

/* Reset the charger-current maximization state from battery/platform data */
static void init_maxim_chg_curr(struct abx500_chargalg *di)
{
	di->ccm.original_iset =
		di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
	di->ccm.current_iset =
		di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
	di->ccm.test_delta_i = di->bm->maxi->charger_curr_step;
	di->ccm.max_current = di->bm->maxi->chg_curr;
	di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
	di->ccm.level = 0;
}

/**
 * abx500_chargalg_chg_curr_maxim - increases the charger current to
 * compensate for the system load
 * @di:		pointer to the abx500_chargalg structure
 *
 * This maximization function is used to raise the charger current to get the
 * battery current as close to the optimal value as possible.
 * The battery
 * current during charging is affected by the system load
 */
static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
{
	int delta_i;

	if (!di->bm->maxi->ena_maxi)
		return MAXIM_RET_NOACTION;

	delta_i = di->ccm.original_iset - di->batt_data.inst_curr;

	if (di->events.vbus_collapsed) {
		dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
				di->ccm.wait_cnt);
		if (di->ccm.wait_cnt == 0) {
			dev_dbg(di->dev, "lowering current\n");
			di->ccm.wait_cnt++;
			di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
			di->ccm.max_current =
				di->ccm.current_iset - di->ccm.test_delta_i;
			di->ccm.current_iset = di->ccm.max_current;
			di->ccm.level--;
			return MAXIM_RET_CHANGE;
		} else {
			dev_dbg(di->dev, "waiting\n");
			/* Let's go in here twice before lowering curr again */
			di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
			return MAXIM_RET_NOACTION;
		}
	}

	di->ccm.wait_cnt = 0;

	if ((di->batt_data.inst_curr > di->ccm.original_iset)) {
		dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
			" (limit %dmA) (current iset: %dmA)!\n",
			di->batt_data.inst_curr, di->ccm.original_iset,
			di->ccm.current_iset);

		if (di->ccm.current_iset == di->ccm.original_iset)
			return MAXIM_RET_NOACTION;

		/* Fall back to the non-maximized current */
		di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
		di->ccm.current_iset = di->ccm.original_iset;
		di->ccm.level = 0;

		return MAXIM_RET_IBAT_TOO_HIGH;
	}

	if (delta_i > di->ccm.test_delta_i &&
		(di->ccm.current_iset + di->ccm.test_delta_i) <
		di->ccm.max_current) {
		if (di->ccm.condition_cnt-- == 0) {
			/* Increase the iset with ccm.test_delta_i */
			di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
			di->ccm.current_iset += di->ccm.test_delta_i;
			di->ccm.level++;
			dev_dbg(di->dev, " Maximization needed, increase"
				" with %d mA to %dmA (Optimal ibat: %d)"
				" Level %d\n",
				di->ccm.test_delta_i,
				di->ccm.current_iset,
				di->ccm.original_iset,
				di->ccm.level);
			return MAXIM_RET_CHANGE;
		} else {
			return MAXIM_RET_NOACTION;
		}
	} else {
		di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
		return MAXIM_RET_NOACTION;
	}
}

static void
handle_maxim_chg_curr(struct abx500_chargalg *di)
{
	enum maxim_ret ret;
	int result;

	/* Apply the decision made by the maximization step */
	ret = abx500_chargalg_chg_curr_maxim(di);
	switch (ret) {
	case MAXIM_RET_CHANGE:
		result = abx500_chargalg_update_chg_curr(di,
			di->ccm.current_iset);
		if (result)
			dev_err(di->dev, "failed to set chg curr\n");
		break;
	case MAXIM_RET_IBAT_TOO_HIGH:
		result = abx500_chargalg_update_chg_curr(di,
			di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
		if (result)
			dev_err(di->dev, "failed to set chg curr\n");
		break;

	case MAXIM_RET_NOACTION:
	default:
		/* Do nothing..*/
		break;
	}
}

/* Collect battery/charger data from one external power supply; called
 * from class_for_each_device() with the chargalg psy as @data. */
static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
{
	struct power_supply *psy;
	struct power_supply *ext;
	struct abx500_chargalg *di;
	union power_supply_propval ret;
	int i, j;
	bool psy_found = false;
	bool capacity_updated = false;

	psy = (struct power_supply *)data;
	ext = dev_get_drvdata(dev);
	di = to_abx500_chargalg_device_info(psy);

	/* For all psy where the driver name appears in any supplied_to */
	for (i = 0; i < ext->num_supplicants; i++) {
		if (!strcmp(ext->supplied_to[i], psy->name))
			psy_found = true;
	}

	if (!psy_found)
		return 0;

	/*
	 * If external is not registering 'POWER_SUPPLY_PROP_CAPACITY' to its
	 * property because of handling that sysfs entry on its own, this is
	 * the place to get the battery capacity.
	 */
	if (!ext->get_property(ext, POWER_SUPPLY_PROP_CAPACITY, &ret)) {
		di->batt_data.percent = ret.intval;
		capacity_updated = true;
	}

	/* Go through all properties for the psy */
	for (j = 0; j < ext->num_properties; j++) {
		enum power_supply_property prop;
		prop = ext->properties[j];

		/* Initialize chargers if not already done */
		if (!di->ac_chg &&
			ext->type == POWER_SUPPLY_TYPE_MAINS)
			di->ac_chg = psy_to_ux500_charger(ext);
		else if (!di->usb_chg &&
			ext->type == POWER_SUPPLY_TYPE_USB)
			di->usb_chg = psy_to_ux500_charger(ext);

		if (ext->get_property(ext, prop, &ret))
			continue;
		switch (prop) {
		case POWER_SUPPLY_PROP_PRESENT:
			switch (ext->type) {
			case POWER_SUPPLY_TYPE_BATTERY:
				/* Battery present */
				if (ret.intval)
					di->events.batt_rem = false;
				/* Battery removed */
				else
					di->events.batt_rem = true;
				break;
			case POWER_SUPPLY_TYPE_MAINS:
				/* AC disconnected */
				if (!ret.intval &&
					(di->chg_info.conn_chg & AC_CHG)) {
					di->chg_info.prev_conn_chg =
						di->chg_info.conn_chg;
					di->chg_info.conn_chg &= ~AC_CHG;
				}
				/* AC connected */
				else if (ret.intval &&
					!(di->chg_info.conn_chg & AC_CHG)) {
					di->chg_info.prev_conn_chg =
						di->chg_info.conn_chg;
					di->chg_info.conn_chg |= AC_CHG;
				}
				break;
			case POWER_SUPPLY_TYPE_USB:
				/* USB disconnected */
				if (!ret.intval &&
					(di->chg_info.conn_chg & USB_CHG)) {
					di->chg_info.prev_conn_chg =
						di->chg_info.conn_chg;
					di->chg_info.conn_chg &= ~USB_CHG;
				}
				/* USB connected */
				else if (ret.intval &&
					!(di->chg_info.conn_chg & USB_CHG)) {
					di->chg_info.prev_conn_chg =
						di->chg_info.conn_chg;
					di->chg_info.conn_chg |= USB_CHG;
				}
				break;
			default:
				break;
			}
			break;
		case POWER_SUPPLY_PROP_ONLINE:
			switch (ext->type) {
			case POWER_SUPPLY_TYPE_BATTERY:
				break;
			case POWER_SUPPLY_TYPE_MAINS:
				/* AC offline */
				if (!ret.intval &&
					(di->chg_info.online_chg & AC_CHG)) {
					di->chg_info.prev_online_chg =
						di->chg_info.online_chg;
					di->chg_info.online_chg &= ~AC_CHG;
				}
				/* AC online */
				else if (ret.intval &&
					!(di->chg_info.online_chg & AC_CHG)) {
					di->chg_info.prev_online_chg =
						di->chg_info.online_chg;
					di->chg_info.online_chg |= AC_CHG;
					queue_delayed_work(di->chargalg_wq,
						&di->chargalg_wd_work, 0);
				}
				break;
			case POWER_SUPPLY_TYPE_USB:
				/* USB offline */
				if (!ret.intval &&
					(di->chg_info.online_chg & USB_CHG)) {
					di->chg_info.prev_online_chg =
						di->chg_info.online_chg;
					di->chg_info.online_chg &= ~USB_CHG;
				}
				/* USB online */
				else if (ret.intval &&
					!(di->chg_info.online_chg & USB_CHG)) {
					di->chg_info.prev_online_chg =
						di->chg_info.online_chg;
					di->chg_info.online_chg |= USB_CHG;
					queue_delayed_work(di->chargalg_wq,
						&di->chargalg_wd_work, 0);
				}
				break;
			default:
				break;
			}
			break;

		case POWER_SUPPLY_PROP_HEALTH:
			switch (ext->type) {
			case POWER_SUPPLY_TYPE_BATTERY:
				break;
			case POWER_SUPPLY_TYPE_MAINS:
				switch (ret.intval) {
				case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
					di->events.mainextchnotok = true;
					di->events.main_thermal_prot = false;
					di->events.main_ovv = false;
					di->events.ac_wd_expired = false;
					break;
				case POWER_SUPPLY_HEALTH_DEAD:
					di->events.ac_wd_expired = true;
					di->events.mainextchnotok = false;
					di->events.main_ovv = false;
					di->events.main_thermal_prot = false;
					break;
				case POWER_SUPPLY_HEALTH_COLD:
				case POWER_SUPPLY_HEALTH_OVERHEAT:
					di->events.main_thermal_prot = true;
					di->events.mainextchnotok = false;
					di->events.main_ovv = false;
					di->events.ac_wd_expired = false;
					break;
				case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
					di->events.main_ovv = true;
					di->events.mainextchnotok = false;
					di->events.main_thermal_prot = false;
					di->events.ac_wd_expired = false;
					break;
				case POWER_SUPPLY_HEALTH_GOOD:
					di->events.main_thermal_prot = false;
					di->events.mainextchnotok = false;
					di->events.main_ovv = false;
					di->events.ac_wd_expired = false;
					break;
				default:
					break;
				}
				break;

			case POWER_SUPPLY_TYPE_USB:
				switch (ret.intval) {
				case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
					di->events.usbchargernotok = true;
					di->events.usb_thermal_prot = false;
					di->events.vbus_ovv = false;
					di->events.usb_wd_expired = false;
					break;
				case POWER_SUPPLY_HEALTH_DEAD:
					di->events.usb_wd_expired = true;
					di->events.usbchargernotok = false;
					di->events.usb_thermal_prot = false;
					di->events.vbus_ovv = false;
					break;
				case POWER_SUPPLY_HEALTH_COLD:
				case POWER_SUPPLY_HEALTH_OVERHEAT:
					di->events.usb_thermal_prot = true;
					di->events.usbchargernotok = false;
					di->events.vbus_ovv = false;
					di->events.usb_wd_expired = false;
					break;
				case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
					di->events.vbus_ovv = true;
					di->events.usbchargernotok = false;
					di->events.usb_thermal_prot = false;
					di->events.usb_wd_expired = false;
					break;
				case POWER_SUPPLY_HEALTH_GOOD:
					di->events.usbchargernotok = false;
					di->events.usb_thermal_prot = false;
					di->events.vbus_ovv = false;
					di->events.usb_wd_expired = false;
					break;
				default:
					break;
				}
				/* NOTE(review): no break here in the original
				 * — falls into default below, which only
				 * breaks; confirm this is intentional. */
			default:
				break;
			}
			break;

		case POWER_SUPPLY_PROP_VOLTAGE_NOW:
			switch (ext->type) {
			case POWER_SUPPLY_TYPE_BATTERY:
				/* uV -> mV */
				di->batt_data.volt = ret.intval / 1000;
				break;
			case POWER_SUPPLY_TYPE_MAINS:
				di->chg_info.ac_volt = ret.intval / 1000;
				break;
			case POWER_SUPPLY_TYPE_USB:
				di->chg_info.usb_volt = ret.intval / 1000;
				break;
			default:
				break;
			}
			break;

		case POWER_SUPPLY_PROP_VOLTAGE_AVG:
			switch (ext->type) {
			case POWER_SUPPLY_TYPE_MAINS:
				/* AVG is used to indicate when we are
				 * in CV mode */
				if (ret.intval)
					di->events.ac_cv_active = true;
				else
					di->events.ac_cv_active = false;

				break;
			case POWER_SUPPLY_TYPE_USB:
				/* AVG is used to indicate when we are
				 * in CV mode */
				if (ret.intval)
					di->events.usb_cv_active = true;
				else
					di->events.usb_cv_active = false;

				break;
			default:
				break;
			}
			break;

		case POWER_SUPPLY_PROP_TECHNOLOGY:
			switch (ext->type) {
			case POWER_SUPPLY_TYPE_BATTERY:
				if (ret.intval)
					di->events.batt_unknown = false;
				else
					di->events.batt_unknown = true;

				break;
			default:
				break;
			}
			break;

		case POWER_SUPPLY_PROP_TEMP:
			/* tenths of a degree -> degrees */
			di->batt_data.temp = ret.intval / 10;
			break;

		case POWER_SUPPLY_PROP_CURRENT_NOW:
			switch (ext->type) {
			case POWER_SUPPLY_TYPE_MAINS:
				/* uA -> mA */
				di->chg_info.ac_curr = ret.intval / 1000;
				break;
			case POWER_SUPPLY_TYPE_USB:
				di->chg_info.usb_curr = ret.intval / 1000;
				break;
			case POWER_SUPPLY_TYPE_BATTERY:
				di->batt_data.inst_curr = ret.intval / 1000;
				break;
			default:
				break;
			}
			break;

		case POWER_SUPPLY_PROP_CURRENT_AVG:
			switch (ext->type) {
			case POWER_SUPPLY_TYPE_BATTERY:
				di->batt_data.avg_curr = ret.intval / 1000;
				break;
			case POWER_SUPPLY_TYPE_USB:
				if (ret.intval)
					di->events.vbus_collapsed = true;
				else
					di->events.vbus_collapsed = false;
				break;
			default:
				break;
			}
			break;
		case POWER_SUPPLY_PROP_CAPACITY:
			if (!capacity_updated)
				di->batt_data.percent = ret.intval;
			break;
		default:
			break;
		}
	}
	return 0;
}

/**
 * abx500_chargalg_external_power_changed() - callback for power supply changes
 * @psy:	pointer to the structure power_supply
 *
 * This function is the entry point of the pointer external_power_changed
 * of the structure power_supply.
 * This function gets executed when there is a change in any external power
 * supply that this driver needs to be notified of.
 */
static void abx500_chargalg_external_power_changed(struct power_supply *psy)
{
	struct abx500_chargalg *di = to_abx500_chargalg_device_info(psy);

	/*
	 * Trigger execution of the algorithm instantly and read
	 * all power_supply properties there instead
	 */
	queue_work(di->chargalg_wq, &di->chargalg_work);
}

/**
 * abx500_chargalg_algorithm() - Main function for the algorithm
 * @di:		pointer to the abx500_chargalg structure
 *
 * This is the main control function for the charging algorithm.
 * It is called periodically or when something happens that will
 * trigger a state change
 */
static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
{
	int charger_status;
	int ret;
	int curr_step_lvl;

	/* Collect data from all power_supply class devices */
	class_for_each_device(power_supply_class, NULL,
		&di->chargalg_psy, abx500_chargalg_get_ext_psy_data);

	abx500_chargalg_end_of_charge(di);
	abx500_chargalg_check_temp(di);
	abx500_chargalg_check_charger_voltage(di);

	charger_status = abx500_chargalg_check_charger_connection(di);
	abx500_chargalg_check_current_step_status(di);

	if (is_ab8500(di->parent)) {
		ret = abx500_chargalg_check_charger_enable(di);
		if (ret < 0)
			dev_err(di->dev, "Checking charger is enabled error"
					": Returned Value %d\n", ret);
	}

	/*
	 * First check if we have a charger connected.
	 * Also we don't allow charging of unknown batteries if configured
	 * this way
	 */
	if (!charger_status ||
		(di->events.batt_unknown && !di->bm->chg_unknown_bat)) {
		if (di->charge_state != STATE_HANDHELD) {
			di->events.safety_timer_expired = false;
			abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
		}
	}

	/* If suspended, we should not continue checking the flags */
	else if (di->charge_state == STATE_SUSPENDED_INIT ||
		di->charge_state == STATE_SUSPENDED) {
		/* We don't do anything here, just don't continue */
	}

	/* Safety timer expiration */
	else if (di->events.safety_timer_expired) {
		if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
			abx500_chargalg_state_to(di,
				STATE_SAFETY_TIMER_EXPIRED_INIT);
	}
	/*
	 * Check if any interrupts has occured
	 * that will prevent us from charging
	 */

	/* Battery removed */
	else if (di->events.batt_rem) {
		if (di->charge_state != STATE_BATT_REMOVED)
			abx500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
	}
	/* Main or USB charger not ok. */
	else if (di->events.mainextchnotok || di->events.usbchargernotok) {
		/*
		 * If vbus_collapsed is set, we have to lower the charger
		 * current, which is done in the normal state below
		 */
		if (di->charge_state != STATE_CHG_NOT_OK &&
			!di->events.vbus_collapsed)
			abx500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
	}
	/* VBUS, Main or VBAT OVV. */
	else if (di->events.vbus_ovv ||
			di->events.main_ovv ||
			di->events.batt_ovv ||
			!di->chg_info.usb_chg_ok ||
			!di->chg_info.ac_chg_ok) {
		if (di->charge_state != STATE_OVV_PROTECT)
			abx500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
	}
	/* USB Thermal, stop charging */
	else if (di->events.main_thermal_prot ||
		di->events.usb_thermal_prot) {
		if (di->charge_state != STATE_HW_TEMP_PROTECT)
			abx500_chargalg_state_to(di,
				STATE_HW_TEMP_PROTECT_INIT);
	}
	/* Battery temp over/under */
	else if (di->events.btemp_underover) {
		if (di->charge_state != STATE_TEMP_UNDEROVER)
			abx500_chargalg_state_to(di,
				STATE_TEMP_UNDEROVER_INIT);
	}
	/* Watchdog expired */
	else if (di->events.ac_wd_expired ||
		di->events.usb_wd_expired) {
		if (di->charge_state != STATE_WD_EXPIRED)
			abx500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
	}
	/* Battery temp high/low */
	else if (di->events.btemp_lowhigh) {
		if (di->charge_state != STATE_TEMP_LOWHIGH)
			abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
	}

	dev_dbg(di->dev,
		"[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
		"State %s Active_chg %d Chg_status %d AC %d USB %d "
		"AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
		"USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
		di->batt_data.volt,
		di->batt_data.avg_curr,
		di->batt_data.inst_curr,
		di->batt_data.temp,
		di->batt_data.percent,
		di->maintenance_chg,
		states[di->charge_state],
		di->chg_info.charger_type,
		di->charge_status,
		di->chg_info.conn_chg & AC_CHG,
		di->chg_info.conn_chg & USB_CHG,
		di->chg_info.online_chg & AC_CHG,
		di->chg_info.online_chg & USB_CHG,
		di->events.ac_cv_active,
		di->events.usb_cv_active,
		di->chg_info.ac_curr,
		di->chg_info.usb_curr,
		di->chg_info.ac_vset,
		di->chg_info.ac_iset,
		di->chg_info.usb_vset,
		di->chg_info.usb_iset);

	/* Execute the INIT one-shot work for the state just entered, then
	 * fall through into the corresponding steady state. */
	switch (di->charge_state) {
	case STATE_HANDHELD_INIT:
		abx500_chargalg_stop_charging(di);
		di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
		abx500_chargalg_state_to(di, STATE_HANDHELD);
		/* Intentional fallthrough */

	case STATE_HANDHELD:
		break;

	case STATE_SUSPENDED_INIT:
		if (di->susp_status.ac_suspended)
			abx500_chargalg_ac_en(di, false, 0, 0);
		if (di->susp_status.usb_suspended)
			abx500_chargalg_usb_en(di, false, 0, 0);
		abx500_chargalg_stop_safety_timer(di);
		abx500_chargalg_stop_maintenance_timer(di);
		di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
		di->maintenance_chg = false;
		abx500_chargalg_state_to(di, STATE_SUSPENDED);
		power_supply_changed(&di->chargalg_psy);
		/* Intentional fallthrough */

	case STATE_SUSPENDED:
		/* CHARGING is suspended */
		break;

	case STATE_BATT_REMOVED_INIT:
		abx500_chargalg_stop_charging(di);
		abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
		/* Intentional fallthrough */

	case STATE_BATT_REMOVED:
		if (!di->events.batt_rem)
			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
		break;

	case STATE_HW_TEMP_PROTECT_INIT:
		abx500_chargalg_stop_charging(di);
		abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
		/* Intentional fallthrough */

	case STATE_HW_TEMP_PROTECT:
		if (!di->events.main_thermal_prot &&
				!di->events.usb_thermal_prot)
			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
		break;

	case STATE_OVV_PROTECT_INIT:
		abx500_chargalg_stop_charging(di);
		abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
		/* Intentional fallthrough */

	case STATE_OVV_PROTECT:
		if (!di->events.vbus_ovv &&
				!di->events.main_ovv &&
				!di->events.batt_ovv &&
				di->chg_info.usb_chg_ok &&
				di->chg_info.ac_chg_ok)
			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
		break;

	case STATE_CHG_NOT_OK_INIT:
		abx500_chargalg_stop_charging(di);
		abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
		/* Intentional fallthrough */

	case STATE_CHG_NOT_OK:
		if (!di->events.mainextchnotok &&
				!di->events.usbchargernotok)
			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
		break;

	case STATE_SAFETY_TIMER_EXPIRED_INIT:
		abx500_chargalg_stop_charging(di);
		abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
		/* Intentional fallthrough */

	case STATE_SAFETY_TIMER_EXPIRED:
		/* We exit this state when charger is removed */
		break;

	case STATE_NORMAL_INIT:
		if ((di->chg_info.charger_type & USB_CHG) &&
				di->usb_chg->power_path) {
			if (di->batt_data.volt >
				(di->bm->fg_params->lowbat_threshold +
				BAT_PLUS_MARGIN)) {
				ab8540_chargalg_usb_pre_chg_en(di, false);
				ab8540_chargalg_usb_pp_en(di, false);
			} else {
				/* Battery too low: pre-charge via power path */
				ab8540_chargalg_usb_pp_en(di, true);
				ab8540_chargalg_usb_pre_chg_en(di, true);
				abx500_chargalg_state_to(di,
					STATE_USB_PP_PRE_CHARGE);
				break;
			}
		}

		if (di->curr_status.curr_step == CHARGALG_CURR_STEP_LOW)
			abx500_chargalg_stop_charging(di);
		else {
			/* Scale the normal current level by the current step */
			curr_step_lvl = di->bm->bat_type[
				di->bm->batt_id].normal_cur_lvl
				* di->curr_status.curr_step
				/ CHARGALG_CURR_STEP_HIGH;
			abx500_chargalg_start_charging(di,
				di->bm->bat_type[di->bm->batt_id]
				.normal_vol_lvl, curr_step_lvl);
		}

		abx500_chargalg_state_to(di, STATE_NORMAL);
		abx500_chargalg_start_safety_timer(di);
		abx500_chargalg_stop_maintenance_timer(di);
		init_maxim_chg_curr(di);
		di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
		di->eoc_cnt = 0;
		di->maintenance_chg = false;
		power_supply_changed(&di->chargalg_psy);

		break;

	case STATE_USB_PP_PRE_CHARGE:
		if (di->batt_data.volt >
			(di->bm->fg_params->lowbat_threshold +
			BAT_PLUS_MARGIN))
			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
		break;

	case STATE_NORMAL:
		handle_maxim_chg_curr(di);
		if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
			di->maintenance_chg) {
			if (di->bm->no_maintenance)
				abx500_chargalg_state_to(di,
					STATE_WAIT_FOR_RECHARGE_INIT);
			else
				abx500_chargalg_state_to(di,
					STATE_MAINTENANCE_A_INIT);
		}
		break;

	/* This state will be used when the maintenance state is disabled */
	case STATE_WAIT_FOR_RECHARGE_INIT:
		abx500_chargalg_hold_charging(di);
		abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
		/* Intentional
fallthrough */ case STATE_WAIT_FOR_RECHARGE: if (di->batt_data.percent <= di->bm->bat_type[di->bm->batt_id]. recharge_cap) abx500_chargalg_state_to(di, STATE_NORMAL_INIT); break; case STATE_MAINTENANCE_A_INIT: abx500_chargalg_stop_safety_timer(di); abx500_chargalg_start_maintenance_timer(di, di->bm->bat_type[ di->bm->batt_id].maint_a_chg_timer_h); abx500_chargalg_start_charging(di, di->bm->bat_type[ di->bm->batt_id].maint_a_vol_lvl, di->bm->bat_type[ di->bm->batt_id].maint_a_cur_lvl); abx500_chargalg_state_to(di, STATE_MAINTENANCE_A); power_supply_changed(&di->chargalg_psy); /* Intentional fallthrough*/ case STATE_MAINTENANCE_A: if (di->events.maintenance_timer_expired) { abx500_chargalg_stop_maintenance_timer(di); abx500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT); } break; case STATE_MAINTENANCE_B_INIT: abx500_chargalg_start_maintenance_timer(di, di->bm->bat_type[ di->bm->batt_id].maint_b_chg_timer_h); abx500_chargalg_start_charging(di, di->bm->bat_type[ di->bm->batt_id].maint_b_vol_lvl, di->bm->bat_type[ di->bm->batt_id].maint_b_cur_lvl); abx500_chargalg_state_to(di, STATE_MAINTENANCE_B); power_supply_changed(&di->chargalg_psy); /* Intentional fallthrough*/ case STATE_MAINTENANCE_B: if (di->events.maintenance_timer_expired) { abx500_chargalg_stop_maintenance_timer(di); abx500_chargalg_state_to(di, STATE_NORMAL_INIT); } break; case STATE_TEMP_LOWHIGH_INIT: abx500_chargalg_start_charging(di, di->bm->bat_type[ di->bm->batt_id].low_high_vol_lvl, di->bm->bat_type[ di->bm->batt_id].low_high_cur_lvl); abx500_chargalg_stop_maintenance_timer(di); di->charge_status = POWER_SUPPLY_STATUS_CHARGING; abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH); power_supply_changed(&di->chargalg_psy); /* Intentional fallthrough */ case STATE_TEMP_LOWHIGH: if (!di->events.btemp_lowhigh) abx500_chargalg_state_to(di, STATE_NORMAL_INIT); break; case STATE_WD_EXPIRED_INIT: abx500_chargalg_stop_charging(di); abx500_chargalg_state_to(di, STATE_WD_EXPIRED); /* Intentional fallthrough */ case 
STATE_WD_EXPIRED: if (!di->events.ac_wd_expired && !di->events.usb_wd_expired) abx500_chargalg_state_to(di, STATE_NORMAL_INIT); break; case STATE_TEMP_UNDEROVER_INIT: abx500_chargalg_stop_charging(di); abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER); /* Intentional fallthrough */ case STATE_TEMP_UNDEROVER: if (!di->events.btemp_underover) abx500_chargalg_state_to(di, STATE_NORMAL_INIT); break; } /* Start charging directly if the new state is a charge state */ if (di->charge_state == STATE_NORMAL_INIT || di->charge_state == STATE_MAINTENANCE_A_INIT || di->charge_state == STATE_MAINTENANCE_B_INIT) queue_work(di->chargalg_wq, &di->chargalg_work); } /** * abx500_chargalg_periodic_work() - Periodic work for the algorithm * @work: pointer to the work_struct structure * * Work queue function for the charging algorithm */ static void abx500_chargalg_periodic_work(struct work_struct *work) { struct abx500_chargalg *di = container_of(work, struct abx500_chargalg, chargalg_periodic_work.work); abx500_chargalg_algorithm(di); /* * If a charger is connected then the battery has to be monitored * frequently, else the work can be delayed. 
*/ if (di->chg_info.conn_chg) queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, di->bm->interval_charging * HZ); else queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, di->bm->interval_not_charging * HZ); } /** * abx500_chargalg_wd_work() - periodic work to kick the charger watchdog * @work: pointer to the work_struct structure * * Work queue function for kicking the charger watchdog */ static void abx500_chargalg_wd_work(struct work_struct *work) { int ret; struct abx500_chargalg *di = container_of(work, struct abx500_chargalg, chargalg_wd_work.work); dev_dbg(di->dev, "abx500_chargalg_wd_work\n"); ret = abx500_chargalg_kick_watchdog(di); if (ret < 0) dev_err(di->dev, "failed to kick watchdog\n"); queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, CHG_WD_INTERVAL); } /** * abx500_chargalg_work() - Work to run the charging algorithm instantly * @work: pointer to the work_struct structure * * Work queue function for calling the charging algorithm */ static void abx500_chargalg_work(struct work_struct *work) { struct abx500_chargalg *di = container_of(work, struct abx500_chargalg, chargalg_work); abx500_chargalg_algorithm(di); } /** * abx500_chargalg_get_property() - get the chargalg properties * @psy: pointer to the power_supply structure * @psp: pointer to the power_supply_property structure * @val: pointer to the power_supply_propval union * * This function gets called when an application tries to get the * chargalg properties by reading the sysfs files. 
* status: charging/discharging/full/unknown * health: health of the battery * Returns error code in case of failure else 0 on success */ static int abx500_chargalg_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct abx500_chargalg *di; di = to_abx500_chargalg_device_info(psy); switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = di->charge_status; break; case POWER_SUPPLY_PROP_HEALTH: if (di->events.batt_ovv) { val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE; } else if (di->events.btemp_underover) { if (di->batt_data.temp <= di->bm->temp_under) val->intval = POWER_SUPPLY_HEALTH_COLD; else val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; } else if (di->charge_state == STATE_SAFETY_TIMER_EXPIRED || di->charge_state == STATE_SAFETY_TIMER_EXPIRED_INIT) { val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; } else { val->intval = POWER_SUPPLY_HEALTH_GOOD; } break; default: return -EINVAL; } return 0; } /* Exposure to the sysfs interface */ static ssize_t abx500_chargalg_curr_step_show(struct abx500_chargalg *di, char *buf) { return sprintf(buf, "%d\n", di->curr_status.curr_step); } static ssize_t abx500_chargalg_curr_step_store(struct abx500_chargalg *di, const char *buf, size_t length) { long int param; int ret; ret = kstrtol(buf, 10, &param); if (ret < 0) return ret; di->curr_status.curr_step = param; if (di->curr_status.curr_step >= CHARGALG_CURR_STEP_LOW && di->curr_status.curr_step <= CHARGALG_CURR_STEP_HIGH) { di->curr_status.curr_step_change = true; queue_work(di->chargalg_wq, &di->chargalg_work); } else dev_info(di->dev, "Wrong current step\n" "Enter 0. Disable AC/USB Charging\n" "1--100. Set AC/USB charging current step\n" "100. 
Enable AC/USB Charging\n"); return strlen(buf); } static ssize_t abx500_chargalg_en_show(struct abx500_chargalg *di, char *buf) { return sprintf(buf, "%d\n", di->susp_status.ac_suspended && di->susp_status.usb_suspended); } static ssize_t abx500_chargalg_en_store(struct abx500_chargalg *di, const char *buf, size_t length) { long int param; int ac_usb; int ret; ret = kstrtol(buf, 10, &param); if (ret < 0) return ret; ac_usb = param; switch (ac_usb) { case 0: /* Disable charging */ di->susp_status.ac_suspended = true; di->susp_status.usb_suspended = true; di->susp_status.suspended_change = true; /* Trigger a state change */ queue_work(di->chargalg_wq, &di->chargalg_work); break; case 1: /* Enable AC Charging */ di->susp_status.ac_suspended = false; di->susp_status.suspended_change = true; /* Trigger a state change */ queue_work(di->chargalg_wq, &di->chargalg_work); break; case 2: /* Enable USB charging */ di->susp_status.usb_suspended = false; di->susp_status.suspended_change = true; /* Trigger a state change */ queue_work(di->chargalg_wq, &di->chargalg_work); break; default: dev_info(di->dev, "Wrong input\n" "Enter 0. Disable AC/USB Charging\n" "1. Enable AC charging\n" "2. 
Enable USB Charging\n"); }; return strlen(buf); } static struct abx500_chargalg_sysfs_entry abx500_chargalg_en_charger = __ATTR(chargalg, 0644, abx500_chargalg_en_show, abx500_chargalg_en_store); static struct abx500_chargalg_sysfs_entry abx500_chargalg_curr_step = __ATTR(chargalg_curr_step, 0644, abx500_chargalg_curr_step_show, abx500_chargalg_curr_step_store); static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct abx500_chargalg_sysfs_entry *entry = container_of(attr, struct abx500_chargalg_sysfs_entry, attr); struct abx500_chargalg *di = container_of(kobj, struct abx500_chargalg, chargalg_kobject); if (!entry->show) return -EIO; return entry->show(di, buf); } static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj, struct attribute *attr, const char *buf, size_t length) { struct abx500_chargalg_sysfs_entry *entry = container_of(attr, struct abx500_chargalg_sysfs_entry, attr); struct abx500_chargalg *di = container_of(kobj, struct abx500_chargalg, chargalg_kobject); if (!entry->store) return -EIO; return entry->store(di, buf, length); } static struct attribute *abx500_chargalg_chg[] = { &abx500_chargalg_en_charger.attr, &abx500_chargalg_curr_step.attr, NULL, }; static const struct sysfs_ops abx500_chargalg_sysfs_ops = { .show = abx500_chargalg_sysfs_show, .store = abx500_chargalg_sysfs_charger, }; static struct kobj_type abx500_chargalg_ktype = { .sysfs_ops = &abx500_chargalg_sysfs_ops, .default_attrs = abx500_chargalg_chg, }; /** * abx500_chargalg_sysfs_exit() - de-init of sysfs entry * @di: pointer to the struct abx500_chargalg * * This function removes the entry in sysfs. */ static void abx500_chargalg_sysfs_exit(struct abx500_chargalg *di) { kobject_del(&di->chargalg_kobject); } /** * abx500_chargalg_sysfs_init() - init of sysfs entry * @di: pointer to the struct abx500_chargalg * * This function adds an entry in sysfs. 
* Returns error code in case of failure else 0(on success) */ static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di) { int ret = 0; ret = kobject_init_and_add(&di->chargalg_kobject, &abx500_chargalg_ktype, NULL, "abx500_chargalg"); if (ret < 0) dev_err(di->dev, "failed to create sysfs entry\n"); return ret; } /* Exposure to the sysfs interface <<END>> */ #if defined(CONFIG_PM) static int abx500_chargalg_resume(struct platform_device *pdev) { struct abx500_chargalg *di = platform_get_drvdata(pdev); /* Kick charger watchdog if charging (any charger online) */ if (di->chg_info.online_chg) queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0); /* * Run the charging algorithm directly to be sure we don't * do it too seldom */ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0); return 0; } static int abx500_chargalg_suspend(struct platform_device *pdev, pm_message_t state) { struct abx500_chargalg *di = platform_get_drvdata(pdev); if (di->chg_info.online_chg) cancel_delayed_work_sync(&di->chargalg_wd_work); cancel_delayed_work_sync(&di->chargalg_periodic_work); return 0; } #else #define abx500_chargalg_suspend NULL #define abx500_chargalg_resume NULL #endif static int abx500_chargalg_remove(struct platform_device *pdev) { struct abx500_chargalg *di = platform_get_drvdata(pdev); /* sysfs interface to enable/disbale charging from user space */ abx500_chargalg_sysfs_exit(di); hrtimer_cancel(&di->safety_timer); hrtimer_cancel(&di->maintenance_timer); cancel_delayed_work_sync(&di->chargalg_periodic_work); cancel_delayed_work_sync(&di->chargalg_wd_work); cancel_work_sync(&di->chargalg_work); /* Delete the work queue */ destroy_workqueue(di->chargalg_wq); power_supply_unregister(&di->chargalg_psy); platform_set_drvdata(pdev, NULL); return 0; } static char *supply_interface[] = { "ab8500_fg", }; static int abx500_chargalg_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct abx500_bm_data *plat = 
pdev->dev.platform_data; struct abx500_chargalg *di; int ret = 0; di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL); if (!di) { dev_err(&pdev->dev, "%s no mem for ab8500_chargalg\n", __func__); return -ENOMEM; } if (!plat) { dev_err(&pdev->dev, "no battery management data supplied\n"); return -EINVAL; } di->bm = plat; if (np) { ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm); if (ret) { dev_err(&pdev->dev, "failed to get battery information\n"); return ret; } } /* get device struct and parent */ di->dev = &pdev->dev; di->parent = dev_get_drvdata(pdev->dev.parent); /* chargalg supply */ di->chargalg_psy.name = "abx500_chargalg"; di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY; di->chargalg_psy.properties = abx500_chargalg_props; di->chargalg_psy.num_properties = ARRAY_SIZE(abx500_chargalg_props); di->chargalg_psy.get_property = abx500_chargalg_get_property; di->chargalg_psy.supplied_to = supply_interface; di->chargalg_psy.num_supplicants = ARRAY_SIZE(supply_interface), di->chargalg_psy.external_power_changed = abx500_chargalg_external_power_changed; /* Initilialize safety timer */ hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); di->safety_timer.function = abx500_chargalg_safety_timer_expired; /* Initilialize maintenance timer */ hrtimer_init(&di->maintenance_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); di->maintenance_timer.function = abx500_chargalg_maintenance_timer_expired; /* Create a work queue for the chargalg */ di->chargalg_wq = create_singlethread_workqueue("abx500_chargalg_wq"); if (di->chargalg_wq == NULL) { dev_err(di->dev, "failed to create work queue\n"); return -ENOMEM; } /* Init work for chargalg */ INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work, abx500_chargalg_periodic_work); INIT_DEFERRABLE_WORK(&di->chargalg_wd_work, abx500_chargalg_wd_work); /* Init work for chargalg */ INIT_WORK(&di->chargalg_work, abx500_chargalg_work); /* To detect charger at startup */ di->chg_info.prev_conn_chg = -1; /* Register chargalg power 
supply class */ ret = power_supply_register(di->dev, &di->chargalg_psy); if (ret) { dev_err(di->dev, "failed to register chargalg psy\n"); goto free_chargalg_wq; } platform_set_drvdata(pdev, di); /* sysfs interface to enable/disable charging from user space */ ret = abx500_chargalg_sysfs_init(di); if (ret) { dev_err(di->dev, "failed to create sysfs entry\n"); goto free_psy; } di->curr_status.curr_step = CHARGALG_CURR_STEP_HIGH; /* Run the charging algorithm */ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0); dev_info(di->dev, "probe success\n"); return ret; free_psy: power_supply_unregister(&di->chargalg_psy); free_chargalg_wq: destroy_workqueue(di->chargalg_wq); return ret; } static const struct of_device_id ab8500_chargalg_match[] = { { .compatible = "stericsson,ab8500-chargalg", }, { }, }; static struct platform_driver abx500_chargalg_driver = { .probe = abx500_chargalg_probe, .remove = abx500_chargalg_remove, .suspend = abx500_chargalg_suspend, .resume = abx500_chargalg_resume, .driver = { .name = "ab8500-chargalg", .owner = THIS_MODULE, .of_match_table = ab8500_chargalg_match, }, }; module_platform_driver(abx500_chargalg_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Johan Palsson, Karl Komierowski"); MODULE_ALIAS("platform:abx500-chargalg"); MODULE_DESCRIPTION("abx500 battery charging algorithm");
gpl-2.0
mipltd/imx6merlin-linux-3.14.28
drivers/media/platform/soc_camera/soc_mediabus.c
3108
13428
/* * soc-camera media bus helper routines * * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> #include <media/soc_mediabus.h> static const struct soc_mbus_lookup mbus_fmt[] = { { .code = V4L2_MBUS_FMT_YUYV8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YVYU8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_UYVY8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_VYUY8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB555, .name = "RGB555", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB555X, .name = "RGB555X", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB565, .name = "RGB565", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, 
.order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB565X, .name = "RGB565X", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB666_1X18, .fmt = { .fourcc = V4L2_PIX_FMT_RGB32, .name = "RGB666/32bpp", .bits_per_sample = 18, .packing = SOC_MBUS_PACKING_EXTEND32, .order = SOC_MBUS_ORDER_LE, }, }, { .code = V4L2_MBUS_FMT_RGB888_1X24, .fmt = { .fourcc = V4L2_PIX_FMT_RGB32, .name = "RGB888/32bpp", .bits_per_sample = 24, .packing = SOC_MBUS_PACKING_EXTEND32, .order = SOC_MBUS_ORDER_LE, }, }, { .code = V4L2_MBUS_FMT_RGB888_2X12_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB32, .name = "RGB888/32bpp", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND32, .order = SOC_MBUS_ORDER_BE, }, }, { .code = V4L2_MBUS_FMT_RGB888_2X12_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB32, .name = "RGB888/32bpp", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND32, .order = SOC_MBUS_ORDER_LE, }, }, { .code = V4L2_MBUS_FMT_SBGGR8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR8, .name = "Bayer 8 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_Y8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_GREY, .name = "Grey", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_Y10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_Y10, .name = "Grey 10bit", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = 
V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADLO, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADLO, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_JPEG_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_JPEG, .name = "JPEG", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_VARIABLE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB444, .name = "RGB444", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YUYV8_1_5X8, .fmt = { .fourcc = V4L2_PIX_FMT_YUV420, .name = "YUYV 4:2:0", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YVYU8_1_5X8, .fmt = { .fourcc = V4L2_PIX_FMT_YVU420, .name = "YVYU 4:2:0", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_UYVY8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY 16bit", .bits_per_sample = 16, .packing = 
SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_VYUY8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YUYV8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YVYU8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGRBG8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG8, .name = "Bayer 8 GRBG", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8, .name = "Bayer 10 BGGR DPCM 8", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGBRG10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SGBRG10, .name = "Bayer 10 GBRG", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGRBG10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG10, .name = "Bayer 10 GRBG", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SRGGB10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SRGGB10, .name = "Bayer 10 RGGB", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR12_1X12, .fmt = { .fourcc = 
V4L2_PIX_FMT_SBGGR12, .name = "Bayer 12 BGGR", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGBRG12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SGBRG12, .name = "Bayer 12 GBRG", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGRBG12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG12, .name = "Bayer 12 GRBG", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SRGGB12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SRGGB12, .name = "Bayer 12 RGGB", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, }; int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf, unsigned int *numerator, unsigned int *denominator) { switch (mf->packing) { case SOC_MBUS_PACKING_NONE: case SOC_MBUS_PACKING_EXTEND16: *numerator = 1; *denominator = 1; return 0; case SOC_MBUS_PACKING_EXTEND32: *numerator = 1; *denominator = 1; return 0; case SOC_MBUS_PACKING_2X8_PADHI: case SOC_MBUS_PACKING_2X8_PADLO: *numerator = 2; *denominator = 1; return 0; case SOC_MBUS_PACKING_1_5X8: *numerator = 3; *denominator = 2; return 0; case SOC_MBUS_PACKING_VARIABLE: *numerator = 0; *denominator = 1; return 0; } return -EINVAL; } EXPORT_SYMBOL(soc_mbus_samples_per_pixel); s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf) { if (mf->layout != SOC_MBUS_LAYOUT_PACKED) return width * mf->bits_per_sample / 8; switch (mf->packing) { case SOC_MBUS_PACKING_NONE: return width * mf->bits_per_sample / 8; case SOC_MBUS_PACKING_2X8_PADHI: case SOC_MBUS_PACKING_2X8_PADLO: case SOC_MBUS_PACKING_EXTEND16: return width * 2; case SOC_MBUS_PACKING_1_5X8: return width * 3 / 2; case SOC_MBUS_PACKING_VARIABLE: return 0; case 
SOC_MBUS_PACKING_EXTEND32: return width * 4; } return -EINVAL; } EXPORT_SYMBOL(soc_mbus_bytes_per_line); s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, u32 bytes_per_line, u32 height) { if (mf->layout == SOC_MBUS_LAYOUT_PACKED) return bytes_per_line * height; switch (mf->packing) { case SOC_MBUS_PACKING_2X8_PADHI: case SOC_MBUS_PACKING_2X8_PADLO: return bytes_per_line * height * 2; case SOC_MBUS_PACKING_1_5X8: return bytes_per_line * height * 3 / 2; default: return -EINVAL; } } EXPORT_SYMBOL(soc_mbus_image_size); const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc( enum v4l2_mbus_pixelcode code, const struct soc_mbus_lookup *lookup, int n) { int i; for (i = 0; i < n; i++) if (lookup[i].code == code) return &lookup[i].fmt; return NULL; } EXPORT_SYMBOL(soc_mbus_find_fmtdesc); const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( enum v4l2_mbus_pixelcode code) { return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt)); } EXPORT_SYMBOL(soc_mbus_get_fmtdesc); unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg, unsigned int flags) { unsigned long common_flags; bool hsync = true, vsync = true, pclk, data, mode; bool mipi_lanes, mipi_clock; common_flags = cfg->flags & flags; switch (cfg->type) { case V4L2_MBUS_PARALLEL: hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW); vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW); case V4L2_MBUS_BT656: pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING); data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_LOW); mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE); return (!hsync || !vsync || !pclk || !data || !mode) ? 0 : common_flags; case V4L2_MBUS_CSI2: mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES; mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK | V4L2_MBUS_CSI2_CONTINUOUS_CLOCK); return (!mipi_lanes || !mipi_clock) ? 
0 : common_flags; } return 0; } EXPORT_SYMBOL(soc_mbus_config_compatible); static int __init soc_mbus_init(void) { return 0; } static void __exit soc_mbus_exit(void) { } module_init(soc_mbus_init); module_exit(soc_mbus_exit); MODULE_DESCRIPTION("soc-camera media bus interface"); MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); MODULE_LICENSE("GPL v2");
gpl-2.0
CyanogenMod/android_kernel_samsung_klimtwifi
lib/bug.c
4644
4785
/*
  Generic support for BUG()

  This respects the following config options:

  CONFIG_BUG - emit BUG traps.  Nothing happens without this.
  CONFIG_GENERIC_BUG - enable this code.
  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
	the containing struct bug_entry for bug_addr and file.
  CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG

  CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
  (though they're generally always on).

  CONFIG_GENERIC_BUG is set by each architecture using this code.

  To use this, your architecture must:

  1. Set up the config options:
     - Enable CONFIG_GENERIC_BUG if CONFIG_BUG

  2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
     - Define HAVE_ARCH_BUG
     - Implement BUG() to generate a faulting instruction
     - NOTE: struct bug_entry does not have "file" or "line" entries
       when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
       the values accordingly.

  3. Implement the trap
     - In the illegal instruction trap handler (typically), verify
       that the fault was in kernel mode, and call report_bug()
     - report_bug() will return whether it was a false alarm, a warning,
       or an actual bug.
     - You must implement the is_valid_bugaddr(bugaddr) callback which
       returns true if the eip is a real kernel address, and it points
       to the expected BUG trap instruction.

    Jeremy Fitzhardinge <jeremy@goop.org> 2006
 */
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>

/* Bounds of the built-in kernel's __bug_table section (linker-provided). */
extern const struct bug_entry __start___bug_table[], __stop___bug_table[];

/*
 * Recover the faulting instruction address recorded in a bug_entry.
 * With relative pointers the entry stores a 32-bit displacement from
 * itself instead of a full pointer (saves space on 64-bit kernels).
 */
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	return bug->bug_addr;
#else
	return (unsigned long)bug + bug->bug_addr_disp;
#endif
}

#ifdef CONFIG_MODULES
/* All loaded modules that carry a __bug_table, linked via mod->bug_list. */
static LIST_HEAD(module_bug_list);

/*
 * Scan every loaded module's bug table for an entry whose recorded
 * address matches @bugaddr.  Returns NULL when no module owns the trap.
 */
static const struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	struct module *mod;

	list_for_each_entry(mod, &module_bug_list, bug_list) {
		const struct bug_entry *bug = mod->bug_table;
		unsigned i;

		for (i = 0; i < mod->num_bugs; ++i, ++bug)
			if (bugaddr == bug_addr(bug))
				return bug;
	}
	return NULL;
}

/*
 * module_bug_finalize - register a freshly-loaded module's bug table
 * @hdr:     module ELF header
 * @sechdrs: module section headers
 * @mod:     module being finalized
 *
 * Locates the module's "__bug_table" ELF section (if any), records its
 * address and entry count in @mod, and links the module onto
 * module_bug_list so module_find_bug() can search it.
 */
void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			 struct module *mod)
{
	char *secstrings;
	unsigned int i;

	mod->bug_table = NULL;
	mod->num_bugs = 0;

	/* Find the __bug_table section, if present */
	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	/* Section 0 is the reserved null section, so start at 1. */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
			continue;
		mod->bug_table = (void *) sechdrs[i].sh_addr;
		mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
		break;
	}

	/*
	 * Strictly speaking this should have a spinlock to protect against
	 * traversals, but since we only traverse on BUG()s, a spinlock
	 * could potentially lead to deadlock and thus be counter-productive.
	 */
	list_add(&mod->bug_list, &module_bug_list);
}

/* Unlink a module from module_bug_list on unload. */
void module_bug_cleanup(struct module *mod)
{
	list_del(&mod->bug_list);
}

#else
/* No module support: only the built-in bug table can ever match. */
static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	return NULL;
}
#endif

/*
 * find_bug - map a trapping instruction address to its bug_entry
 * @bugaddr: address of the faulting BUG/WARN trap instruction
 *
 * Searches the built-in kernel table first, then falls back to loaded
 * modules.  Returns NULL when no entry matches.
 */
const struct bug_entry *find_bug(unsigned long bugaddr)
{
	const struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug_addr(bug))
			return bug;

	return module_find_bug(bugaddr);
}

/*
 * report_bug - handle a trap raised by BUG()/WARN_ON()
 * @bugaddr: address of the trapping instruction
 * @regs:    register state at the trap, dumped for warnings
 *
 * Called from the architecture's illegal-instruction trap handler.
 * Returns BUG_TRAP_TYPE_NONE for a false alarm (not a known trap
 * address), BUG_TRAP_TYPE_WARN after printing a WARN report (caller
 * should resume execution), or BUG_TRAP_TYPE_BUG for a real BUG
 * (caller is expected to oops).
 */
enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
	const struct bug_entry *bug;
	const char *file;
	unsigned line, warning;

	if (!is_valid_bugaddr(bugaddr))
		return BUG_TRAP_TYPE_NONE;

	bug = find_bug(bugaddr);

	file = NULL;
	line = 0;
	warning = 0;

	if (bug) {
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
		file = bug->file;
#else
		/* file is stored as a displacement from the entry itself */
		file = (const char *)bug + bug->file_disp;
#endif
		line = bug->line;
#endif
		warning = (bug->flags & BUGFLAG_WARNING) != 0;
	}

	if (warning) {
		/* this is a WARN_ON rather than BUG/BUG_ON */
		printk(KERN_WARNING "------------[ cut here ]------------\n");

		if (file)
			printk(KERN_WARNING "WARNING: at %s:%u\n",
			       file, line);
		else
			printk(KERN_WARNING "WARNING: at %p "
			       "[verbose debug info unavailable]\n",
			       (void *)bugaddr);

		print_modules();
		show_regs(regs);
		print_oops_end_marker();
		add_taint(BUG_GET_TAINT(bug));
		return BUG_TRAP_TYPE_WARN;
	}

	printk(KERN_DEFAULT "------------[ cut here ]------------\n");

	if (file)
		printk(KERN_CRIT "kernel BUG at %s:%u!\n",
		       file, line);
	else
		printk(KERN_CRIT "Kernel BUG at %p "
		       "[verbose debug info unavailable]\n",
		       (void *)bugaddr);

	return BUG_TRAP_TYPE_BUG;
}
gpl-2.0
HtcLegacy/android_kernel_htc_msm7x27a
drivers/media/video/gspca/nw80x.c
4900
77534
/*
 * DivIO nw80x subdriver
 *
 * Copyright (C) 2011 Jean-François Moine (http://moinejf.free.fr)
 * Copyright (C) 2003 Sylvain Munaut <tnt@246tNt.com>
 *                    Kjell Claesson <keyson@users.sourceforge.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define MODULE_NAME "nw80x"

#include "gspca.h"

MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("NW80x USB Camera Driver");
MODULE_LICENSE("GPL");

/* Webcam model override; presumably a module parameter selecting an
 * entry of enum webcams below — the module_param() declaration is not
 * visible in this chunk, so confirm against the rest of the file. */
static int webcam;

/* controls */
enum e_ctrl {
	GAIN,
	EXPOSURE,
	AUTOGAIN,
	NCTRLS		/* number of controls */
};

#define AUTOGAIN_DEF 1

/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !!
must be the first item */ struct gspca_ctrl ctrls[NCTRLS]; u32 ae_res; s8 ag_cnt; #define AG_CNT_START 13 u8 exp_too_low_cnt; u8 exp_too_high_cnt; u8 bridge; u8 webcam; }; enum bridges { BRIDGE_NW800, /* and et31x110 */ BRIDGE_NW801, BRIDGE_NW802, }; enum webcams { Generic800, SpaceCam, /* Trust 120 SpaceCam */ SpaceCam2, /* other Trust 120 SpaceCam */ Cvideopro, /* Conceptronic Video Pro */ Dlink350c, DS3303u, Kr651us, Kritter, Mustek300, Proscope, Twinkle, DvcV6, P35u, Generic802, NWEBCAMS /* number of webcams */ }; static const u8 webcam_chip[NWEBCAMS] = { [Generic800] = BRIDGE_NW800, /* 06a5:0000 * Typhoon Webcam 100 USB */ [SpaceCam] = BRIDGE_NW800, /* 06a5:d800 * Trust SpaceCam120 or SpaceCam100 PORTABLE */ [SpaceCam2] = BRIDGE_NW800, /* 06a5:d800 - pas106 * other Trust SpaceCam120 or SpaceCam100 PORTABLE */ [Cvideopro] = BRIDGE_NW802, /* 06a5:d001 * Conceptronic Video Pro 'CVIDEOPRO USB Webcam CCD' */ [Dlink350c] = BRIDGE_NW802, /* 06a5:d001 * D-Link NetQam Pro 250plus */ [DS3303u] = BRIDGE_NW801, /* 06a5:d001 * Plustek Opticam 500U or ProLink DS3303u */ [Kr651us] = BRIDGE_NW802, /* 06a5:d001 * Panasonic GP-KR651US */ [Kritter] = BRIDGE_NW802, /* 06a5:d001 * iRez Kritter cam */ [Mustek300] = BRIDGE_NW802, /* 055f:d001 * Mustek Wcam 300 mini */ [Proscope] = BRIDGE_NW802, /* 06a5:d001 * Scalar USB Microscope (ProScope) */ [Twinkle] = BRIDGE_NW800, /* 06a5:d800 - hv7121b? (seems pas106) * Divio Chicony TwinkleCam * DSB-C110 */ [DvcV6] = BRIDGE_NW802, /* 0502:d001 * DVC V6 */ [P35u] = BRIDGE_NW801, /* 052b:d001, 06a5:d001 and 06be:d001 * EZCam Pro p35u */ [Generic802] = BRIDGE_NW802, }; /* * other webcams: * - nw801 046d:d001 * Logitech QuickCam Pro (dark focus ring) * - nw801 0728:d001 * AVerMedia Camguard * - nw??? 06a5:d001 * D-Link NetQam Pro 250plus * - nw800 065a:d800 * Showcam NGS webcam * - nw??? ????:???? 
* Sceptre svc300 */ /* * registers * nw800/et31x110 nw801 nw802 * 0000..009e 0000..00a1 0000..009e * 0200..0211 id id * 0300..0302 id id * 0400..0406 (inex) 0400..0406 * 0500..0505 0500..0506 (inex) * 0600..061a 0600..0601 0600..0601 * 0800..0814 id id * 1000..109c 1000..10a1 1000..109a */ /* resolutions * nw800: 320x240, 352x288 * nw801/802: 320x240, 640x480 */ static const struct v4l2_pix_format cif_mode[] = { {320, 240, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8, .colorspace = V4L2_COLORSPACE_JPEG}, {352, 288, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 4 / 8, .colorspace = V4L2_COLORSPACE_JPEG} }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8, .colorspace = V4L2_COLORSPACE_JPEG}, {640, 480, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8, .colorspace = V4L2_COLORSPACE_JPEG}, }; /* * The sequences below contain: * - 1st and 2nd bytes: either * - register number (BE) * - I2C0 + i2c address * - 3rd byte: data length (=0 for end of sequence) * - n bytes: data */ #define I2C0 0xff static const u8 nw800_init[] = { 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x01, 0x04, 0x06, 0x01, 0x04, 0x04, 0x04, 0x03, 0x00, 0x00, 0x00, 0x05, 0x05, 0x01, 0x00, 0, 0, 0 }; static const u8 nw800_start[] = { 0x04, 0x06, 0x01, 0xc0, 0x00, 0x00, 0x40, 0x10, 0x43, 0x00, 0xb4, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x0e, 0x00, 0x74, 0x01, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x3e, 0x00, 0x24, 0x03, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 
0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xa0, 0x48, 0xc3, 0x02, 0x88, 0x0c, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x06, 0x00, 0x08, 0x00, 0x32, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x76, 0x00, 0x86, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x61, 0xc0, 0x05, 0x00, 0x06, 0xe8, 0x00, 0x00, 0x00, 0x20, 0x20, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x83, 0x02, 0x20, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x62, 0x01, 0x24, 
0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x04, 0x04, 0x01, 0xff, 0x04, 0x06, 0x01, 0xc4, 0x04, 0x06, 0x01, 0xc0, 0x00, 0x00, 0x40, 0x10, 0x43, 0x00, 0xb4, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x0e, 0x00, 0x74, 0x01, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x3e, 0x00, 0x24, 0x03, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xa0, 0x48, 0xc3, 0x02, 0x88, 0x0c, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x06, 0x00, 0x08, 0x00, 0x32, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x76, 0x00, 0x86, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x61, 0xc0, 0x05, 0x00, 0x06, 0xe8, 0x00, 0x00, 0x00, 0x20, 0x20, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x83, 0x02, 0x20, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 
0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x02, 0x00, 0x11, 0x48, 0x58, 0x9e, 0x48, 0x58, 0x00, 0x00, 0x00, 0x00, 0x84, 0x36, 0x05, 0x01, 0xf2, 0x86, 0x65, 0x40, 0x00, 0x80, 0x01, 0xa0, 0x10, 0x1a, 0x01, 0x00, 0x00, 0x91, 0x02, 0x6c, 0x01, 0x00, 0x03, 0x02, 0xc8, 0x01, 0x10, 0x1a, 0x01, 0x00, 0x10, 0x00, 0x01, 0x83, 0x10, 0x8f, 0x0c, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x10, 0x85, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x10, 0x1b, 0x02, 0x69, 0x00, 0x10, 0x11, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x05, 0x02, 0x01, 0x02, 0x06, 0x00, 0x02, 0x04, 0xd9, 0x05, 0x05, 0x01, 0x20, 0x05, 0x05, 0x01, 0x21, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x08, 0x21, 0x3d, 0x52, 0x63, 0x75, 0x83, 0x91, 0x9e, 0xaa, 0xb6, 0xc1, 0xcc, 0xd6, 0xe0, 0xea, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x13, 0x13, 0x10, 0x03, 0x01, 0x14, 0x10, 0x41, 0x11, 0x00, 0x08, 0x21, 0x3d, 0x52, 0x63, 0x75, 0x83, 0x91, 0x9e, 0xaa, 0xb6, 0xc1, 0xcc, 0xd6, 0xe0, 0xea, 0x10, 0x0b, 0x01, 0x14, 0x10, 0x0d, 0x01, 0x20, 0x10, 0x0c, 0x01, 0x34, 0x04, 0x06, 0x01, 0xc3, 0x04, 0x04, 0x01, 0x00, 0x05, 0x02, 0x01, 0x02, 0x06, 0x00, 0x02, 0x00, 0x48, 0x05, 0x05, 0x01, 0x20, 0x05, 0x05, 0x01, 0x21, 0, 0, 0 }; /* 06a5:d001 - nw801 - Panasonic * P35u */ static 
const u8 nw801_start_1[] = { 0x05, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x0e, 0x00, 0x00, 0xf9, 0x02, 0x11, 0x00, 0x0e, 0x01, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x22, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0xa8, 0x1f, 0x00, 0x0d, 0x02, 0x07, 0x00, 0x01, 0x00, 0x19, 0x00, 0xf2, 0x00, 0x18, 0x06, 0x10, 0x06, 0x10, 0x00, 0x36, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x05, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x22, 0x02, 0x80, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x15, 0x08, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x35, 0xfd, 0x07, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x14, 0x02, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x20, 0x10, 0x06, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0x10, 0x40, 0x40, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 
0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x10, 0x80, 0x22, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0, 0, 0, }; static const u8 nw801_start_qvga[] = { 0x02, 0x00, 0x10, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x18, 0x0b, 0x06, 0xa2, 0x86, 0x78, 0x02, 0x0f, 0x01, 0x6b, 0x10, 0x1a, 0x01, 0x15, 0x00, 0x00, 0x01, 0x1e, 0x10, 0x00, 0x01, 0x2f, 0x10, 0x8c, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x11, 0x08, 0x29, 0x00, 0x18, 0x01, 0x1f, 0x00, 0xd2, 0x00, /* AE window */ 0, 0, 0, }; static const u8 nw801_start_vga[] = { 0x02, 0x00, 0x10, 0x78, 0xa0, 0x97, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xf0, 0x02, 0x0f, 0x01, 0xd5, 0x10, 0x1a, 0x01, 0x15, 0x00, 0x00, 0x01, 0x0e, 0x10, 0x00, 0x01, 0x22, 0x10, 0x8c, 0x08, 0x00, 0x00, 0x7f, 0x02, 0x00, 0x00, 0xdf, 0x01, 0x10, 0x11, 0x08, 0x51, 0x00, 0x30, 0x02, 0x3d, 0x00, 0xa4, 0x01, 0, 0, 0, }; static const u8 nw801_start_2[] = { 0x10, 0x04, 0x01, 0x1a, 0x10, 0x19, 0x01, 0x09, /* clock */ 0x10, 0x24, 0x06, 0xc0, 0x00, 0x3f, 0x02, 0x00, 0x01, /* .. gain .. 
*/ 0x00, 0x03, 0x02, 0x92, 0x03, 0x00, 0x1d, 0x04, 0xf2, 0x00, 0x24, 0x07, 0x00, 0x7b, 0x01, 0xcf, 0x10, 0x94, 0x01, 0x07, 0x05, 0x05, 0x01, 0x01, 0x05, 0x04, 0x01, 0x01, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x48, 0x11, 0x00, 0x37, 0x55, 0x6b, 0x7d, 0x8d, 0x9b, 0xa8, 0xb4, 0xbf, 0xca, 0xd4, 0xdd, 0xe6, 0xef, 0xf0, 0xf0, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x0c, 0x0c, 0x10, 0x03, 0x01, 0x08, 0x10, 0x48, 0x11, 0x00, 0x37, 0x55, 0x6b, 0x7d, 0x8d, 0x9b, 0xa8, 0xb4, 0xbf, 0xca, 0xd4, 0xdd, 0xe6, 0xef, 0xf0, 0xf0, 0x10, 0x0b, 0x01, 0x0b, 0x10, 0x0d, 0x01, 0x0b, 0x10, 0x0c, 0x01, 0x1f, 0x05, 0x06, 0x01, 0x03, 0, 0, 0 }; /* nw802 (sharp IR3Y38M?) */ static const u8 nw802_start[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x10, 0x00, 0x00, 0xf9, 0x02, 0x10, 0x00, 0x4d, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x16, 0x00, 0x94, 0x00, 0x10, 0x06, 0x08, 0x00, 0x18, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa1, 0x02, 0x80, 0x00, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0xff, 0x01, 0xc0, 0x00, 0x14, 0x02, 0x00, 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x00, 0x10, 0x00, 0x01, 0xad, 0x00, 0x00, 0x01, 0x08, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x51, 0x00, 0xf0, 0x00, 0x3d, 0x00, 0xb4, 0x00, 0x10, 0x1d, 0x08, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x10, 0x0e, 0x01, 0x27, 0x10, 0x41, 0x11, 0x00, 0x0e, 0x35, 0x4f, 0x62, 0x71, 0x7f, 0x8b, 0x96, 0xa0, 0xa9, 0xb2, 0xbb, 0xc3, 0xca, 0xd2, 0xd8, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x14, 0x14, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, /* 0x00, 0x0e, 0x35, 0x4f, 0x62, 0x71, 0x7f, 0x8b, * 0x96, 0xa0, 0xa9, 0xb2, 0xbb, 0xc3, 0xca, 0xd2, * 0xd8, */ 0x10, 0x0b, 0x01, 0x10, 
0x10, 0x0d, 0x01, 0x11, 0x10, 0x0c, 0x01, 0x1c, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* et31x110 - Trust 120 SpaceCam */ static const u8 spacecam_init[] = { 0x04, 0x05, 0x01, 0x01, 0x04, 0x04, 0x01, 0x01, 0x04, 0x06, 0x01, 0x04, 0x04, 0x04, 0x03, 0x00, 0x00, 0x00, 0x05, 0x05, 0x01, 0x00, 0, 0, 0 }; static const u8 spacecam_start[] = { 0x04, 0x06, 0x01, 0x44, 0x00, 0x00, 0x40, 0x10, 0x43, 0x00, 0xb4, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x0e, 0x00, 0x74, 0x01, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x3e, 0x00, 0x24, 0x03, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xa0, 0x48, 0xc3, 0x02, 0x88, 0x0c, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x06, 0x00, 0x08, 0x00, 0x32, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x7c, 0x00, 0x80, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 
0x00, 0x40, 0x83, 0x02, 0x20, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x04, 0x06, 0x01, 0xc0, 0x10, 0x85, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x02, 0x00, 0x11, 0x48, 0x58, 0x9e, 0x48, 0x58, 0x00, 0x00, 0x00, 0x00, 0x84, 0x36, 0x05, 0x01, 0xf2, 0x86, 0x65, 0x40, 0x00, 0x80, 0x01, 0xa0, 0x10, 0x1a, 0x01, 0x00, 0x00, 0x91, 0x02, 0x32, 0x01, 0x00, 0x03, 0x02, 0x08, 0x02, 0x10, 0x00, 0x01, 0x83, 0x10, 0x8f, 0x0c, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x10, 0x11, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x64, 0x99, 0xc0, 0xe2, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x13, 0x13, 0x10, 0x03, 0x01, 0x06, 0x10, 0x41, 0x11, 0x00, 0x64, 0x99, 0xc0, 0xe2, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0x10, 0x0b, 0x01, 0x08, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1f, 0x04, 0x06, 0x01, 0xc3, 0x04, 0x05, 0x01, 0x40, 0x04, 0x04, 0x01, 0x40, 0, 0, 0 }; /* et31x110 - 
pas106 - other Trust SpaceCam120 */ static const u8 spacecam2_start[] = { 0x04, 0x06, 0x01, 0x44, 0x04, 0x06, 0x01, 0x00, 0x00, 0x00, 0x40, 0x14, 0x83, 0x00, 0xba, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x00, 0x00, 0x60, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x06, 0x00, 0xfc, 0x01, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb8, 0x48, 0x0f, 0x04, 0x88, 0x14, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x01, 0x00, 0x03, 0x00, 0x24, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x76, 0x00, 0x86, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x61, 0x00, 0x05, 0x00, 0x06, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x80, 0x02, 0x20, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 
0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x10, 0x85, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x04, 0x04, 0x01, 0x40, 0x04, 0x04, 0x01, 0x00, I2C0, 0x40, 0x0c, 0x02, 0x0c, 0x12, 0x07, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x05, 0x05, I2C0, 0x40, 0x02, 0x11, 0x06, I2C0, 0x40, 0x02, 0x14, 0x00, I2C0, 0x40, 0x02, 0x13, 0x01, /* i2c end */ 0x02, 0x00, 0x11, 0x48, 0x58, 0x9e, 0x48, 0x58, 0x00, 0x00, 0x00, 0x00, 0x84, 0x36, 0x05, 0x01, 0xf2, 0x86, 0x65, 0x40, I2C0, 0x40, 0x02, 0x02, 0x0c, /* pixel clock */ I2C0, 0x40, 0x02, 0x0f, 0x00, I2C0, 0x40, 0x02, 0x13, 0x01, /* i2c end */ 0x10, 0x00, 0x01, 0x01, 0x10, 0x8f, 0x0c, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, I2C0, 0x40, 0x02, 0x05, 0x0f, /* exposure */ I2C0, 0x40, 0x02, 0x13, 0x01, /* i2c end */ I2C0, 0x40, 0x07, 0x09, 0x0b, 0x0f, 0x05, 0x05, 0x0f, 0x00, /* gains */ I2C0, 0x40, 0x03, 0x12, 0x04, 0x01, 0x10, 0x11, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x17, 0x3f, 0x69, 0x7b, 0x8c, 0x9a, 0xa7, 0xb3, 0xbf, 0xc9, 0xd3, 0xdd, 0xe6, 0xef, 0xf7, 0xf9, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x13, 0x13, 0x10, 0x03, 0x01, 0x06, 0x10, 0x41, 0x11, 0x00, 0x17, 0x3f, 0x69, 0x7b, 0x8c, 0x9a, 0xa7, 0xb3, 0xbf, 0xc9, 0xd3, 0xdd, 0xe6, 0xef, 0xf7, 0xf9, 
0x10, 0x0b, 0x01, 0x11, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x14, 0x04, 0x06, 0x01, 0x03, 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw802 - Conceptronic Video Pro */ static const u8 cvideopro_start[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x54, 0x96, 0x98, 0xf9, 0x02, 0x18, 0x00, 0x4c, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x1b, 0x00, 0xc8, 0x00, 0xf4, 0x05, 0xb4, 0x00, 0xcc, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa2, 0x00, 0xc6, 0x00, 0x60, 0x00, 0xc6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0xae, 0x00, 0xd2, 0x00, 0xae, 0x00, 0xd2, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa8, 0x00, 0xc0, 0x00, 0x66, 0x00, 0xc0, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, 0x5d, 0x00, 0xc7, 0x00, 0x7e, 0x00, 0x30, 0x00, 0x80, 0x1f, 0x98, 0x43, 0x3f, 0x0d, 0x88, 0x20, 0x80, 0x3f, 0x47, 0xaf, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x0c, 0x00, 0x1c, 0x00, 0x94, 0x00, 0x10, 0x06, 0x24, 0x00, 0x4a, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0xff, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa0, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 
0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x8c, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x3f, 0x06, 0xf2, 0x8f, 0xf0, 0x40, 0x10, 0x1a, 0x01, 0x03, 0x10, 0x00, 0x01, 0xac, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x3b, 0x01, 0x10, 0x11, 0x08, 0x61, 0x00, 0xe0, 0x00, 0x49, 0x00, 0xa8, 0x00, 0x10, 0x1f, 0x06, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x1d, 0x02, 0x40, 0x06, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x46, 0x62, 0x76, 0x86, 0x94, 0xa0, 0xab, 0xb6, 0xbf, 0xc8, 0xcf, 0xd7, 0xdc, 0xdc, 0xdc, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x12, 0x12, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x46, 0x62, 0x76, 0x86, 0x94, 0xa0, 0xab, 0xb6, 0xbf, 0xc8, 0xcf, 0xd7, 0xdc, 0xdc, 0xdc, 0x10, 0x0b, 0x01, 0x09, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x2f, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw802 - D-link dru-350c cam */ static const u8 dlink_start[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x10, 0x00, 0x00, 0x92, 0x03, 0x10, 0x00, 0x4d, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 
0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x16, 0x00, 0x94, 0x00, 0x10, 0x06, 0x10, 0x00, 0x36, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa1, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xc0, 0x00, 0x14, 0x02, 0x00, 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x01, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 
0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x00, 0x10, 0x00, 0x01, 0xad, 0x00, 0x00, 0x01, 0x08, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x51, 0x00, 0xf0, 0x00, 0x3d, 0x00, 0xb4, 0x00, 0x10, 0x1d, 0x08, 0x40, 0x06, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x0e, 0x01, 0x20, 0x10, 0x41, 0x11, 0x00, 0x07, 0x1e, 0x38, 0x4d, 0x60, 0x70, 0x7f, 0x8e, 0x9b, 0xa8, 0xb4, 0xbf, 0xca, 0xd5, 0xdf, 0xea, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x11, 0x11, 0x10, 0x03, 0x01, 0x10, 0x10, 0x41, 0x11, 0x00, 0x07, 0x1e, 0x38, 0x4d, 0x60, 0x70, 0x7f, 0x8e, 0x9b, 0xa8, 0xb4, 0xbf, 0xca, 0xd5, 0xdf, 0xea, 0x10, 0x0b, 0x01, 0x19, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1e, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* 06a5:d001 - nw801 - Sony * Plustek Opticam 500U or ProLink DS3303u (Hitachi HD49322BF) */ /*fixme: 320x240 only*/ static const u8 ds3303_start[] = { 0x05, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x16, 0x00, 0x00, 0xf9, 0x02, 0x11, 0x00, 0x0e, 0x01, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x22, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa9, 0xa8, 0x1f, 0x00, 
0x0d, 0x02, 0x07, 0x00, 0x01, 0x00, 0x19, 0x00, 0xf2, 0x00, 0x18, 0x06, 0x10, 0x06, 0x10, 0x00, 0x36, 0x00, 0x02, 0x00, 0x12, 0x03, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0x50, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x05, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0xff, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x2f, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x1f, 0x10, 0x08, 0x0a, 0x0a, 0x51, 0x00, 0xf1, 0x00, 0x3c, 0x00, 0xb4, 0x00, 0x01, 0x15, 0xfd, 0x07, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x04, 0x01, 0x20, 0x02, 0x00, 0x03, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0x10, 0x40, 0x40, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x80, 0x00, 0x2d, 0x46, 0x58, 0x67, 0x74, 0x7f, 0x88, 0x94, 0x9d, 0xa6, 0xae, 0xb5, 0xbd, 0xc4, 0xcb, 0xd1, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x10, 0x80, 0x22, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x02, 0x0a, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x3f, 0x00, 0xf2, 0x8f, 0x81, 0x40, 0x10, 0x1a, 0x01, 0x15, 0x10, 0x00, 0x01, 0x2f, 0x10, 0x8c, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x61, 0x00, 0xe0, 0x00, 0x49, 0x00, 0xa8, 0x00, 0x10, 0x26, 0x06, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x24, 0x02, 0x40, 0x06, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x48, 0x11, 0x00, 0x15, 
0x40, 0x67, 0x84, 0x9d, 0xb2, 0xc6, 0xd6, 0xe7, 0xf6, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x16, 0x16, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x48, 0x11, 0x00, 0x15, 0x40, 0x67, 0x84, 0x9d, 0xb2, 0xc6, 0xd6, 0xe7, 0xf6, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0x10, 0x0b, 0x01, 0x26, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1c, 0x05, 0x06, 0x01, 0x03, 0x05, 0x04, 0x01, 0x00, 0, 0, 0 }; /* 06a5:d001 - nw802 - Panasonic * GP-KR651US (Philips TDA8786) */ static const u8 kr651_start_1[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x44, 0x96, 0x98, 0xf9, 0x02, 0x18, 0x00, 0x48, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x1b, 0x00, 0xc8, 0x00, 0xf4, 0x05, 0xb4, 0x00, 0xcc, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa2, 0x00, 0xc6, 0x00, 0x60, 0x00, 0xc6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0xae, 0x00, 0xd2, 0x00, 0xae, 0x00, 0xd2, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa8, 0x00, 0xc0, 0x00, 0x66, 0x00, 0xc0, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, 0x5d, 0x00, 0xc7, 0x00, 0x7e, 0x00, 0x30, 0x00, 0x80, 0x1f, 0x18, 0x43, 0x3f, 0x0d, 0x88, 0x20, 0x80, 0x3f, 0x47, 0xaf, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x0c, 0x00, 0x1c, 0x00, 0x94, 0x00, 0x10, 0x06, 0x24, 0x00, 0x4a, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x02, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa0, 0x02, 0x80, 0x00, 0x12, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0, 0, 0 }; static const u8 kr651_start_qvga[] = { 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x03, 0x10, 0x00, 0x01, 0xac, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x29, 0x00, 0x18, 0x01, 0x1f, 0x00, 0xd2, 0x00, 0x10, 0x1d, 0x06, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x10, 0x1d, 0x02, 0x28, 0x01, 0, 0, 0 }; static const u8 kr651_start_vga[] = { 0x02, 0x00, 0x11, 0x78, 0xa0, 0x8c, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x30, 0x03, 0x01, 0x82, 0x82, 0x98, 0x80, 0x10, 0x1a, 0x01, 0x03, 0x10, 0x00, 0x01, 0xa0, 0x10, 0x85, 0x08, 0x00, 0x00, 0x7f, 0x02, 0x00, 0x00, 0xdf, 0x01, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x51, 0x00, 0x30, 0x02, 0x3d, 0x00, 0xa4, 0x01, 0x10, 0x1d, 0x06, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x10, 0x1d, 0x02, 0x68, 0x00, }; static const u8 kr651_start_2[] = { 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x11, 0x3c, 0x5c, 0x74, 0x88, 0x99, 0xa8, 0xb7, 
0xc4, 0xd0, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x0c, 0x0c, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x11, 0x3c, 0x5c, 0x74, 0x88, 0x99, 0xa8, 0xb7, 0xc4, 0xd0, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0x10, 0x0b, 0x01, 0x10, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x2d, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw802 - iRez Kritter cam */ static const u8 kritter_start[] = { 0x04, 0x06, 0x01, 0x06, 0x00, 0x00, 0x40, 0x44, 0x96, 0x98, 0x94, 0x03, 0x18, 0x00, 0x48, 0x0f, 0x1e, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x1b, 0x00, 0x0a, 0x01, 0x28, 0x07, 0xb4, 0x00, 0xcc, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa2, 0x00, 0xc6, 0x00, 0x60, 0x00, 0xc6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0xae, 0x00, 0xd2, 0x00, 0xae, 0x00, 0xd2, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa8, 0x00, 0xc0, 0x00, 0x66, 0x00, 0xc0, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, 0x5d, 0x00, 0x0e, 0x00, 0x7e, 0x00, 0x30, 0x00, 0x80, 0x1f, 0x18, 0x43, 0x3f, 0x0d, 0x88, 0x20, 0x80, 0x3f, 0x47, 0xaf, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0b, 0x02, 0x0c, 0x00, 0x1c, 0x00, 0x94, 0x00, 0x10, 0x06, 0x24, 0x00, 0x4a, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x02, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0xff, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa0, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x8c, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x3f, 0x06, 0xf2, 0x8f, 0xf0, 0x40, 0x10, 0x1a, 0x01, 0x03, 0x10, 0x00, 0x01, 0xaf, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x3b, 0x01, 0x10, 0x11, 0x08, 0x61, 0x00, 0xe0, 0x00, 0x49, 0x00, 0xa8, 0x00, 0x10, 0x1d, 0x06, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x10, 0x1d, 0x02, 0x00, 0x00, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x0d, 0x36, 0x4e, 0x60, 0x6f, 0x7b, 0x86, 0x90, 0x98, 0xa1, 0xa9, 0xb1, 0xb7, 0xbe, 0xc4, 0xcb, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x0d, 0x0d, 0x10, 0x03, 0x01, 0x02, 0x10, 0x41, 0x11, 0x00, 0x0d, 0x36, 0x4e, 0x60, 0x6f, 0x7b, 0x86, 0x90, 0x98, 0xa1, 0xa9, 0xb1, 0xb7, 0xbe, 0xc4, 0xcb, 0x10, 0x0b, 0x01, 0x17, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1e, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw802 - Mustek Wcam 300 mini */ static const u8 mustek_start[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x10, 0x00, 0x00, 0x92, 0x03, 0x10, 0x00, 0x4d, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 
0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x16, 0x00, 0x94, 0x00, 0x10, 0x06, 0xfc, 0x05, 0x0c, 0x06, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa1, 0x02, 0x80, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xc0, 0x00, 0x14, 0x02, 0x00, 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 
0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x01, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x00, 0x10, 0x00, 0x01, 0xad, 0x00, 0x00, 0x01, 0x08, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1d, 0x08, 0x00, 0x20, 0x00, 0x20, 0x00, 0x20, 0x00, 0x20, 0x10, 0x0e, 0x01, 0x0f, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x29, 0x4a, 0x64, 0x7a, 0x8c, 0x9e, 0xad, 0xba, 0xc7, 0xd3, 0xde, 0xe8, 0xf1, 0xf9, 0xff, 0x10, 0x0f, 0x02, 0x11, 0x11, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x29, 0x4a, 0x64, 0x7a, 0x8c, 0x9e, 0xad, 0xba, 0xc7, 0xd3, 0xde, 0xe8, 0xf1, 0xf9, 0xff, 0x10, 0x0b, 0x01, 0x1c, 0x10, 0x0d, 0x01, 0x1a, 0x10, 0x0c, 0x01, 0x34, 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x40, 0x04, 0x06, 0x01, 0x03, 0, 0, 0 }; /* nw802 - Scope USB Microscope M2 (ProScope) (Hitachi HD49322BF) */ static const u8 proscope_init[] = { 0x04, 0x05, 0x01, 0x21, 0x04, 0x04, 0x01, 0x01, 0, 0, 0 }; static const u8 proscope_start_1[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x10, 0x01, 0x00, 0xf9, 0x02, 0x10, 0x00, 0x04, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x08, 0x00, 0x17, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0xce, 0x00, 0xf8, 0x03, 0x3e, 0x00, 0x86, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xf6, 0x03, 0x34, 0x04, 
0xf6, 0x03, 0x34, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xe8, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb4, 0x6f, 0x1f, 0x0f, 0x08, 0x20, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x19, 0x00, 0x94, 0x00, 0x10, 0x06, 0x10, 0x00, 0x36, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xad, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x1f, 0x10, 0x08, 0x0a, 0x0a, 0x51, 0x00, 0xf1, 0x00, 0x3c, 0x00, 0xb4, 0x00, 0x49, 0x13, 0x00, 0x00, 0x8c, 0x04, 0x01, 0x20, 0x02, 0x00, 0x03, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x2d, 0x46, 0x58, 0x67, 0x74, 0x7f, 0x88, 0x94, 0x9d, 0xa6, 0xae, 0xb5, 0xbd, 0xc4, 0xcb, 0xd1, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x09, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0, 0, 0 }; static const u8 proscope_start_qvga[] = { 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x06, 0x00, 0x03, 0x02, 0xf9, 0x02, 0x10, 0x85, 0x08, 0x00, 
0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1d, 0x08, 0xc0, 0x0d, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x0e, 0x01, 0x10, 0, 0, 0 }; static const u8 proscope_start_vga[] = { 0x00, 0x03, 0x02, 0xf9, 0x02, 0x10, 0x85, 0x08, 0x00, 0x00, 0x7f, 0x02, 0x00, 0x00, 0xdf, 0x01, 0x02, 0x00, 0x11, 0x78, 0xa0, 0x8c, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x16, 0x00, 0x00, 0x82, 0x84, 0x00, 0x80, 0x10, 0x1a, 0x01, 0x06, 0x10, 0x00, 0x01, 0xa1, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x1d, 0x08, 0xc0, 0x0d, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x11, 0x08, 0x00, 0x00, 0x7f, 0x02, 0x00, 0x00, 0xdf, 0x01, 0x10, 0x0e, 0x01, 0x10, 0x10, 0x41, 0x11, 0x00, 0x10, 0x51, 0x6e, 0x83, 0x93, 0xa1, 0xae, 0xb9, 0xc3, 0xcc, 0xd4, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9, 0x10, 0x03, 0x01, 0x00, 0, 0, 0 }; static const u8 proscope_start_2[] = { 0x10, 0x0f, 0x02, 0x0c, 0x0c, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x10, 0x51, 0x6e, 0x83, 0x93, 0xa1, 0xae, 0xb9, 0xc3, 0xcc, 0xd4, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9, 0x10, 0x0b, 0x01, 0x0b, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1b, 0x04, 0x06, 0x01, 0x03, 0x04, 0x05, 0x01, 0x21, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw800 - hv7121b? 
(seems pas106) - Divio Chicony TwinkleCam */ static const u8 twinkle_start[] = { 0x04, 0x06, 0x01, 0x44, 0x04, 0x06, 0x01, 0x00, 0x00, 0x00, 0x40, 0x14, 0x83, 0x00, 0xba, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x00, 0x00, 0x60, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x06, 0x00, 0xfc, 0x01, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb8, 0x48, 0x0f, 0x04, 0x88, 0x14, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x01, 0x00, 0x03, 0x00, 0x24, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x76, 0x00, 0x86, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x61, 0x00, 0x05, 0x00, 0x06, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x80, 0x02, 0x20, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 
0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x10, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x00, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x10, 0x85, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x04, 0x04, 0x01, 0x10, 0x04, 0x04, 0x01, 0x00, 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x01, I2C0, 0x40, 0x0c, 0x02, 0x0c, 0x12, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, I2C0, 0x40, 0x02, 0x11, 0x06, I2C0, 0x40, 0x02, 0x14, 0x00, I2C0, 0x40, 0x02, 0x13, 0x01, /* i2c end */ I2C0, 0x40, 0x02, 0x07, 0x01, 0x02, 0x00, 0x11, 0x48, 0x58, 0x9e, 0x48, 0x58, 0x00, 0x00, 0x00, 0x00, 0x84, 0x36, 0x05, 0x01, 0xf2, 0x86, 0x65, 0x40, I2C0, 0x40, 0x02, 0x02, 0x0c, I2C0, 0x40, 0x02, 0x13, 0x01, 0x10, 0x00, 0x01, 0x01, 0x10, 0x8f, 0x0c, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, I2C0, 0x40, 0x02, 0x05, 0x0f, I2C0, 0x40, 0x02, 0x13, 0x01, I2C0, 0x40, 0x08, 0x08, 0x04, 0x0b, 0x01, 0x01, 0x02, 0x00, 0x17, I2C0, 0x40, 0x03, 0x12, 0x00, 0x01, 0x10, 0x11, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, I2C0, 0x40, 0x02, 0x12, 0x00, I2C0, 0x40, 0x02, 0x0e, 0x00, I2C0, 0x40, 0x02, 0x11, 0x06, 0x10, 0x41, 0x11, 0x00, 0x17, 0x3f, 0x69, 0x7b, 0x8c, 0x9a, 0xa7, 0xb3, 0xbf, 0xc9, 0xd3, 0xdd, 0xe6, 0xef, 0xf7, 0xf9, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x0c, 0x0c, 0x10, 0x03, 0x01, 0x06, 0x10, 0x41, 0x11, 0x00, 0x17, 0x3f, 0x69, 0x7b, 0x8c, 0x9a, 0xa7, 
0xb3, 0xbf, 0xc9, 0xd3, 0xdd, 0xe6, 0xef, 0xf7, 0xf9, 0x10, 0x0b, 0x01, 0x19, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x0d, 0x04, 0x06, 0x01, 0x03, 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x41, 0, 0, 0 }; /* nw802 dvc-v6 */ static const u8 dvcv6_start[] = { 0x04, 0x06, 0x01, 0x06, 0x00, 0x00, 0x40, 0x54, 0x96, 0x98, 0xf9, 0x02, 0x18, 0x00, 0x4c, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x1b, 0x00, 0xc8, 0x00, 0xf4, 0x05, 0xb4, 0x00, 0xcc, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa2, 0x00, 0xc6, 0x00, 0x60, 0x00, 0xc6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0xae, 0x00, 0xd2, 0x00, 0xae, 0x00, 0xd2, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa8, 0x00, 0xc0, 0x00, 0x66, 0x00, 0xc0, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, 0x5d, 0x00, 0xc7, 0x00, 0x7e, 0x00, 0x30, 0x00, 0x80, 0x1f, 0x98, 0x43, 0x3f, 0x0d, 0x88, 0x20, 0x80, 0x3f, 0x47, 0xaf, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x0c, 0x00, 0x1c, 0x00, 0x94, 0x00, 0x10, 0x06, 0x24, 0x00, 0x4a, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0xff, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa0, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 
0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x00, 0x03, 0x02, 0x94, 0x03, 0x00, 0x1d, 0x04, 0x0a, 0x01, 0x28, 0x07, 0x00, 0x7b, 0x02, 0xe0, 0x00, 0x10, 0x8d, 0x01, 0x00, 0x00, 0x09, 0x04, 0x1e, 0x00, 0x0c, 0x02, 0x00, 0x91, 0x02, 0x0b, 0x02, 0x10, 0x00, 0x01, 0xaf, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x8f, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x3f, 0x06, 0xf2, 0x8f, 0xf0, 0x40, 0x10, 0x1a, 0x01, 0x02, 0x10, 0x00, 0x01, 0xaf, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x07, 0x01, 0x10, 0x11, 0x08, 0x61, 0x00, 0xe0, 0x00, 0x49, 0x00, 0xa8, 0x00, 0x10, 0x1f, 0x06, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x1d, 0x02, 0x40, 0x06, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x54, 0x6f, 0x82, 0x91, 0x9f, 0xaa, 0xb4, 0xbd, 0xc5, 0xcd, 0xd5, 0xdb, 0xdc, 0xdc, 0xdc, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x12, 0x12, 0x10, 0x03, 0x01, 0x11, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x54, 0x6f, 0x82, 0x91, 0x9f, 0xaa, 0xb4, 0xbd, 0xc5, 0xcd, 0xd5, 0xdb, 0xdc, 0xdc, 0xdc, 0x10, 0x0b, 0x01, 0x16, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1a, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, }; static const u8 *webcam_start[] = { [Generic800] = nw800_start, [SpaceCam] = spacecam_start, [SpaceCam2] = spacecam2_start, [Cvideopro] = cvideopro_start, [Dlink350c] = dlink_start, [DS3303u] = ds3303_start, [Kr651us] = 
kr651_start_1,
	[Kritter] = kritter_start,
	[Mustek300] = mustek_start,
	[Proscope] = proscope_start_1,
	[Twinkle] = twinkle_start,
	[DvcV6] = dvcv6_start,
	[P35u] = nw801_start_1,
	[Generic802] = nw802_start,
};

/* -- write a register -- */
/*
 * Send a vendor OUT control request writing 'len' bytes from 'data' to
 * the bridge register at 'index'.  A USB failure is latched in
 * gspca_dev->usb_err, after which all further register accesses in this
 * driver become no-ops.
 */
static void reg_w(struct gspca_dev *gspca_dev,
			u16 index,
			const u8 *data,
			int len)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;

	/* skip the transfer if a previous USB access already failed */
	if (gspca_dev->usb_err < 0)
		return;
	if (len == 1)
		PDEBUG(D_USBO, "SET 00 0000 %04x %02x", index, *data);
	else
		PDEBUG(D_USBO, "SET 00 0000 %04x %02x %02x ...",
				index, *data, data[1]);
	/* stage the payload in the shared gspca_dev->usb_buf */
	memcpy(gspca_dev->usb_buf, data, len);
	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			0x00,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0x00,		/* value */
			index, gspca_dev->usb_buf, len, 500);
	if (ret < 0) {
		pr_err("reg_w err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}

/* -- read registers in usb_buf -- */
/*
 * Read 'len' bytes from the bridge register at 'index' into
 * gspca_dev->usb_buf.  As with reg_w(), an error is latched in
 * gspca_dev->usb_err and subsequent accesses are skipped.
 */
static void reg_r(struct gspca_dev *gspca_dev,
			u16 index,
			int len)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			0x00,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0x00, index,
			gspca_dev->usb_buf, len, 500);
	if (ret < 0) {
		pr_err("reg_r err %d\n", ret);
		gspca_dev->usb_err = ret;
		return;
	}
	if (len == 1)
		PDEBUG(D_USBI, "GET 00 0000 %04x %02x",
				index, gspca_dev->usb_buf[0]);
	else
		PDEBUG(D_USBI, "GET 00 0000 %04x %02x %02x ..",
				index,
				gspca_dev->usb_buf[0],
				gspca_dev->usb_buf[1]);
}

/*
 * Write an i2c sequence to the sensor at 'i2c_addr'.
 * The payload is loaded at bridge register 0x0600, the length and the
 * i2c address at 0x0502, the transfer is kicked off via 0x0501, then
 * register 0x0505 is polled (up to 5 times, 4 ms apart) until the
 * bridge reports completion (reads back 0).  -ETIME is latched in
 * gspca_dev->usb_err if the transfer never completes.
 */
static void i2c_w(struct gspca_dev *gspca_dev,
			u8 i2c_addr,
			const u8 *data,
			int len)
{
	u8 val[2];
	int i;

	/* NOTE(review): the payload is written twice, first without its
	 * leading byte - presumably a bridge requirement; confirm against
	 * the hardware documentation */
	reg_w(gspca_dev, 0x0600, data + 1, len - 1);
	reg_w(gspca_dev, 0x0600, data, len);
	val[0] = len;
	val[1] = i2c_addr;
	reg_w(gspca_dev, 0x0502, val, 2);
	val[0] = 0x01;
	reg_w(gspca_dev, 0x0501, val, 1);
	for (i = 5; --i >= 0; ) {
		msleep(4);
		reg_r(gspca_dev, 0x0505, 1);
		if (gspca_dev->usb_err < 0)
			return;
		if (gspca_dev->usb_buf[0] == 0)	/* transfer done */
			return;
	}
	gspca_dev->usb_err = -ETIME;
}

/*
 * Play back a command script: a sequence of
 *	<reg_hi> <reg_lo> <len> <len data bytes>
 * records terminated by a record with len == 0.  Records whose high
 * address byte equals I2C0 are routed to the sensor through i2c_w();
 * all others go to the bridge through reg_w().
 */
static void reg_w_buf(struct gspca_dev *gspca_dev,
			const u8 *cmd)
{
	u16 reg;
	int len;

	for (;;) {
		reg = *cmd++ << 8;
		reg += *cmd++;
		len = *cmd++;
		if (len == 0)
			break;
		if (cmd[-3] != I2C0)	/* cmd[-3] is the high address byte */
			reg_w(gspca_dev, reg, cmd, len);
		else
			i2c_w(gspca_dev, reg, cmd, len);
		cmd += len;
	}
}

/* reverse the bit order of the low 8 bits of 'v' */
static int swap_bits(int v)
{
	int r, i;

	r = 0;
	for (i = 0; i < 8; i++) {
		r <<= 1;
		if (v & 1)
			r++;
		v >>= 1;
	}
	return r;
}

/* set the sensor gain from sd->ctrls[GAIN].val (webcam dependent) */
static void setgain(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 val, v[2];

	val = sd->ctrls[GAIN].val;
	switch (sd->webcam) {
	case P35u:
		/* Note the control goes from 0-255 not 0-127, but anything
		   above 127 just means amplifying noise */
		val >>= 1;			/* 0 - 255 -> 0 - 127 */
		reg_w(gspca_dev, 0x1026, &val, 1);
		break;
	case Kr651us:
		/* 0 - 253 */
		/* the sensor wants the gain bit-reversed and left-shifted
		 * by 3 across two bytes */
		val = swap_bits(val);
		v[0] = val << 3;
		v[1] = val >> 5;
		reg_w(gspca_dev, 0x101d, v, 2);	/* SIF reg0/1 (AGC) */
		break;
	}
}

/* set the exposure from sd->ctrls[EXPOSURE].val (webcam dependent) */
static void setexposure(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s16 val;
	u8 v[2];

	val = sd->ctrls[EXPOSURE].val;
	switch (sd->webcam) {
	case P35u:
		/* inverted scale packed into the high bits of one byte */
		v[0] = ((9 - val) << 3) | 0x01;
		reg_w(gspca_dev, 0x1019, v, 1);
		break;
	case Cvideopro:
	case DvcV6:
	case Kritter:
	case Kr651us:
		/* 16-bit little-endian exposure value */
		v[0] = val;
		v[1] = val >> 8;
		reg_w(gspca_dev, 0x101b, v, 2);
		break;
	}
}

/*
 * Enable or disable the software autogain loop according to
 * sd->ctrls[AUTOGAIN].val.  When enabling, the area of the bridge
 * auto-exposure window is cached in sd->ae_res (falling back to the
 * full frame size when register 0x1004 bit 2 is set or the window
 * computes to zero).
 */
static void setautogain(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int w, h;

	if (gspca_dev->ctrl_dis & (1 << AUTOGAIN))
		return;
	if (!sd->ctrls[AUTOGAIN].val) {
		sd->ag_cnt = -1;
		return;
	}
	sd->ag_cnt = AG_CNT_START;

	reg_r(gspca_dev, 0x1004, 1);
	if (gspca_dev->usb_buf[0] & 0x04) {	/* if AE_FULL_FRM */
		sd->ae_res = gspca_dev->width * gspca_dev->height;
	} else {
		/* get the AE window size */
		reg_r(gspca_dev, 0x1011, 8);
		w = (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]
		  - (gspca_dev->usb_buf[3] << 8) - gspca_dev->usb_buf[2];
		h = (gspca_dev->usb_buf[5] << 8) + gspca_dev->usb_buf[4]
		  - (gspca_dev->usb_buf[7] << 8) - gspca_dev->usb_buf[6];
		sd->ae_res = h * w;
		if (sd->ae_res == 0)
			sd->ae_res = gspca_dev->width * gspca_dev->height;
	}
}

static int
nw802_test_reg(struct gspca_dev *gspca_dev, u16 index, u8 value) { /* write the value */ reg_w(gspca_dev, index, &value, 1); /* read it */ reg_r(gspca_dev, index, 1); return gspca_dev->usb_buf[0] == value; } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; if ((unsigned) webcam >= NWEBCAMS) webcam = 0; sd->webcam = webcam; gspca_dev->cam.ctrls = sd->ctrls; gspca_dev->cam.needs_full_bandwidth = 1; sd->ag_cnt = -1; /* * Autodetect sequence inspired from some log. * We try to detect what registers exist or not. * If 0x0500 does not exist => NW802 * If it does, test 0x109b. If it doesn't exist, * then it's a NW801. Else, a NW800 * If a et31x110 (nw800 and 06a5:d800) * get the sensor ID */ if (!nw802_test_reg(gspca_dev, 0x0500, 0x55)) { sd->bridge = BRIDGE_NW802; if (sd->webcam == Generic800) sd->webcam = Generic802; } else if (!nw802_test_reg(gspca_dev, 0x109b, 0xaa)) { sd->bridge = BRIDGE_NW801; if (sd->webcam == Generic800) sd->webcam = P35u; } else if (id->idVendor == 0x06a5 && id->idProduct == 0xd800) { reg_r(gspca_dev, 0x0403, 1); /* GPIO */ PDEBUG(D_PROBE, "et31x110 sensor type %02x", gspca_dev->usb_buf[0]); switch (gspca_dev->usb_buf[0] >> 1) { case 0x00: /* ?? */ if (sd->webcam == Generic800) sd->webcam = SpaceCam; break; case 0x01: /* Hynix? 
*/ if (sd->webcam == Generic800) sd->webcam = Twinkle; break; case 0x0a: /* Pixart */ if (sd->webcam == Generic800) sd->webcam = SpaceCam2; break; } } if (webcam_chip[sd->webcam] != sd->bridge) { pr_err("Bad webcam type %d for NW80%d\n", sd->webcam, sd->bridge); gspca_dev->usb_err = -ENODEV; return gspca_dev->usb_err; } PDEBUG(D_PROBE, "Bridge nw80%d - type: %d", sd->bridge, sd->webcam); if (sd->bridge == BRIDGE_NW800) { switch (sd->webcam) { case DS3303u: gspca_dev->cam.cam_mode = cif_mode; /* qvga */ break; default: gspca_dev->cam.cam_mode = &cif_mode[1]; /* cif */ break; } gspca_dev->cam.nmodes = 1; } else { gspca_dev->cam.cam_mode = vga_mode; switch (sd->webcam) { case Kr651us: case Proscope: case P35u: gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode); break; default: gspca_dev->cam.nmodes = 1; /* qvga only */ break; } } switch (sd->webcam) { case P35u: /* sd->ctrls[EXPOSURE].max = 9; * sd->ctrls[EXPOSURE].def = 9; */ /* coarse expo auto gain function gain minimum, to avoid * a large settings jump the first auto adjustment */ sd->ctrls[GAIN].def = 255 / 5 * 2; break; case Cvideopro: case DvcV6: case Kritter: gspca_dev->ctrl_dis = (1 << GAIN) | (1 << AUTOGAIN); /* fall thru */ case Kr651us: sd->ctrls[EXPOSURE].max = 315; sd->ctrls[EXPOSURE].def = 150; break; default: gspca_dev->ctrl_dis = (1 << GAIN) | (1 << EXPOSURE) | (1 << AUTOGAIN); break; } #if AUTOGAIN_DEF if (!(gspca_dev->ctrl_dis & (1 << AUTOGAIN))) gspca_dev->ctrl_inac = (1 << GAIN) | (1 << EXPOSURE); #endif return gspca_dev->usb_err; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->bridge) { case BRIDGE_NW800: switch (sd->webcam) { case SpaceCam: reg_w_buf(gspca_dev, spacecam_init); break; default: reg_w_buf(gspca_dev, nw800_init); break; } break; default: switch (sd->webcam) { case Mustek300: case P35u: case Proscope: reg_w_buf(gspca_dev, proscope_init); break; } break; } return 
gspca_dev->usb_err; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; const u8 *cmd; cmd = webcam_start[sd->webcam]; reg_w_buf(gspca_dev, cmd); switch (sd->webcam) { case P35u: if (gspca_dev->width == 320) reg_w_buf(gspca_dev, nw801_start_qvga); else reg_w_buf(gspca_dev, nw801_start_vga); reg_w_buf(gspca_dev, nw801_start_2); break; case Kr651us: if (gspca_dev->width == 320) reg_w_buf(gspca_dev, kr651_start_qvga); else reg_w_buf(gspca_dev, kr651_start_vga); reg_w_buf(gspca_dev, kr651_start_2); break; case Proscope: if (gspca_dev->width == 320) reg_w_buf(gspca_dev, proscope_start_qvga); else reg_w_buf(gspca_dev, proscope_start_vga); reg_w_buf(gspca_dev, proscope_start_2); break; } setgain(gspca_dev); setexposure(gspca_dev); setautogain(gspca_dev); sd->exp_too_high_cnt = 0; sd->exp_too_low_cnt = 0; return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 value; /* 'go' off */ if (sd->bridge != BRIDGE_NW801) { value = 0x02; reg_w(gspca_dev, 0x0406, &value, 1); } /* LED off */ switch (sd->webcam) { case Cvideopro: case Kr651us: case DvcV6: case Kritter: value = 0xff; break; case Dlink350c: value = 0x21; break; case SpaceCam: case SpaceCam2: case Proscope: case Twinkle: value = 0x01; break; default: return; } reg_w(gspca_dev, 0x0404, &value, 1); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { /* * frame header = '00 00 hh ww ss xx ff ff' * with: * - 'hh': height / 4 * - 'ww': width / 4 * - 'ss': frame sequence number c0..dd */ if (data[0] == 0x00 && data[1] == 0x00 && data[6] == 0xff && data[7] == 0xff) { gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, data + 8, len - 8); } else { gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } } static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = 
(struct sd *) gspca_dev; sd->ctrls[AUTOGAIN].val = val; if (val) gspca_dev->ctrl_inac = (1 << GAIN) | (1 << EXPOSURE); else gspca_dev->ctrl_inac = 0; if (gspca_dev->streaming) setautogain(gspca_dev); return gspca_dev->usb_err; } #include "autogain_functions.h" static void do_autogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int luma; if (sd->ag_cnt < 0) return; if (--sd->ag_cnt >= 0) return; sd->ag_cnt = AG_CNT_START; /* get the average luma */ reg_r(gspca_dev, sd->bridge == BRIDGE_NW801 ? 0x080d : 0x080c, 4); luma = (gspca_dev->usb_buf[3] << 24) + (gspca_dev->usb_buf[2] << 16) + (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]; luma /= sd->ae_res; switch (sd->webcam) { case P35u: coarse_grained_expo_autogain(gspca_dev, luma, 100, 5); break; default: auto_gain_n_exposure(gspca_dev, luma, 100, 5, 230, 0); break; } } /* V4L2 controls supported by the driver */ static const struct ctrl sd_ctrls[NCTRLS] = { [GAIN] = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = 0, .maximum = 253, .step = 1, .default_value = 128 }, .set_control = setgain }, [EXPOSURE] = { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure", .minimum = 0, .maximum = 9, .step = 1, .default_value = 9 }, .set_control = setexposure }, [AUTOGAIN] = { { .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto Gain", .minimum = 0, .maximum = 1, .step = 1, .default_value = AUTOGAIN_DEF, .flags = V4L2_CTRL_FLAG_UPDATE }, .set = sd_setautogain }, }; /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x046d, 0xd001)}, {USB_DEVICE(0x0502, 0xd001)}, {USB_DEVICE(0x052b, 0xd001)}, 
{USB_DEVICE(0x055f, 0xd001)}, {USB_DEVICE(0x06a5, 0x0000)}, {USB_DEVICE(0x06a5, 0xd001)}, {USB_DEVICE(0x06a5, 0xd800)}, {USB_DEVICE(0x06be, 0xd001)}, {USB_DEVICE(0x0728, 0xd001)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver); module_param(webcam, int, 0644); MODULE_PARM_DESC(webcam, "Webcam type\n" "0: generic\n" "1: Trust 120 SpaceCam\n" "2: other Trust 120 SpaceCam\n" "3: Conceptronic Video Pro\n" "4: D-link dru-350c\n" "5: Plustek Opticam 500U\n" "6: Panasonic GP-KR651US\n" "7: iRez Kritter\n" "8: Mustek Wcam 300 mini\n" "9: Scalar USB Microscope M2 (Proscope)\n" "10: Divio Chicony TwinkleCam\n" "11: DVC-V6\n");
gpl-2.0
tianchi-dev/android_kernel_sony_msm8226
net/netfilter/xt_RATEEST.c
5668
4722
/* * (C) 2007 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/gen_stats.h> #include <linux/jhash.h> #include <linux/rtnetlink.h> #include <linux/random.h> #include <linux/slab.h> #include <net/gen_stats.h> #include <net/netlink.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_RATEEST.h> #include <net/netfilter/xt_rateest.h> static DEFINE_MUTEX(xt_rateest_mutex); #define RATEEST_HSIZE 16 static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly; static unsigned int jhash_rnd __read_mostly; static bool rnd_inited __read_mostly; static unsigned int xt_rateest_hash(const char *name) { return jhash(name, FIELD_SIZEOF(struct xt_rateest, name), jhash_rnd) & (RATEEST_HSIZE - 1); } static void xt_rateest_hash_insert(struct xt_rateest *est) { unsigned int h; h = xt_rateest_hash(est->name); hlist_add_head(&est->list, &rateest_hash[h]); } struct xt_rateest *xt_rateest_lookup(const char *name) { struct xt_rateest *est; struct hlist_node *n; unsigned int h; h = xt_rateest_hash(name); mutex_lock(&xt_rateest_mutex); hlist_for_each_entry(est, n, &rateest_hash[h], list) { if (strcmp(est->name, name) == 0) { est->refcnt++; mutex_unlock(&xt_rateest_mutex); return est; } } mutex_unlock(&xt_rateest_mutex); return NULL; } EXPORT_SYMBOL_GPL(xt_rateest_lookup); void xt_rateest_put(struct xt_rateest *est) { mutex_lock(&xt_rateest_mutex); if (--est->refcnt == 0) { hlist_del(&est->list); gen_kill_estimator(&est->bstats, &est->rstats); /* * gen_estimator est_timer() might access est->lock or bstats, * wait a RCU grace period before freeing 'est' */ kfree_rcu(est, rcu); } mutex_unlock(&xt_rateest_mutex); } EXPORT_SYMBOL_GPL(xt_rateest_put); static unsigned int xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param 
*par) { const struct xt_rateest_target_info *info = par->targinfo; struct gnet_stats_basic_packed *stats = &info->est->bstats; spin_lock_bh(&info->est->lock); stats->bytes += skb->len; stats->packets++; spin_unlock_bh(&info->est->lock); return XT_CONTINUE; } static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) { struct xt_rateest_target_info *info = par->targinfo; struct xt_rateest *est; struct { struct nlattr opt; struct gnet_estimator est; } cfg; int ret; if (unlikely(!rnd_inited)) { get_random_bytes(&jhash_rnd, sizeof(jhash_rnd)); rnd_inited = true; } est = xt_rateest_lookup(info->name); if (est) { /* * If estimator parameters are specified, they must match the * existing estimator. */ if ((!info->interval && !info->ewma_log) || (info->interval != est->params.interval || info->ewma_log != est->params.ewma_log)) { xt_rateest_put(est); return -EINVAL; } info->est = est; return 0; } ret = -ENOMEM; est = kzalloc(sizeof(*est), GFP_KERNEL); if (!est) goto err1; strlcpy(est->name, info->name, sizeof(est->name)); spin_lock_init(&est->lock); est->refcnt = 1; est->params.interval = info->interval; est->params.ewma_log = info->ewma_log; cfg.opt.nla_len = nla_attr_size(sizeof(cfg.est)); cfg.opt.nla_type = TCA_STATS_RATE_EST; cfg.est.interval = info->interval; cfg.est.ewma_log = info->ewma_log; ret = gen_new_estimator(&est->bstats, &est->rstats, &est->lock, &cfg.opt); if (ret < 0) goto err2; info->est = est; xt_rateest_hash_insert(est); return 0; err2: kfree(est); err1: return ret; } static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par) { struct xt_rateest_target_info *info = par->targinfo; xt_rateest_put(info->est); } static struct xt_target xt_rateest_tg_reg __read_mostly = { .name = "RATEEST", .revision = 0, .family = NFPROTO_UNSPEC, .target = xt_rateest_tg, .checkentry = xt_rateest_tg_checkentry, .destroy = xt_rateest_tg_destroy, .targetsize = sizeof(struct xt_rateest_target_info), .me = THIS_MODULE, }; static int __init 
xt_rateest_tg_init(void) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rateest_hash); i++) INIT_HLIST_HEAD(&rateest_hash[i]); return xt_register_target(&xt_rateest_tg_reg); } static void __exit xt_rateest_tg_fini(void) { xt_unregister_target(&xt_rateest_tg_reg); } MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Xtables: packet rate estimator"); MODULE_ALIAS("ipt_RATEEST"); MODULE_ALIAS("ip6t_RATEEST"); module_init(xt_rateest_tg_init); module_exit(xt_rateest_tg_fini);
gpl-2.0
giveme13s/android_kernel_oneplus_msm8974
arch/arm/plat-mxc/devices/platform-mxc_rtc.c
7716
1032
/*
 * Copyright (C) 2010-2011 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
#include <mach/hardware.h>
#include <mach/devices-common.h>

/*
 * Build one imx_mxc_rtc_data initializer from the SoC-specific
 * <soc>_RTC_BASE_ADDR / <soc>_INT_RTC macros.
 */
#define imx_mxc_rtc_data_entry_single(soc)				\
	{								\
		.iobase = soc ## _RTC_BASE_ADDR,			\
		.irq = soc ## _INT_RTC,					\
	}

#ifdef CONFIG_SOC_IMX31
const struct imx_mxc_rtc_data imx31_mxc_rtc_data __initconst =
	imx_mxc_rtc_data_entry_single(MX31);
#endif /* ifdef CONFIG_SOC_IMX31 */

/*
 * Register the "mxc_rtc" platform device described by @data: a 16 KiB
 * register window at data->iobase plus the RTC interrupt line.
 * Returns whatever imx_add_platform_device() returns.
 */
struct platform_device *__init imx_add_mxc_rtc(
		const struct imx_mxc_rtc_data *data)
{
	struct resource resources[] = {
		{
			.start = data->iobase,
			.end = data->iobase + SZ_16K - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};

	return imx_add_platform_device("mxc_rtc", -1,
			resources, ARRAY_SIZE(resources), NULL, 0);
}
gpl-2.0
Savaged-Zen/Savaged-Zen-Speedy
drivers/rtc/rtc-au1xxx.c
10020
3636
/* * Au1xxx counter0 (aka Time-Of-Year counter) RTC interface driver. * * Copyright (C) 2008 Manuel Lauss <mano@roarinelk.homelinux.net> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ /* All current Au1xxx SoCs have 2 counters fed by an external 32.768 kHz * crystal. Counter 0, which keeps counting during sleep/powerdown, is * used to count seconds since the beginning of the unix epoch. * * The counters must be configured and enabled by bootloader/board code; * no checks as to whether they really get a proper 32.768kHz clock are * made as this would take far too long. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <asm/mach-au1x00/au1000.h> /* 32kHz clock enabled and detected */ #define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S) static int au1xtoy_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned long t; t = au_readl(SYS_TOYREAD); rtc_time_to_tm(t, tm); return rtc_valid_tm(tm); } static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm) { unsigned long t; rtc_tm_to_time(tm, &t); au_writel(t, SYS_TOYWRITE); au_sync(); /* wait for the pending register write to succeed. This can * take up to 6 seconds... 
*/ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S) msleep(1); return 0; } static struct rtc_class_ops au1xtoy_rtc_ops = { .read_time = au1xtoy_rtc_read_time, .set_time = au1xtoy_rtc_set_time, }; static int __devinit au1xtoy_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtcdev; unsigned long t; int ret; t = au_readl(SYS_COUNTER_CNTRL); if (!(t & CNTR_OK)) { dev_err(&pdev->dev, "counters not working; aborting.\n"); ret = -ENODEV; goto out_err; } ret = -ETIMEDOUT; /* set counter0 tickrate to 1Hz if necessary */ if (au_readl(SYS_TOYTRIM) != 32767) { /* wait until hardware gives access to TRIM register */ t = 0x00100000; while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S) && --t) msleep(1); if (!t) { /* timed out waiting for register access; assume * counters are unusable. */ dev_err(&pdev->dev, "timeout waiting for access\n"); goto out_err; } /* set 1Hz TOY tick rate */ au_writel(32767, SYS_TOYTRIM); au_sync(); } /* wait until the hardware allows writes to the counter reg */ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S) msleep(1); rtcdev = rtc_device_register("rtc-au1xxx", &pdev->dev, &au1xtoy_rtc_ops, THIS_MODULE); if (IS_ERR(rtcdev)) { ret = PTR_ERR(rtcdev); goto out_err; } platform_set_drvdata(pdev, rtcdev); return 0; out_err: return ret; } static int __devexit au1xtoy_rtc_remove(struct platform_device *pdev) { struct rtc_device *rtcdev = platform_get_drvdata(pdev); rtc_device_unregister(rtcdev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver au1xrtc_driver = { .driver = { .name = "rtc-au1xxx", .owner = THIS_MODULE, }, .remove = __devexit_p(au1xtoy_rtc_remove), }; static int __init au1xtoy_rtc_init(void) { return platform_driver_probe(&au1xrtc_driver, au1xtoy_rtc_probe); } static void __exit au1xtoy_rtc_exit(void) { platform_driver_unregister(&au1xrtc_driver); } module_init(au1xtoy_rtc_init); module_exit(au1xtoy_rtc_exit); MODULE_DESCRIPTION("Au1xxx TOY-counter-based RTC driver"); MODULE_AUTHOR("Manuel Lauss 
<manuel.lauss@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:rtc-au1xxx");
gpl-2.0
pali/linux-n900
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
37
7783
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"

/* True when HW RX timestamping is enabled for all packets. */
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
	return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
}

/*
 * Allocate one skb for RQ slot @ix, DMA-map it and point the WQE at it.
 * The DMA address is stashed in skb->cb so the completion path can
 * unmap it.  Returns 0 or -ENOMEM.
 */
static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
				     struct mlx5e_rx_wqe *wqe, u16 ix)
{
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
	if (unlikely(!skb))
		return -ENOMEM;

	/* Map the full wqe_sz buffer; the device writes after the
	 * MLX5E_NET_IP_ALIGN headroom reserved below.
	 */
	dma_addr = dma_map_single(rq->pdev,
				  /* hw start padding */
				  skb->data,
				  /* hw end padding */
				  rq->wqe_sz,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
		goto err_free_skb;

	skb_reserve(skb, MLX5E_NET_IP_ALIGN);

	/* remember the mapping for dma_unmap_single() at completion time */
	*((dma_addr_t *)skb->cb) = dma_addr;
	wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);

	rq->skb[ix] = skb;

	return 0;

err_free_skb:
	dev_kfree_skb(skb);

	return -ENOMEM;
}

/*
 * Refill the RQ with receive WQEs until it is full or allocation fails,
 * then ring the doorbell record.  Returns true if the RQ is not full
 * (i.e. more posting is still wanted).
 */
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
		return false;

	while (!mlx5_wq_ll_is_full(wq)) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
			break;

		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
	}

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);

	return !mlx5_wq_ll_is_full(wq);
}

/*
 * Rebuild the IP/TCP headers of an HW-aggregated (LRO) skb from the CQE:
 * PSH/ACK bits, ack_seq/window, TTL/hop-limit and total length, and
 * recompute the IPv4 header checksum.  Assumes an untagged Ethernet
 * frame followed by a plain IPv4 or IPv6 header (no options/extensions)
 * -- NOTE(review): this mirrors what the indexing arithmetic expects;
 * the HW presumably only aggregates such flows.
 */
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
{
	struct ethhdr	*eth	= (struct ethhdr *)(skb->data);
	struct iphdr	*ipv4	= (struct iphdr *)(skb->data + ETH_HLEN);
	struct ipv6hdr	*ipv6	= (struct ipv6hdr *)(skb->data + ETH_HLEN);
	struct tcphdr	*tcp;

	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

	u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;

	if (eth->h_proto == htons(ETH_P_IP)) {
		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
					sizeof(struct iphdr));
		ipv6 = NULL;
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	} else {
		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
					sizeof(struct ipv6hdr));
		ipv4 = NULL;
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	}

	if (get_cqe_lro_tcppsh(cqe))
		tcp->psh                = 1;

	if (tcp_ack) {
		tcp->ack                = 1;
		tcp->ack_seq            = cqe->lro_ack_seq_num;
		tcp->window             = cqe->lro_tcp_win;
	}

	if (ipv4) {
		ipv4->ttl               = cqe->lro_min_ttl;
		ipv4->tot_len           = cpu_to_be16(tot_len);
		ipv4->check             = 0;
		ipv4->check = ip_fast_csum((unsigned char *)ipv4,
					   ipv4->ihl);
	} else {
		ipv6->hop_limit         = cqe->lro_min_ttl;
		ipv6->payload_len       = cpu_to_be16(tot_len -
						      sizeof(struct ipv6hdr));
	}
}

/* Propagate the HW RSS hash (and its L3/L4 type) into the skb. */
static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
				      struct sk_buff *skb)
{
	u8 cht = cqe->rss_hash_type;
	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
					    PKT_HASH_TYPE_NONE;
	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

/* True if the outer (first) ethertype is IPv4 or IPv6. */
static inline bool is_first_ethertype_ip(struct sk_buff *skb)
{
	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;

	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}

/*
 * Set skb->ip_summed from the CQE: LRO skbs are CHECKSUM_UNNECESSARY
 * (HW already validated them), plain IP packets get the HW checksum as
 * CHECKSUM_COMPLETE, everything else falls back to CHECKSUM_NONE.
 */
static inline void mlx5e_handle_csum(struct net_device *netdev,
				     struct mlx5_cqe64 *cqe,
				     struct mlx5e_rq *rq,
				     struct sk_buff *skb,
				     bool   lro)
{
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		goto csum_none;

	if (lro) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (likely(is_first_ethertype_ip(skb))) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
		rq->stats.csum_sw++;
	} else {
		goto csum_none;
	}

	return;

csum_none:
	skb->ip_summed = CHECKSUM_NONE;
	rq->stats.csum_none++;
}

/*
 * Turn a raw completed buffer into a fully-formed skb: set the length
 * from the CQE byte count, fix up LRO headers/gso_size, fill the HW
 * timestamp, checksum status, protocol, RX queue, RSS hash, VLAN tag
 * and the flow mark carried in sop_drop_qpn.
 */
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
				      struct mlx5e_rq *rq,
				      struct sk_buff *skb)
{
	struct net_device *netdev = rq->netdev;
	u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
	struct mlx5e_tstamp *tstamp = rq->tstamp;
	int lro_num_seg;

	skb_put(skb, cqe_bcnt);

	/* number of aggregated segments lives in the top byte of srqn */
	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(skb, cqe);
		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
		mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));

	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);

	skb->protocol = eth_type_trans(skb, netdev);

	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	if (cqe_has_vlan(cqe))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(cqe->vlan_info));

	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
}

/*
 * NAPI poll: consume up to @budget RX completions.  For each CQE,
 * unmap and detach the matching skb, drop it on a HW error opcode,
 * otherwise build the skb and hand it to GRO; the WQE is always
 * returned to the free list.  Returns the number of CQEs processed.
 */
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
	int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		struct mlx5e_rx_wqe *wqe;
		struct mlx5_cqe64 *cqe;
		struct sk_buff *skb;
		__be16 wqe_counter_be;
		u16 wqe_counter;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter_be = cqe->wqe_counter;
		wqe_counter    = be16_to_cpu(wqe_counter_be);
		wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		skb            = rq->skb[wqe_counter];
		prefetch(skb->data);
		rq->skb[wqe_counter] = NULL;

		/* unmap with the address stashed by mlx5e_alloc_rx_wqe() */
		dma_unmap_single(rq->pdev,
				 *((dma_addr_t *)skb->cb),
				 rq->wqe_sz,
				 DMA_FROM_DEVICE);

		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
			rq->stats.wqe_err++;
			dev_kfree_skb(skb);
			goto wq_ll_pop;
		}

		mlx5e_build_rx_skb(cqe, rq, skb);
		rq->stats.packets++;
		rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
		napi_gro_receive(cq->napi, skb);

wq_ll_pop:
		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
			       &wqe->next.next_wqe_index);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done;
}
gpl-2.0
artemh/asuswrt-merlin
release/src-rt-7.x.main/src/linux/linux-2.6.36/net/ipv4/netfilter/ipt_set.c
37
6987
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> * Patrick Schaaf <bof@bof.de> * Martin Josefsson <gandalf@wlug.westbo.se> * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Kernel module to match an IP set. */ #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/version.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #include <linux/netfilter_ipv4/ip_tables.h> #define xt_register_match ipt_register_match #define xt_unregister_match ipt_unregister_match #define xt_match ipt_match #else #include <linux/netfilter/x_tables.h> #endif #include <linux/netfilter_ipv4/ip_set.h> #include <linux/netfilter_ipv4/ipt_set.h> static inline int match_set(const struct ipt_set_info *info, const struct sk_buff *skb, int inv) { if (ip_set_testip_kernel(info->index, skb, info->flags)) inv = !inv; return inv; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) static int match(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const void *matchinfo, int offset, const void *hdr, u_int16_t datalen, int *hotdrop) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) static int match(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const void *matchinfo, int offset, int *hotdrop) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) static int match(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const void *matchinfo, int offset, unsigned int protoff, int *hotdrop) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) static int match(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct xt_match *match, const void *matchinfo, int offset, unsigned int protoff, int *hotdrop) #elif 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static bool match(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct xt_match *match, const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) static bool match(const struct sk_buff *skb, const struct xt_match_param *par) #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */ static bool match(const struct sk_buff *skb, struct xt_action_param *par) #endif { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) const struct ipt_set_info_match *info = matchinfo; #else const struct ipt_set_info_match *info = par->matchinfo; #endif return match_set(&info->match_set, skb, info->match_set.flags[0] & IPSET_MATCH_INV); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) #define CHECK_OK 1 #define CHECK_FAIL 0 #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */ #define CHECK_OK 0 #define CHECK_FAIL -EINVAL #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) static int checkentry(const char *tablename, const struct ipt_ip *ip, void *matchinfo, unsigned int matchsize, unsigned int hook_mask) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) static int checkentry(const char *tablename, const void *inf, void *matchinfo, unsigned int matchsize, unsigned int hook_mask) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) static int checkentry(const char *tablename, const void *inf, const struct xt_match *match, void *matchinfo, unsigned int matchsize, unsigned int hook_mask) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) static int checkentry(const char *tablename, const void *inf, const struct xt_match *match, void *matchinfo, unsigned int hook_mask) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static bool checkentry(const char *tablename, const void *inf, const struct xt_match *match, void *matchinfo, unsigned int hook_mask) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) static bool checkentry(const struct 
xt_mtchk_param *par) #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */ static int checkentry(const struct xt_mtchk_param *par) #endif { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) struct ipt_set_info_match *info = matchinfo; #else struct ipt_set_info_match *info = par->matchinfo; #endif ip_set_id_t index; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) { ip_set_printk("invalid matchsize %d", matchsize); return CHECK_FAIL; } #endif index = ip_set_get_byindex(info->match_set.index); if (index == IP_SET_INVALID_ID) { ip_set_printk("Cannot find set indentified by id %u to match", info->match_set.index); return CHECK_FAIL; /* error */ } if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) { ip_set_printk("That's nasty!"); return CHECK_FAIL; /* error */ } return CHECK_OK; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) static void destroy(void *matchinfo, unsigned int matchsize) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) static void destroy(const struct xt_match *match, void *matchinfo, unsigned int matchsize) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static void destroy(const struct xt_match *match, void *matchinfo) #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */ static void destroy(const struct xt_mtdtor_param *par) #endif { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) struct ipt_set_info_match *info = matchinfo; #else struct ipt_set_info_match *info = par->matchinfo; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) { ip_set_printk("invalid matchsize %d", matchsize); return; } #endif ip_set_put_byindex(info->match_set.index); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) static struct xt_match set_match = { .name = "set", .match = &match, .checkentry = &checkentry, .destroy = &destroy, .me = THIS_MODULE }; #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */ static struct xt_match set_match = 
{ .name = "set", .family = AF_INET, .match = &match, .matchsize = sizeof(struct ipt_set_info_match), .checkentry = &checkentry, .destroy = &destroy, .me = THIS_MODULE }; #endif MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_DESCRIPTION("iptables IP set match module"); static int __init ipt_ipset_init(void) { return xt_register_match(&set_match); } static void __exit ipt_ipset_fini(void) { xt_unregister_match(&set_match); } module_init(ipt_ipset_init); module_exit(ipt_ipset_fini);
gpl-2.0
mtitinger/linux-pm
drivers/infiniband/hw/mlx5/srq.c
549
12561
/* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/module.h> #include <linux/mlx5/qp.h> #include <linux/mlx5/srq.h> #include <linux/slab.h> #include <rdma/ib_umem.h> #include <rdma/ib_user_verbs.h> #include "mlx5_ib.h" #include "user.h" /* not supported currently */ static int srq_signature; static void *get_wqe(struct mlx5_ib_srq *srq, int n) { return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); } static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) { struct ib_event event; struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; if (ibsrq->event_handler) { event.device = ibsrq->device; event.element.srq = ibsrq; switch (type) { case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: event.event = IB_EVENT_SRQ_LIMIT_REACHED; break; case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: event.event = IB_EVENT_SRQ_ERR; break; default: pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n", type, srq->srqn); return; } ibsrq->event_handler(&event, ibsrq->srq_context); } } static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, struct mlx5_create_srq_mbox_in **in, struct ib_udata *udata, int buf_size, int *inlen) { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_create_srq ucmd; size_t ucmdlen; int err; int npages; int page_shift; int ncont; u32 offset; ucmdlen = (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) < sizeof(ucmd)) ? 
(sizeof(ucmd) - sizeof(ucmd.reserved)) : sizeof(ucmd); if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { mlx5_ib_dbg(dev, "failed copy udata\n"); return -EFAULT; } if (ucmdlen == sizeof(ucmd) && ucmd.reserved != 0) return -EINVAL; srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, 0, 0); if (IS_ERR(srq->umem)) { mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size); err = PTR_ERR(srq->umem); return err; } mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, &page_shift, &ncont, NULL); err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset); if (err) { mlx5_ib_warn(dev, "bad offset\n"); goto err_umem; } *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; *in = mlx5_vzalloc(*inlen); if (!(*in)) { err = -ENOMEM; goto err_umem; } mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0); err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context), ucmd.db_addr, &srq->db); if (err) { mlx5_ib_dbg(dev, "map doorbell failed\n"); goto err_in; } (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); return 0; err_in: kvfree(*in); err_umem: ib_umem_release(srq->umem); return err; } static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, struct mlx5_create_srq_mbox_in **in, int buf_size, int *inlen) { int err; int i; struct mlx5_wqe_srq_next_seg *next; int page_shift; int npages; err = mlx5_db_alloc(dev->mdev, &srq->db); if (err) { mlx5_ib_warn(dev, "alloc dbell rec failed\n"); return err; } if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) { mlx5_ib_dbg(dev, "buf alloc failed\n"); err = -ENOMEM; goto err_db; } page_shift = srq->buf.page_shift; srq->head = 0; srq->tail = srq->msrq.max - 1; srq->wqe_ctr = 0; for (i = 0; i < srq->msrq.max; i++) { next = get_wqe(srq, i); next->next_wqe_index = cpu_to_be16((i + 1) & (srq->msrq.max - 1)); } npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - 
PAGE_SHIFT)); mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n", buf_size, page_shift, srq->buf.npages, npages); *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages; *in = mlx5_vzalloc(*inlen); if (!*in) { err = -ENOMEM; goto err_buf; } mlx5_fill_page_array(&srq->buf, (*in)->pas); srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL); if (!srq->wrid) { mlx5_ib_dbg(dev, "kmalloc failed %lu\n", (unsigned long)(srq->msrq.max * sizeof(u64))); err = -ENOMEM; goto err_in; } srq->wq_sig = !!srq_signature; (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; return 0; err_in: kvfree(*in); err_buf: mlx5_buf_free(dev->mdev, &srq->buf); err_db: mlx5_db_free(dev->mdev, &srq->db); return err; } static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq) { mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); ib_umem_release(srq->umem); } static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq) { kfree(srq->wrid); mlx5_buf_free(dev->mdev, &srq->buf); mlx5_db_free(dev->mdev, &srq->db); } struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *init_attr, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_srq *srq; int desc_size; int buf_size; int err; struct mlx5_create_srq_mbox_in *uninitialized_var(in); int uninitialized_var(inlen); int is_xrc; u32 flgs, xrcdn; __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); /* Sanity check SRQ size before proceeding */ if (init_attr->attr.max_wr >= max_srq_wqes) { mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", init_attr->attr.max_wr, max_srq_wqes); return ERR_PTR(-EINVAL); } srq = kmalloc(sizeof(*srq), GFP_KERNEL); if (!srq) return ERR_PTR(-ENOMEM); mutex_init(&srq->mutex); spin_lock_init(&srq->lock); srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); srq->msrq.max_gs = init_attr->attr.max_sge; desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + srq->msrq.max_gs * 
sizeof(struct mlx5_wqe_data_seg); desc_size = roundup_pow_of_two(desc_size); desc_size = max_t(int, 32, desc_size); srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / sizeof(struct mlx5_wqe_data_seg); srq->msrq.wqe_shift = ilog2(desc_size); buf_size = srq->msrq.max * desc_size; mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, srq->msrq.max_avail_gather); if (pd->uobject) err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); else err = create_srq_kernel(dev, srq, &in, buf_size, &inlen); if (err) { mlx5_ib_warn(dev, "create srq %s failed, err %d\n", pd->uobject ? "user" : "kernel", err); goto err_srq; } is_xrc = (init_attr->srq_type == IB_SRQT_XRC); in->ctx.state_log_sz = ilog2(srq->msrq.max); flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; xrcdn = 0; if (is_xrc) { xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn; in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn); } else if (init_attr->srq_type == IB_SRQT_BASIC) { xrcdn = to_mxrcd(dev->devr.x0)->xrcdn; in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn); } in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF)); in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); in->ctx.db_record = cpu_to_be64(srq->db.dma); err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc); kvfree(in); if (err) { mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); goto err_usr_kern_srq; } mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); srq->msrq.event = mlx5_ib_srq_event; srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; if (pd->uobject) if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) { mlx5_ib_dbg(dev, "copy to user failed\n"); err = -EFAULT; goto err_core; } init_attr->attr.max_wr = srq->msrq.max - 1; return &srq->ibsrq; err_core: mlx5_core_destroy_srq(dev->mdev, 
&srq->msrq); err_usr_kern_srq: if (pd->uobject) destroy_srq_user(pd, srq); else destroy_srq_kernel(dev, srq); err_srq: kfree(srq); return ERR_PTR(err); } int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); struct mlx5_ib_srq *srq = to_msrq(ibsrq); int ret; /* We don't support resizing SRQs yet */ if (attr_mask & IB_SRQ_MAX_WR) return -EINVAL; if (attr_mask & IB_SRQ_LIMIT) { if (attr->srq_limit >= srq->msrq.max) return -EINVAL; mutex_lock(&srq->mutex); ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1); mutex_unlock(&srq->mutex); if (ret) return ret; } return 0; } int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) { struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); struct mlx5_ib_srq *srq = to_msrq(ibsrq); int ret; struct mlx5_query_srq_mbox_out *out; out = kzalloc(sizeof(*out), GFP_KERNEL); if (!out) return -ENOMEM; ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out); if (ret) goto out_box; srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm); srq_attr->max_wr = srq->msrq.max - 1; srq_attr->max_sge = srq->msrq.max_gs; out_box: kfree(out); return ret; } int mlx5_ib_destroy_srq(struct ib_srq *srq) { struct mlx5_ib_dev *dev = to_mdev(srq->device); struct mlx5_ib_srq *msrq = to_msrq(srq); mlx5_core_destroy_srq(dev->mdev, &msrq->msrq); if (srq->uobject) { mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); ib_umem_release(msrq->umem); } else { destroy_srq_kernel(dev, msrq); } kfree(srq); return 0; } void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index) { struct mlx5_wqe_srq_next_seg *next; /* always called with interrupts disabled. 
*/ spin_lock(&srq->lock); next = get_wqe(srq, srq->tail); next->next_wqe_index = cpu_to_be16(wqe_index); srq->tail = wqe_index; spin_unlock(&srq->lock); } int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct mlx5_ib_srq *srq = to_msrq(ibsrq); struct mlx5_wqe_srq_next_seg *next; struct mlx5_wqe_data_seg *scat; unsigned long flags; int err = 0; int nreq; int i; spin_lock_irqsave(&srq->lock, flags); for (nreq = 0; wr; nreq++, wr = wr->next) { if (unlikely(wr->num_sge > srq->msrq.max_gs)) { err = -EINVAL; *bad_wr = wr; break; } if (unlikely(srq->head == srq->tail)) { err = -ENOMEM; *bad_wr = wr; break; } srq->wrid[srq->head] = wr->wr_id; next = get_wqe(srq, srq->head); srq->head = be16_to_cpu(next->next_wqe_index); scat = (struct mlx5_wqe_data_seg *)(next + 1); for (i = 0; i < wr->num_sge; i++) { scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); } if (i < srq->msrq.max_avail_gather) { scat[i].byte_count = 0; scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); scat[i].addr = 0; } } if (likely(nreq)) { srq->wqe_ctr += nreq; /* Make sure that descriptors are written before * doorbell record. */ wmb(); *srq->db.db = cpu_to_be32(srq->wqe_ctr); } spin_unlock_irqrestore(&srq->lock, flags); return err; }
gpl-2.0
jakeclawson/linux
arch/x86/kernel/step.c
549
6182
/* * x86 single-step support code, common to 32-bit and 64-bit. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> #include <asm/mmu_context.h> unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { unsigned long addr, seg; addr = regs->ip; seg = regs->cs & 0xffff; if (v8086_mode(regs)) { addr = (addr & 0xffff) + (seg << 4); return addr; } #ifdef CONFIG_MODIFY_LDT_SYSCALL /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the * TLS segments are used for data, and the PNPBIOS * and APM bios ones we just ignore here. */ if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc; unsigned long base; seg >>= 3; mutex_lock(&child->mm->context.lock); if (unlikely(!child->mm->context.ldt || seg >= child->mm->context.ldt->size)) addr = -1L; /* bogus selector, access would fault */ else { desc = &child->mm->context.ldt->entries[seg]; base = get_desc_base(desc); /* 16-bit code segment? */ if (!desc->d) addr &= 0xffff; addr += base; } mutex_unlock(&child->mm->context.lock); } #endif return addr; } static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) { int i, copied; unsigned char opcode[15]; unsigned long addr = convert_ip_to_linear(child, regs); copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); for (i = 0; i < copied; i++) { switch (opcode[i]) { /* popf and iret */ case 0x9d: case 0xcf: return 1; /* CHECKME: 64 65 */ /* opcode and address size prefixes */ case 0x66: case 0x67: continue; /* irrelevant prefixes (segment overrides and repeats) */ case 0x26: case 0x2e: case 0x36: case 0x3e: case 0x64: case 0x65: case 0xf0: case 0xf2: case 0xf3: continue; #ifdef CONFIG_X86_64 case 0x40 ... 0x4f: if (!user_64bit_mode(regs)) /* 32-bit mode: register increment */ return 0; /* 64-bit mode: REX prefix */ continue; #endif /* CHECKME: f2, f3 */ /* * pushf: NOTE! 
We should probably not let * the user see the TF bit being set. But * it's more pain than it's worth to avoid * it, and a debugger could emulate this * all in user space if it _really_ cares. */ case 0x9c: default: return 0; } } return 0; } /* * Enable single-stepping. Return nonzero if user mode is not using TF itself. */ static int enable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); unsigned long oflags; /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. * If user-mode had set TF itself, then it's still clear from * do_debug() and we need to set it again to restore the user * state so we don't wrongly set TIF_FORCED_TF below. * If enable_single_step() was used last and that is what * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are * already set and our bookkeeping is fine. */ if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP))) regs->flags |= X86_EFLAGS_TF; /* * Always set TIF_SINGLESTEP - this guarantees that * we single-step system calls etc.. This will also * cause us to set TF when returning to user mode. */ set_tsk_thread_flag(child, TIF_SINGLESTEP); oflags = regs->flags; /* Set TF on the kernel stack.. */ regs->flags |= X86_EFLAGS_TF; /* * ..but if TF is changed by the instruction we will trace, * don't mark it as being "us" that set it, so that we * won't clear it by hand later. * * Note that if we don't actually execute the popf because * of a signal arriving right now or suchlike, we will lose * track of the fact that it really was "us" that set it. */ if (is_setting_trap_flag(child, regs)) { clear_tsk_thread_flag(child, TIF_FORCED_TF); return 0; } /* * If TF was already set, check whether it was us who set it. * If not, we should never attempt a block step. 
*/ if (oflags & X86_EFLAGS_TF) return test_tsk_thread_flag(child, TIF_FORCED_TF); set_tsk_thread_flag(child, TIF_FORCED_TF); return 1; } void set_task_blockstep(struct task_struct *task, bool on) { unsigned long debugctl; /* * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. * * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if * task is current or it can't be running, otherwise we can race * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but * PTRACE_KILL is not safe. */ local_irq_disable(); debugctl = get_debugctlmsr(); if (on) { debugctl |= DEBUGCTLMSR_BTF; set_tsk_thread_flag(task, TIF_BLOCKSTEP); } else { debugctl &= ~DEBUGCTLMSR_BTF; clear_tsk_thread_flag(task, TIF_BLOCKSTEP); } if (task == current) update_debugctlmsr(debugctl); local_irq_enable(); } /* * Enable single or block step. */ static void enable_step(struct task_struct *child, bool block) { /* * Make sure block stepping (BTF) is not enabled unless it should be. * Note that we don't try to worry about any is_setting_trap_flag() * instructions after the first when using block stepping. * So no one should try to use debugger block stepping in a program * that uses user-mode single stepping itself. */ if (enable_single_step(child) && block) set_task_blockstep(child, true); else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); } void user_enable_single_step(struct task_struct *child) { enable_step(child, 0); } void user_enable_block_step(struct task_struct *child) { enable_step(child, 1); } void user_disable_single_step(struct task_struct *child) { /* * Make sure block stepping (BTF) is disabled. */ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); /* Always clear TIF_SINGLESTEP... */ clear_tsk_thread_flag(child, TIF_SINGLESTEP); /* But touch TF only if it was set by us.. 
*/ if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) task_pt_regs(child)->flags &= ~X86_EFLAGS_TF; }
gpl-2.0
kingklick/kk-glacier-kernel
arch/mips/bcm63xx/dev-uart.c
549
1114
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <bcm63xx_cpu.h> static struct resource uart_resources[] = { { .start = -1, /* filled at runtime */ .end = -1, /* filled at runtime */ .flags = IORESOURCE_MEM, }, { .start = -1, /* filled at runtime */ .flags = IORESOURCE_IRQ, }, }; static struct platform_device bcm63xx_uart_device = { .name = "bcm63xx_uart", .id = 0, .num_resources = ARRAY_SIZE(uart_resources), .resource = uart_resources, }; int __init bcm63xx_uart_register(void) { uart_resources[0].start = bcm63xx_regset_address(RSET_UART0); uart_resources[0].end = uart_resources[0].start; uart_resources[0].end += RSET_UART_SIZE - 1; uart_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0); return platform_device_register(&bcm63xx_uart_device); } arch_initcall(bcm63xx_uart_register);
gpl-2.0
Split-Screen/android_kernel_mediatek_sprout
net/sched/act_api.c
1317
24430
/* * net/sched/act_api.c Packet action API. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Author: Jamal Hadi Salim * * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/err.h> #include <linux/module.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/sch_generic.h> #include <net/act_api.h> #include <net/netlink.h> void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) { unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); struct tcf_common **p1p; for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) { if (*p1p == p) { write_lock_bh(hinfo->lock); *p1p = p->tcfc_next; write_unlock_bh(hinfo->lock); gen_kill_estimator(&p->tcfc_bstats, &p->tcfc_rate_est); /* * gen_estimator est_timer() might access p->tcfc_lock * or bstats, wait a RCU grace period before freeing p */ kfree_rcu(p, tcfc_rcu); return; } } WARN_ON(1); } EXPORT_SYMBOL(tcf_hash_destroy); int tcf_hash_release(struct tcf_common *p, int bind, struct tcf_hashinfo *hinfo) { int ret = 0; if (p) { if (bind) p->tcfc_bindcnt--; p->tcfc_refcnt--; if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) { tcf_hash_destroy(p, hinfo); ret = 1; } } return ret; } EXPORT_SYMBOL(tcf_hash_release); static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, struct tc_action *a, struct tcf_hashinfo *hinfo) { struct tcf_common *p; int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; struct nlattr *nest; read_lock_bh(hinfo->lock); s_i = cb->args[0]; for (i = 0; i < (hinfo->hmask + 1); i++) { p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; for (; p; p = p->tcfc_next) { index++; if (index < s_i) continue; a->priv = 
p; a->order = n_i; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_1(skb, a, 0, 0); if (err < 0) { index--; nlmsg_trim(skb, nest); goto done; } nla_nest_end(skb, nest); n_i++; if (n_i >= TCA_ACT_MAX_PRIO) goto done; } } done: read_unlock_bh(hinfo->lock); if (n_i) cb->args[0] += n_i; return n_i; nla_put_failure: nla_nest_cancel(skb, nest); goto done; } static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, struct tcf_hashinfo *hinfo) { struct tcf_common *p, *s_p; struct nlattr *nest; int i = 0, n_i = 0; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; if (nla_put_string(skb, TCA_KIND, a->ops->kind)) goto nla_put_failure; for (i = 0; i < (hinfo->hmask + 1); i++) { p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; while (p != NULL) { s_p = p->tcfc_next; if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) module_put(a->ops->owner); n_i++; p = s_p; } } if (nla_put_u32(skb, TCA_FCNT, n_i)) goto nla_put_failure; nla_nest_end(skb, nest); return n_i; nla_put_failure: nla_nest_cancel(skb, nest); return -EINVAL; } int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type, struct tc_action *a) { struct tcf_hashinfo *hinfo = a->ops->hinfo; if (type == RTM_DELACTION) { return tcf_del_walker(skb, a, hinfo); } else if (type == RTM_GETACTION) { return tcf_dump_walker(skb, cb, a, hinfo); } else { WARN(1, "tcf_generic_walker: unknown action %d\n", type); return -EINVAL; } } EXPORT_SYMBOL(tcf_generic_walker); struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo) { struct tcf_common *p; read_lock_bh(hinfo->lock); for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p; p = p->tcfc_next) { if (p->tcfc_index == index) break; } read_unlock_bh(hinfo->lock); return p; } EXPORT_SYMBOL(tcf_hash_lookup); u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo) { u32 val = *idx_gen; do { if (++val == 0) val = 1; } while (tcf_hash_lookup(val, hinfo)); return 
(*idx_gen = val); } EXPORT_SYMBOL(tcf_hash_new_index); int tcf_hash_search(struct tc_action *a, u32 index) { struct tcf_hashinfo *hinfo = a->ops->hinfo; struct tcf_common *p = tcf_hash_lookup(index, hinfo); if (p) { a->priv = p; return 1; } return 0; } EXPORT_SYMBOL(tcf_hash_search); struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind, struct tcf_hashinfo *hinfo) { struct tcf_common *p = NULL; if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) { if (bind) p->tcfc_bindcnt++; p->tcfc_refcnt++; a->priv = p; } return p; } EXPORT_SYMBOL(tcf_hash_check); struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo) { struct tcf_common *p = kzalloc(size, GFP_KERNEL); if (unlikely(!p)) return ERR_PTR(-ENOMEM); p->tcfc_refcnt = 1; if (bind) p->tcfc_bindcnt = 1; spin_lock_init(&p->tcfc_lock); p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo); p->tcfc_tm.install = jiffies; p->tcfc_tm.lastuse = jiffies; if (est) { int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, &p->tcfc_lock, est); if (err) { kfree(p); return ERR_PTR(err); } } a->priv = (void *) p; return p; } EXPORT_SYMBOL(tcf_hash_create); void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo) { unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); write_lock_bh(hinfo->lock); p->tcfc_next = hinfo->htab[h]; hinfo->htab[h] = p; write_unlock_bh(hinfo->lock); } EXPORT_SYMBOL(tcf_hash_insert); static struct tc_action_ops *act_base = NULL; static DEFINE_RWLOCK(act_mod_lock); int tcf_register_action(struct tc_action_ops *act) { struct tc_action_ops *a, **ap; write_lock(&act_mod_lock); for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) { if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) { write_unlock(&act_mod_lock); return -EEXIST; } } act->next = NULL; *ap = act; write_unlock(&act_mod_lock); return 0; } EXPORT_SYMBOL(tcf_register_action); int 
tcf_unregister_action(struct tc_action_ops *act) { struct tc_action_ops *a, **ap; int err = -ENOENT; write_lock(&act_mod_lock); for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) if (a == act) break; if (a) { *ap = a->next; a->next = NULL; err = 0; } write_unlock(&act_mod_lock); return err; } EXPORT_SYMBOL(tcf_unregister_action); /* lookup by name */ static struct tc_action_ops *tc_lookup_action_n(char *kind) { struct tc_action_ops *a = NULL; if (kind) { read_lock(&act_mod_lock); for (a = act_base; a; a = a->next) { if (strcmp(kind, a->kind) == 0) { if (!try_module_get(a->owner)) { read_unlock(&act_mod_lock); return NULL; } break; } } read_unlock(&act_mod_lock); } return a; } /* lookup by nlattr */ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind) { struct tc_action_ops *a = NULL; if (kind) { read_lock(&act_mod_lock); for (a = act_base; a; a = a->next) { if (nla_strcmp(kind, a->kind) == 0) { if (!try_module_get(a->owner)) { read_unlock(&act_mod_lock); return NULL; } break; } } read_unlock(&act_mod_lock); } return a; } #if 0 /* lookup by id */ static struct tc_action_ops *tc_lookup_action_id(u32 type) { struct tc_action_ops *a = NULL; if (type) { read_lock(&act_mod_lock); for (a = act_base; a; a = a->next) { if (a->type == type) { if (!try_module_get(a->owner)) { read_unlock(&act_mod_lock); return NULL; } break; } } read_unlock(&act_mod_lock); } return a; } #endif int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act, struct tcf_result *res) { const struct tc_action *a; int ret = -1; if (skb->tc_verd & TC_NCLS) { skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); ret = TC_ACT_OK; goto exec_done; } while ((a = act) != NULL) { repeat: if (a->ops && a->ops->act) { ret = a->ops->act(skb, a, res); if (TC_MUNGED & skb->tc_verd) { /* copied already, allow trampling */ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd); } if (ret == TC_ACT_REPEAT) goto repeat; /* we need a ttl - JHS */ if (ret != TC_ACT_PIPE) 
goto exec_done; } act = a->next; } exec_done: return ret; } EXPORT_SYMBOL(tcf_action_exec); void tcf_action_destroy(struct tc_action *act, int bind) { struct tc_action *a; for (a = act; a; a = act) { if (a->ops && a->ops->cleanup) { if (a->ops->cleanup(a, bind) == ACT_P_DELETED) module_put(a->ops->owner); act = act->next; kfree(a); } else { /*FIXME: Remove later - catch insertion bugs*/ WARN(1, "tcf_action_destroy: BUG? destroying NULL ops\n"); act = act->next; kfree(a); } } } int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { int err = -EINVAL; if (a->ops == NULL || a->ops->dump == NULL) return err; return a->ops->dump(skb, a, bind, ref); } int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { int err = -EINVAL; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; if (a->ops == NULL || a->ops->dump == NULL) return err; if (nla_put_string(skb, TCA_KIND, a->ops->kind)) goto nla_put_failure; if (tcf_action_copy_stats(skb, a, 0)) goto nla_put_failure; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_old(skb, a, bind, ref); if (err > 0) { nla_nest_end(skb, nest); return err; } nla_put_failure: nlmsg_trim(skb, b); return -1; } EXPORT_SYMBOL(tcf_action_dump_1); int tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref) { struct tc_action *a; int err = -EINVAL; struct nlattr *nest; while ((a = act) != NULL) { act = a->next; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_1(skb, a, bind, ref); if (err < 0) goto errout; nla_nest_end(skb, nest); } return 0; nla_put_failure: err = -EINVAL; errout: nla_nest_cancel(skb, nest); return err; } struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind) { struct tc_action *a; struct tc_action_ops *a_o; char act_name[IFNAMSIZ]; struct nlattr *tb[TCA_ACT_MAX + 1]; 
struct nlattr *kind; int err; if (name == NULL) { err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; kind = tb[TCA_ACT_KIND]; if (kind == NULL) goto err_out; if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) goto err_out; } else { err = -EINVAL; if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) goto err_out; } a_o = tc_lookup_action_n(act_name); if (a_o == NULL) { #ifdef CONFIG_MODULES rtnl_unlock(); request_module("act_%s", act_name); rtnl_lock(); a_o = tc_lookup_action_n(act_name); /* We dropped the RTNL semaphore in order to * perform the module load. So, even if we * succeeded in loading the module we have to * tell the caller to replay the request. We * indicate this using -EAGAIN. */ if (a_o != NULL) { err = -EAGAIN; goto err_mod; } #endif err = -ENOENT; goto err_out; } err = -ENOMEM; a = kzalloc(sizeof(*a), GFP_KERNEL); if (a == NULL) goto err_mod; /* backward compatibility for policer */ if (name == NULL) err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind); else err = a_o->init(net, nla, est, a, ovr, bind); if (err < 0) goto err_free; /* module count goes up only when brand new policy is created * if it exists and is only bound to in a_o->init() then * ACT_P_CREATED is not returned (a zero is). 
*/ if (err != ACT_P_CREATED) module_put(a_o->owner); a->ops = a_o; return a; err_free: kfree(a); err_mod: module_put(a_o->owner); err_out: return ERR_PTR(err); } struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind) { struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *head = NULL, *act, *act_prev = NULL; int err; int i; err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); if (err < 0) return ERR_PTR(err); for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_init_1(net, tb[i], est, name, ovr, bind); if (IS_ERR(act)) goto err; act->order = i; if (head == NULL) head = act; else act_prev->next = act; act_prev = act; } return head; err: if (head != NULL) tcf_action_destroy(head, bind); return act; } int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, int compat_mode) { int err = 0; struct gnet_dump d; struct tcf_act_hdr *h = a->priv; if (h == NULL) goto errout; /* compat_mode being true specifies a call that is supposed * to add additional backward compatibility statistic TLVs. 
*/ if (compat_mode) { if (a->type == TCA_OLD_COMPAT) err = gnet_stats_start_copy_compat(skb, 0, TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d); else return 0; } else err = gnet_stats_start_copy(skb, TCA_ACT_STATS, &h->tcf_lock, &d); if (err < 0) goto errout; if (a->ops != NULL && a->ops->get_stats != NULL) if (a->ops->get_stats(skb, a) < 0) goto errout; if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 || gnet_stats_copy_rate_est(&d, &h->tcf_bstats, &h->tcf_rate_est) < 0 || gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0) goto errout; if (gnet_stats_finish_copy(&d) < 0) goto errout; return 0; errout: return -1; } static int tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 portid, u32 seq, u16 flags, int event, int bind, int ref) { struct tcamsg *t; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags); if (!nlh) goto out_nlmsg_trim; t = nlmsg_data(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto out_nlmsg_trim; if (tcf_action_dump(skb, a, bind, ref) < 0) goto out_nlmsg_trim; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; out_nlmsg_trim: nlmsg_trim(skb, b); return -1; } static int act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, struct tc_action *a, int event) { struct sk_buff *skb; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (tca_get_fill(skb, a, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) { kfree_skb(skb); return -EINVAL; } return rtnl_unicast(skb, net, portid); } static struct tc_action * tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid) { struct nlattr *tb[TCA_ACT_MAX + 1]; struct tc_action *a; int index; int err; err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; if (tb[TCA_ACT_INDEX] == NULL || nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) goto 
err_out; index = nla_get_u32(tb[TCA_ACT_INDEX]); err = -ENOMEM; a = kzalloc(sizeof(struct tc_action), GFP_KERNEL); if (a == NULL) goto err_out; err = -EINVAL; a->ops = tc_lookup_action(tb[TCA_ACT_KIND]); if (a->ops == NULL) goto err_free; if (a->ops->lookup == NULL) goto err_mod; err = -ENOENT; if (a->ops->lookup(a, index) == 0) goto err_mod; module_put(a->ops->owner); return a; err_mod: module_put(a->ops->owner); err_free: kfree(a); err_out: return ERR_PTR(err); } static void cleanup_a(struct tc_action *act) { struct tc_action *a; for (a = act; a; a = act) { act = a->next; kfree(a); } } static struct tc_action *create_a(int i) { struct tc_action *act; act = kzalloc(sizeof(*act), GFP_KERNEL); if (act == NULL) { pr_debug("create_a: failed to alloc!\n"); return NULL; } act->order = i; return act; } static int tca_action_flush(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 portid) { struct sk_buff *skb; unsigned char *b; struct nlmsghdr *nlh; struct tcamsg *t; struct netlink_callback dcb; struct nlattr *nest; struct nlattr *tb[TCA_ACT_MAX + 1]; struct nlattr *kind; struct tc_action *a = create_a(0); int err = -ENOMEM; if (a == NULL) { pr_debug("tca_action_flush: couldnt create tc_action\n"); return err; } skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { pr_debug("tca_action_flush: failed skb alloc\n"); kfree(a); return err; } b = skb_tail_pointer(skb); err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; kind = tb[TCA_ACT_KIND]; a->ops = tc_lookup_action(kind); if (a->ops == NULL) goto err_out; nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0); if (!nlh) goto out_module_put; t = nlmsg_data(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto out_module_put; err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); if (err < 0) goto out_module_put; if (err == 0) goto noflush_out; nla_nest_end(skb, nest); 
nlh->nlmsg_len = skb_tail_pointer(skb) - b; nlh->nlmsg_flags |= NLM_F_ROOT; module_put(a->ops->owner); kfree(a); err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); if (err > 0) return 0; return err; out_module_put: module_put(a->ops->owner); err_out: noflush_out: kfree_skb(skb); kfree(a); return err; } static int tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 portid, int event) { int i, ret; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *head = NULL, *act, *act_prev = NULL; ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); if (ret < 0) return ret; if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { if (tb[1] != NULL) return tca_action_flush(net, tb[1], n, portid); else return -EINVAL; } for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_get_1(tb[i], n, portid); if (IS_ERR(act)) { ret = PTR_ERR(act); goto err; } act->order = i; if (head == NULL) head = act; else act_prev->next = act; act_prev = act; } if (event == RTM_GETACTION) ret = act_get_notify(net, portid, n, head, event); else { /* delete */ struct sk_buff *skb; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { ret = -ENOBUFS; goto err; } if (tca_get_fill(skb, head, portid, n->nlmsg_seq, 0, event, 0, 1) <= 0) { kfree_skb(skb); ret = -EINVAL; goto err; } /* now do the delete */ tcf_action_destroy(head, 0); ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); if (ret > 0) return 0; return ret; } err: cleanup_a(head); return ret; } static int tcf_add_notify(struct net *net, struct tc_action *a, u32 portid, u32 seq, int event, u16 flags) { struct tcamsg *t; struct nlmsghdr *nlh; struct sk_buff *skb; struct nlattr *nest; unsigned char *b; int err = 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; b = skb_tail_pointer(skb); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags); if (!nlh) goto out_kfree_skb; t = nlmsg_data(nlh); t->tca_family = 
AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto out_kfree_skb; if (tcf_action_dump(skb, a, 0, 0) < 0) goto out_kfree_skb; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; NETLINK_CB(skb).dst_group = RTNLGRP_TC; err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); if (err > 0) err = 0; return err; out_kfree_skb: kfree_skb(skb); return -1; } static int tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 portid, int ovr) { int ret = 0; struct tc_action *act; struct tc_action *a; u32 seq = n->nlmsg_seq; act = tcf_action_init(net, nla, NULL, NULL, ovr, 0); if (act == NULL) goto done; if (IS_ERR(act)) { ret = PTR_ERR(act); goto done; } /* dump then free all the actions after update; inserted policy * stays intact */ ret = tcf_add_notify(net, act, portid, seq, RTM_NEWACTION, n->nlmsg_flags); for (a = act; a; a = act) { act = a->next; kfree(a); } done: return ret; } static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_ACT_MAX + 1]; u32 portid = skb ? 
NETLINK_CB(skb).portid : 0; int ret = 0, ovr = 0; if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); if (ret < 0) return ret; if (tca[TCA_ACT_TAB] == NULL) { pr_notice("tc_ctl_action: received NO action attribs\n"); return -EINVAL; } /* n->nlmsg_flags & NLM_F_CREATE */ switch (n->nlmsg_type) { case RTM_NEWACTION: /* we are going to assume all other flags * imply create only if it doesn't exist * Note that CREATE | EXCL implies that * but since we want avoid ambiguity (eg when flags * is zero) then just set this */ if (n->nlmsg_flags & NLM_F_REPLACE) ovr = 1; replay: ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr); if (ret == -EAGAIN) goto replay; break; case RTM_DELACTION: ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, portid, RTM_DELACTION); break; case RTM_GETACTION: ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, portid, RTM_GETACTION); break; default: BUG(); } return ret; } static struct nlattr * find_dump_kind(const struct nlmsghdr *n) { struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1]; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct nlattr *nla[TCAA_MAX + 1]; struct nlattr *kind; if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0) return NULL; tb1 = nla[TCA_ACT_TAB]; if (tb1 == NULL) return NULL; if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL) < 0) return NULL; if (tb[1] == NULL) return NULL; if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]), nla_len(tb[1]), NULL) < 0) return NULL; kind = tb2[TCA_ACT_KIND]; return kind; } static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) { struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; struct tc_action_ops *a_o; struct tc_action a; int ret = 0; struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh); struct nlattr *kind = find_dump_kind(cb->nlh); if (kind == NULL) { pr_info("tc_dump_action: action bad 
kind\n"); return 0; } a_o = tc_lookup_action(kind); if (a_o == NULL) return 0; memset(&a, 0, sizeof(struct tc_action)); a.ops = a_o; if (a_o->walk == NULL) { WARN(1, "tc_dump_action: %s !capable of dumping table\n", a_o->kind); goto out_module_put; } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*t), 0); if (!nlh) goto out_module_put; t = nlmsg_data(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto out_module_put; ret = a_o->walk(skb, cb, RTM_GETACTION, &a); if (ret < 0) goto out_module_put; if (ret > 0) { nla_nest_end(skb, nest); ret = skb->len; } else nla_nest_cancel(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; if (NETLINK_CB(cb->skb).portid && ret) nlh->nlmsg_flags |= NLM_F_MULTI; module_put(a_o->owner); return skb->len; out_module_put: module_put(a_o->owner); nlmsg_trim(skb, b); return skb->len; } static int __init tc_action_init(void) { rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action, NULL); return 0; } subsys_initcall(tc_action_init);
gpl-2.0
sukso96100/WhiteBeam-Kernel-For-YP-GB1
fs/jffs2/build.c
1573
11085
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mtd/mtd.h> #include "nodelist.h" static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **); static inline struct jffs2_inode_cache * first_inode_chain(int *i, struct jffs2_sb_info *c) { for (; *i < INOCACHE_HASHSIZE; (*i)++) { if (c->inocache_list[*i]) return c->inocache_list[*i]; } return NULL; } static inline struct jffs2_inode_cache * next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) { /* More in this chain? */ if (ic->next) return ic->next; (*i)++; return first_inode_chain(i, c); } #define for_each_inode(i, c, ic) \ for (i = 0, ic = first_inode_chain(&i, (c)); \ ic; \ ic = next_inode(&i, ic, (c))) static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) { struct jffs2_full_dirent *fd; dbg_fsbuild("building directory inode #%u\n", ic->ino); /* For each child, increase nlink */ for(fd = ic->scan_dents; fd; fd = fd->next) { struct jffs2_inode_cache *child_ic; if (!fd->ino) continue; /* we can get high latency here with huge directories */ child_ic = jffs2_get_ino_cache(c, fd->ino); if (!child_ic) { dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", fd->name, fd->ino, ic->ino); jffs2_mark_node_obsolete(c, fd->raw); continue; } if (fd->type == DT_DIR) { if (child_ic->pino_nlink) { JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino); /* TODO: What do we do about it? 
*/ } else { child_ic->pino_nlink = ic->ino; } } else child_ic->pino_nlink++; dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); /* Can't free scan_dents so far. We might need them in pass 2 */ } } /* Scan plan: - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go - Scan directory tree from top down, setting nlink in inocaches - Scan inocaches for inodes with nlink==0 */ static int jffs2_build_filesystem(struct jffs2_sb_info *c) { int ret; int i; struct jffs2_inode_cache *ic; struct jffs2_full_dirent *fd; struct jffs2_full_dirent *dead_fds = NULL; dbg_fsbuild("build FS data structures\n"); /* First, scan the medium and build all the inode caches with lists of physical nodes */ c->flags |= JFFS2_SB_FLAG_SCANNING; ret = jffs2_scan_medium(c); c->flags &= ~JFFS2_SB_FLAG_SCANNING; if (ret) goto exit; dbg_fsbuild("scanned flash completely\n"); jffs2_dbg_dump_block_lists_nolock(c); dbg_fsbuild("pass 1 starting\n"); c->flags |= JFFS2_SB_FLAG_BUILDING; /* Now scan the directory tree, increasing nlink according to every dirent found. */ for_each_inode(i, c, ic) { if (ic->scan_dents) { jffs2_build_inode_pass1(c, ic); cond_resched(); } } dbg_fsbuild("pass 1 complete\n"); /* Next, scan for inodes with nlink == 0 and remove them. If they were directories, then decrement the nlink of their children too, and repeat the scan. As that's going to be a fairly uncommon occurrence, it's not so evil to do it this way. Recursion bad. 
*/ dbg_fsbuild("pass 2 starting\n"); for_each_inode(i, c, ic) { if (ic->pino_nlink) continue; jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); cond_resched(); } dbg_fsbuild("pass 2a starting\n"); while (dead_fds) { fd = dead_fds; dead_fds = fd->next; ic = jffs2_get_ino_cache(c, fd->ino); if (ic) jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); jffs2_free_full_dirent(fd); } dbg_fsbuild("pass 2a complete\n"); dbg_fsbuild("freeing temporary data structures\n"); /* Finally, we can scan again and free the dirent structs */ for_each_inode(i, c, ic) { while(ic->scan_dents) { fd = ic->scan_dents; ic->scan_dents = fd->next; jffs2_free_full_dirent(fd); } ic->scan_dents = NULL; cond_resched(); } jffs2_build_xattr_subsystem(c); c->flags &= ~JFFS2_SB_FLAG_BUILDING; dbg_fsbuild("FS build complete\n"); /* Rotate the lists by some number to ensure wear levelling */ jffs2_rotate_lists(c); ret = 0; exit: if (ret) { for_each_inode(i, c, ic) { while(ic->scan_dents) { fd = ic->scan_dents; ic->scan_dents = fd->next; jffs2_free_full_dirent(fd); } } jffs2_clear_xattr_subsystem(c); } return ret; } static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds) { struct jffs2_raw_node_ref *raw; struct jffs2_full_dirent *fd; dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino); raw = ic->nodes; while (raw != (void *)ic) { struct jffs2_raw_node_ref *next = raw->next_in_ino; dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw)); jffs2_mark_node_obsolete(c, raw); raw = next; } if (ic->scan_dents) { int whinged = 0; dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino); while(ic->scan_dents) { struct jffs2_inode_cache *child_ic; fd = ic->scan_dents; ic->scan_dents = fd->next; if (!fd->ino) { /* It's a deletion dirent. 
Ignore it */ dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name); jffs2_free_full_dirent(fd); continue; } if (!whinged) whinged = 1; dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino); child_ic = jffs2_get_ino_cache(c, fd->ino); if (!child_ic) { dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino); jffs2_free_full_dirent(fd); continue; } /* Reduce nlink of the child. If it's now zero, stick it on the dead_fds list to be cleaned up later. Else just free the fd */ if (fd->type == DT_DIR) child_ic->pino_nlink = 0; else child_ic->pino_nlink--; if (!child_ic->pino_nlink) { dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", fd->ino, fd->name); fd->next = *dead_fds; *dead_fds = fd; } else { dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", fd->ino, fd->name, child_ic->pino_nlink); jffs2_free_full_dirent(fd); } } } /* We don't delete the inocache from the hash list and free it yet. The erase code will do that, when all the nodes are completely gone. */ } static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) { uint32_t size; /* Deletion should almost _always_ be allowed. We're fairly buggered once we stop allowing people to delete stuff because there's not enough free space... */ c->resv_blocks_deletion = 2; /* Be conservative about how much space we need before we allow writes. On top of that which is required for deletia, require an extra 2% of the medium to be available, for overhead caused by nodes being split across blocks, etc. */ size = c->flash_size / 50; /* 2% of flash size */ size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */ size += c->sector_size - 1; /* ... 
and round up */ c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size); /* When do we let the GC thread run in the background */ c->resv_blocks_gctrigger = c->resv_blocks_write + 1; /* When do we allow garbage collection to merge nodes to make long-term progress at the expense of short-term space exhaustion? */ c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; /* When do we allow garbage collection to eat from bad blocks rather than actually making progress? */ c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2; /* What number of 'very dirty' eraseblocks do we allow before we trigger the GC thread even if we don't _need_ the space. When we can't mark nodes obsolete on the medium, the old dirty nodes cause performance problems because we have to inspect and discard them. */ c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger; if (jffs2_can_mark_obsolete(c)) c->vdirty_blocks_gctrigger *= 10; /* If there's less than this amount of dirty space, don't bother trying to GC to make more space. 
It'll be a fruitless task */ c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n", c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n", c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n", c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n", c->nospc_dirty_size); dbg_fsbuild("Very dirty blocks before GC triggered: %d\n", c->vdirty_blocks_gctrigger); } int jffs2_do_mount_fs(struct jffs2_sb_info *c) { int ret; int i; int size; c->free_size = c->flash_size; c->nr_blocks = c->flash_size / c->sector_size; size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; #ifndef __ECOS if (jffs2_blocks_use_vmalloc(c)) c->blocks = vmalloc(size); else #endif c->blocks = kmalloc(size, GFP_KERNEL); if (!c->blocks) return -ENOMEM; memset(c->blocks, 0, size); for (i=0; i<c->nr_blocks; i++) { INIT_LIST_HEAD(&c->blocks[i].list); c->blocks[i].offset = i * c->sector_size; c->blocks[i].free_size = c->sector_size; } INIT_LIST_HEAD(&c->clean_list); INIT_LIST_HEAD(&c->very_dirty_list); INIT_LIST_HEAD(&c->dirty_list); INIT_LIST_HEAD(&c->erasable_list); INIT_LIST_HEAD(&c->erasing_list); INIT_LIST_HEAD(&c->erase_checking_list); INIT_LIST_HEAD(&c->erase_pending_list); INIT_LIST_HEAD(&c->erasable_pending_wbuf_list); INIT_LIST_HEAD(&c->erase_complete_list); INIT_LIST_HEAD(&c->free_list); 
INIT_LIST_HEAD(&c->bad_list); INIT_LIST_HEAD(&c->bad_used_list); c->highest_ino = 1; c->summary = NULL; ret = jffs2_sum_init(c); if (ret) goto out_free; if (jffs2_build_filesystem(c)) { dbg_fsbuild("build_fs failed\n"); jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); ret = -EIO; goto out_free; } jffs2_calc_trigger_levels(c); return 0; out_free: #ifndef __ECOS if (jffs2_blocks_use_vmalloc(c)) vfree(c->blocks); else #endif kfree(c->blocks); return ret; }
gpl-2.0
AntaresOne/AntaresCore-Kernel-h815
drivers/net/ethernet/amd/sunlance.c
2085
40781
/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $ * lance.c: Linux/Sparc/Lance driver * * Written 1995, 1996 by Miguel de Icaza * Sources: * The Linux depca driver * The Linux lance driver. * The Linux skeleton driver. * The NetBSD Sparc/Lance driver. * Theo de Raadt (deraadt@openbsd.org) * NCR92C990 Lan Controller manual * * 1.4: * Added support to run with a ledma on the Sun4m * * 1.5: * Added multiple card detection. * * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost * (ecd@skynet.be) * * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost * (ecd@skynet.be) * * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller * (davem@caip.rutgers.edu) * * 5/29/96: override option 'tpe-link-test?', if it is 'false', as * this disables auto carrier detection on sun4m. Eddie C. Dost * (ecd@skynet.be) * * 1.7: * 6/26/96: Bug fix for multiple ledmas, miguel. * * 1.8: * Stole multicast code from depca.c, fixed lance_tx. * * 1.9: * 8/21/96: Fixed the multicast code (Pedro Roque) * * 8/28/96: Send fake packet in lance_open() if auto_select is true, * so we can detect the carrier loss condition in time. * Eddie C. Dost (ecd@skynet.be) * * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an * MNA trap during chksum_partial_copy(). (ecd@skynet.be) * * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be) * * 12/22/96: Don't loop forever in lance_rx() on incomplete packets. * This was the sun4c killer. Shit, stupid bug. * (ecd@skynet.be) * * 1.10: * 1/26/97: Modularize driver. (ecd@skynet.be) * * 1.11: * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz) * * 1.12: * 11/3/99: Fixed SMP race in lance_start_xmit found by davem. * Anton Blanchard (anton@progsoc.uts.edu.au) * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces. * David S. 
Miller (davem@redhat.com) * 2.01: * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com) * */ #undef DEBUG_DRIVER static char lancestr[] = "LANCE"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/errno.h> #include <linux/socket.h> /* Used for the temporal inet entries and routing */ #include <linux/route.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/pgtable.h> #include <asm/byteorder.h> /* Used by the checksum routines */ #include <asm/idprom.h> #include <asm/prom.h> #include <asm/auxio.h> /* For tpe-link-test? setting */ #include <asm/irq.h> #define DRV_NAME "sunlance" #define DRV_VERSION "2.02" #define DRV_RELDATE "8/24/03" #define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun Lance ethernet driver"); MODULE_LICENSE("GPL"); /* Define: 2^4 Tx buffers and 2^4 Rx buffers */ #ifndef LANCE_LOG_TX_BUFFERS #define LANCE_LOG_TX_BUFFERS 4 #define LANCE_LOG_RX_BUFFERS 4 #endif #define LE_CSR0 0 #define LE_CSR1 1 #define LE_CSR2 2 #define LE_CSR3 3 #define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ #define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ #define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. 
*/ #define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ #define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ #define LE_C0_MERR 0x0800 /* ME: Memory error */ #define LE_C0_RINT 0x0400 /* Received interrupt */ #define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ #define LE_C0_IDON 0x0100 /* IFIN: Init finished. */ #define LE_C0_INTR 0x0080 /* Interrupt or error */ #define LE_C0_INEA 0x0040 /* Interrupt enable */ #define LE_C0_RXON 0x0020 /* Receiver on */ #define LE_C0_TXON 0x0010 /* Transmitter on */ #define LE_C0_TDMD 0x0008 /* Transmitter demand */ #define LE_C0_STOP 0x0004 /* Stop the card */ #define LE_C0_STRT 0x0002 /* Start the card */ #define LE_C0_INIT 0x0001 /* Init the card */ #define LE_C3_BSWP 0x4 /* SWAP */ #define LE_C3_ACON 0x2 /* ALE Control */ #define LE_C3_BCON 0x1 /* Byte control */ /* Receive message descriptor 1 */ #define LE_R1_OWN 0x80 /* Who owns the entry */ #define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */ #define LE_R1_FRA 0x20 /* FRA: Frame error */ #define LE_R1_OFL 0x10 /* OFL: Frame overflow */ #define LE_R1_CRC 0x08 /* CRC error */ #define LE_R1_BUF 0x04 /* BUF: Buffer error */ #define LE_R1_SOP 0x02 /* Start of packet */ #define LE_R1_EOP 0x01 /* End of packet */ #define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ #define LE_T1_OWN 0x80 /* Lance owns the packet */ #define LE_T1_ERR 0x40 /* Error summary */ #define LE_T1_EMORE 0x10 /* Error: more than one retry needed */ #define LE_T1_EONE 0x08 /* Error: one retry needed */ #define LE_T1_EDEF 0x04 /* Error: deferred */ #define LE_T1_SOP 0x02 /* Start of packet */ #define LE_T1_EOP 0x01 /* End of packet */ #define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ #define LE_T3_BUF 0x8000 /* Buffer error */ #define LE_T3_UFL 0x4000 /* Error underflow */ #define LE_T3_LCOL 0x1000 /* Error late collision */ #define LE_T3_CLOS 0x0800 /* Error carrier loss */ #define LE_T3_RTY 0x0400 /* Error retry */ #define LE_T3_TDR 0x03ff /* Time Domain Reflectometry 
counter */ #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) #define TX_RING_MOD_MASK (TX_RING_SIZE - 1) #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) #define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK) #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) #define RX_RING_MOD_MASK (RX_RING_SIZE - 1) #define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) #define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK) #define PKT_BUF_SZ 1544 #define RX_BUFF_SIZE PKT_BUF_SZ #define TX_BUFF_SIZE PKT_BUF_SZ struct lance_rx_desc { u16 rmd0; /* low address of packet */ u8 rmd1_bits; /* descriptor bits */ u8 rmd1_hadr; /* high address of packet */ s16 length; /* This length is 2s complement (negative)! * Buffer length */ u16 mblength; /* This is the actual number of bytes received */ }; struct lance_tx_desc { u16 tmd0; /* low address of packet */ u8 tmd1_bits; /* descriptor bits */ u8 tmd1_hadr; /* high address of packet */ s16 length; /* Length is 2s complement (negative)! */ u16 misc; }; /* The LANCE initialization block, described in databook. */ /* On the Sparc, this block should be on a DMA region */ struct lance_init_block { u16 mode; /* Pre-set mode (reg. 15) */ u8 phys_addr[6]; /* Physical ethernet address */ u32 filter[2]; /* Multicast filter. */ /* Receive and transmit ring base, along with extra bits. */ u16 rx_ptr; /* receive descriptor addr */ u16 rx_len; /* receive len and high addr */ u16 tx_ptr; /* transmit descriptor addr */ u16 tx_len; /* transmit len and high addr */ /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */ struct lance_rx_desc brx_ring[RX_RING_SIZE]; struct lance_tx_desc btx_ring[TX_RING_SIZE]; u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE]; u8 pad[2]; /* align rx_buf for copy_and_sum(). 
*/ u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE]; }; #define libdesc_offset(rt, elem) \ ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem]))))) #define libbuff_offset(rt, elem) \ ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0]))))) struct lance_private { void __iomem *lregs; /* Lance RAP/RDP regs. */ void __iomem *dregs; /* DMA controller regs. */ struct lance_init_block __iomem *init_block_iomem; struct lance_init_block *init_block_mem; spinlock_t lock; int rx_new, tx_new; int rx_old, tx_old; struct platform_device *ledma; /* If set this points to ledma */ char tpe; /* cable-selection is TPE */ char auto_select; /* cable-selection by carrier */ char burst_sizes; /* ledma SBus burst sizes */ char pio_buffer; /* init block in PIO space? */ unsigned short busmaster_regval; void (*init_ring)(struct net_device *); void (*rx)(struct net_device *); void (*tx)(struct net_device *); char *name; dma_addr_t init_block_dvma; struct net_device *dev; /* Backpointer */ struct platform_device *op; struct platform_device *lebuffer; struct timer_list multicast_timer; }; #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ lp->tx_old - lp->tx_new-1) /* Lance registers. */ #define RDP 0x00UL /* register data port */ #define RAP 0x02UL /* register address port */ #define LANCE_REG_SIZE 0x04UL #define STOP_LANCE(__lp) \ do { void __iomem *__base = (__lp)->lregs; \ sbus_writew(LE_CSR0, __base + RAP); \ sbus_writew(LE_C0_STOP, __base + RDP); \ } while (0) int sparc_lance_debug = 2; /* The Lance uses 24 bit addresses */ /* On the Sun4c the DVMA will provide the remaining bytes for us */ /* On the Sun4m we have to instruct the ledma to provide them */ /* Even worse, on scsi/ether SBUS cards, the init block and the * transmit/receive buffers are addresses as offsets from absolute * zero on the lebuffer PIO area. 
-DaveM */ #define LANCE_ADDR(x) ((long)(x) & ~0xff000000) /* Load the CSR registers */ static void load_csrs(struct lance_private *lp) { u32 leptr; if (lp->pio_buffer) leptr = 0; else leptr = LANCE_ADDR(lp->init_block_dvma); sbus_writew(LE_CSR1, lp->lregs + RAP); sbus_writew(leptr & 0xffff, lp->lregs + RDP); sbus_writew(LE_CSR2, lp->lregs + RAP); sbus_writew(leptr >> 16, lp->lregs + RDP); sbus_writew(LE_CSR3, lp->lregs + RAP); sbus_writew(lp->busmaster_regval, lp->lregs + RDP); /* Point back to csr0 */ sbus_writew(LE_CSR0, lp->lregs + RAP); } /* Setup the Lance Rx and Tx rings */ static void lance_init_ring_dvma(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib = lp->init_block_mem; dma_addr_t aib = lp->init_block_dvma; __u32 leptr; int i; /* Lock out other processes while setting up hardware */ netif_stop_queue(dev); lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; /* Copy the ethernet address to the lance init block * Note that on the sparc you need to swap the ethernet address. 
*/ ib->phys_addr [0] = dev->dev_addr [1]; ib->phys_addr [1] = dev->dev_addr [0]; ib->phys_addr [2] = dev->dev_addr [3]; ib->phys_addr [3] = dev->dev_addr [2]; ib->phys_addr [4] = dev->dev_addr [5]; ib->phys_addr [5] = dev->dev_addr [4]; /* Setup the Tx ring entries */ for (i = 0; i < TX_RING_SIZE; i++) { leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i)); ib->btx_ring [i].tmd0 = leptr; ib->btx_ring [i].tmd1_hadr = leptr >> 16; ib->btx_ring [i].tmd1_bits = 0; ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ ib->btx_ring [i].misc = 0; } /* Setup the Rx ring entries */ for (i = 0; i < RX_RING_SIZE; i++) { leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i)); ib->brx_ring [i].rmd0 = leptr; ib->brx_ring [i].rmd1_hadr = leptr >> 16; ib->brx_ring [i].rmd1_bits = LE_R1_OWN; ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; ib->brx_ring [i].mblength = 0; } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0)); ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16); ib->rx_ptr = leptr; /* Setup tx descriptor pointer */ leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0)); ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16); ib->tx_ptr = leptr; } static void lance_init_ring_pio(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block __iomem *ib = lp->init_block_iomem; u32 leptr; int i; /* Lock out other processes while setting up hardware */ netif_stop_queue(dev); lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; /* Copy the ethernet address to the lance init block * Note that on the sparc you need to swap the ethernet address. 
*/ sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]); sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]); sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]); sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]); sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]); sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]); /* Setup the Tx ring entries */ for (i = 0; i < TX_RING_SIZE; i++) { leptr = libbuff_offset(tx_buf, i); sbus_writew(leptr, &ib->btx_ring [i].tmd0); sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr); sbus_writeb(0, &ib->btx_ring [i].tmd1_bits); /* The ones required by tmd2 */ sbus_writew(0xf000, &ib->btx_ring [i].length); sbus_writew(0, &ib->btx_ring [i].misc); } /* Setup the Rx ring entries */ for (i = 0; i < RX_RING_SIZE; i++) { leptr = libbuff_offset(rx_buf, i); sbus_writew(leptr, &ib->brx_ring [i].rmd0); sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr); sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits); sbus_writew(-RX_BUFF_SIZE|0xf000, &ib->brx_ring [i].length); sbus_writew(0, &ib->brx_ring [i].mblength); } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = libdesc_offset(brx_ring, 0); sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16), &ib->rx_len); sbus_writew(leptr, &ib->rx_ptr); /* Setup tx descriptor pointer */ leptr = libdesc_offset(btx_ring, 0); sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16), &ib->tx_len); sbus_writew(leptr, &ib->tx_ptr); } static void init_restart_ledma(struct lance_private *lp) { u32 csr = sbus_readl(lp->dregs + DMA_CSR); if (!(csr & DMA_HNDL_ERROR)) { /* E-Cache draining */ while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) barrier(); } csr = sbus_readl(lp->dregs + DMA_CSR); csr &= ~DMA_E_BURSTS; if (lp->burst_sizes & DMA_BURST32) csr |= DMA_E_BURST32; else csr |= DMA_E_BURST16; csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV); if (lp->tpe) csr |= DMA_EN_ENETAUI; else csr &= ~DMA_EN_ENETAUI; udelay(20); sbus_writel(csr, lp->dregs + DMA_CSR); udelay(200); } static int 
init_restart_lance(struct lance_private *lp) { u16 regval = 0; int i; if (lp->dregs) init_restart_ledma(lp); sbus_writew(LE_CSR0, lp->lregs + RAP); sbus_writew(LE_C0_INIT, lp->lregs + RDP); /* Wait for the lance to complete initialization */ for (i = 0; i < 100; i++) { regval = sbus_readw(lp->lregs + RDP); if (regval & (LE_C0_ERR | LE_C0_IDON)) break; barrier(); } if (i == 100 || (regval & LE_C0_ERR)) { printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n", i, regval); if (lp->dregs) printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR)); return -1; } /* Clear IDON by writing a "1", enable interrupts and start lance */ sbus_writew(LE_C0_IDON, lp->lregs + RDP); sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP); if (lp->dregs) { u32 csr = sbus_readl(lp->dregs + DMA_CSR); csr |= DMA_INT_ENAB; sbus_writel(csr, lp->dregs + DMA_CSR); } return 0; } static void lance_rx_dvma(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib = lp->init_block_mem; struct lance_rx_desc *rd; u8 bits; int len, entry = lp->rx_new; struct sk_buff *skb; for (rd = &ib->brx_ring [entry]; !((bits = rd->rmd1_bits) & LE_R1_OWN); rd = &ib->brx_ring [entry]) { /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (rd->mblength & 0xfff) - 4; skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = RX_NEXT(entry); return; } dev->stats.rx_bytes += len; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ skb_copy_to_linear_data(skb, (unsigned char *)&(ib->rx_buf [entry][0]), len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; } /* Return the packet to the pool */ rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; entry = RX_NEXT(entry); } lp->rx_new = entry; } static void lance_tx_dvma(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib = lp->init_block_mem; int i, j; spin_lock(&lp->lock); j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { struct lance_tx_desc *td = &ib->btx_ring [i]; u8 bits = td->tmd1_bits; /* If we hit a packet not owned by us, stop */ if (bits & LE_T1_OWN) break; if (bits & LE_T1_ERR) { u16 status = td->misc; dev->stats.tx_errors++; if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } /* Buffer errors and underflows turn off the * transmitter, restart the adapter. 
*/ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } else if ((bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ td->tmd1_bits = bits & ~(LE_T1_POK); /* One collision before packet was sent. */ if (bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. */ if (bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = TX_NEXT(j); } lp->tx_old = j; out: if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0) netif_wake_queue(dev); spin_unlock(&lp->lock); } static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len) { u16 *p16 = (u16 *) skb->data; u32 *p32; u8 *p8; void __iomem *pbuf = piobuf; /* We know here that both src and dest are on a 16bit boundary. */ *p16++ = sbus_readw(pbuf); p32 = (u32 *) p16; pbuf += 2; len -= 2; while (len >= 4) { *p32++ = sbus_readl(pbuf); pbuf += 4; len -= 4; } p8 = (u8 *) p32; if (len >= 2) { p16 = (u16 *) p32; *p16++ = sbus_readw(pbuf); pbuf += 2; len -= 2; p8 = (u8 *) p16; } if (len >= 1) *p8 = sbus_readb(pbuf); } static void lance_rx_pio(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block __iomem *ib = lp->init_block_iomem; struct lance_rx_desc __iomem *rd; unsigned char bits; int len, entry; struct sk_buff *skb; entry = lp->rx_new; for (rd = &ib->brx_ring [entry]; !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN); rd = &ib->brx_ring [entry]) { /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (sbus_readw(&rd->mblength) & 0xfff) - 4; skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { dev->stats.rx_dropped++; sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); lp->rx_new = RX_NEXT(entry); return; } dev->stats.rx_bytes += len; skb_reserve (skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; } /* Return the packet to the pool */ sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); entry = RX_NEXT(entry); } lp->rx_new = entry; } static void lance_tx_pio(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block __iomem *ib = lp->init_block_iomem; int i, j; spin_lock(&lp->lock); j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { struct lance_tx_desc __iomem *td = &ib->btx_ring [i]; u8 bits = sbus_readb(&td->tmd1_bits); /* If we hit a packet not owned by us, stop */ if (bits & LE_T1_OWN) break; if (bits & LE_T1_ERR) { u16 status = sbus_readw(&td->misc); dev->stats.tx_errors++; if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } /* Buffer errors and 
underflows turn off the * transmitter, restart the adapter. */ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); STOP_LANCE(lp); lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); goto out; } } else if ((bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits); /* One collision before packet was sent. */ if (bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. */ if (bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = TX_NEXT(j); } lp->tx_old = j; if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0) netif_wake_queue(dev); out: spin_unlock(&lp->lock); } static irqreturn_t lance_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct lance_private *lp = netdev_priv(dev); int csr0; sbus_writew(LE_CSR0, lp->lregs + RAP); csr0 = sbus_readw(lp->lregs + RDP); /* Acknowledge all the interrupt sources ASAP */ sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT), lp->lregs + RDP); if ((csr0 & LE_C0_ERR) != 0) { /* Clear the error condition */ sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_CERR | LE_C0_MERR), lp->lregs + RDP); } if (csr0 & LE_C0_RINT) lp->rx(dev); if (csr0 & LE_C0_TINT) lp->tx(dev); if (csr0 & LE_C0_BABL) dev->stats.tx_errors++; if (csr0 & LE_C0_MISS) dev->stats.rx_errors++; if (csr0 & LE_C0_MERR) { if (lp->dregs) { u32 addr = sbus_readl(lp->dregs + DMA_ADDR); printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n", dev->name, csr0, addr & 0xffffff); } else { printk(KERN_ERR "%s: Memory error, status %04x\n", dev->name, csr0); } sbus_writew(LE_C0_STOP, lp->lregs + RDP); if (lp->dregs) { u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR); dma_csr |= DMA_FIFO_INV; sbus_writel(dma_csr, lp->dregs + DMA_CSR); } lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); netif_wake_queue(dev); } 
sbus_writew(LE_C0_INEA, lp->lregs + RDP); return IRQ_HANDLED; } /* Build a fake network packet and send it to ourselves. */ static void build_fake_packet(struct lance_private *lp) { struct net_device *dev = lp->dev; int i, entry; entry = lp->tx_new & TX_RING_MOD_MASK; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]); struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet; for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++) sbus_writew(0, &packet[i]); for (i = 0; i < 6; i++) { sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]); sbus_writeb(dev->dev_addr[i], &eth->h_source[i]); } sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length); sbus_writew(0, &ib->btx_ring[entry].misc); sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); } else { struct lance_init_block *ib = lp->init_block_mem; u16 *packet = (u16 *) &(ib->tx_buf[entry][0]); struct ethhdr *eth = (struct ethhdr *) packet; memset(packet, 0, ETH_ZLEN); for (i = 0; i < 6; i++) { eth->h_dest[i] = dev->dev_addr[i]; eth->h_source[i] = dev->dev_addr[i]; } ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000; ib->btx_ring[entry].misc = 0; ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); } lp->tx_new = TX_NEXT(entry); } static int lance_open(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int status = 0; STOP_LANCE(lp); if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED, lancestr, (void *) dev)) { printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq); return -EAGAIN; } /* On the 4m, setup the ledma to provide the upper bits for buffers */ if (lp->dregs) { u32 regval = lp->init_block_dvma & 0xff000000; sbus_writel(regval, lp->dregs + DMA_TEST); } /* Set mode and clear multicast filter only at device open, * so that lance_init_ring() called at any error will not * forget multicast filters. * * BTW it is common bug in all lance drivers! 
--ANK */ if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; sbus_writew(0, &ib->mode); sbus_writel(0, &ib->filter[0]); sbus_writel(0, &ib->filter[1]); } else { struct lance_init_block *ib = lp->init_block_mem; ib->mode = 0; ib->filter [0] = 0; ib->filter [1] = 0; } lp->init_ring(dev); load_csrs(lp); netif_start_queue(dev); status = init_restart_lance(lp); if (!status && lp->auto_select) { build_fake_packet(lp); sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); } return status; } static int lance_close(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); netif_stop_queue(dev); del_timer_sync(&lp->multicast_timer); STOP_LANCE(lp); free_irq(dev->irq, (void *) dev); return 0; } static int lance_reset(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int status; STOP_LANCE(lp); /* On the 4m, reset the dma too */ if (lp->dregs) { u32 csr, addr; printk(KERN_ERR "resetting ledma\n"); csr = sbus_readl(lp->dregs + DMA_CSR); sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); udelay(200); sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); addr = lp->init_block_dvma & 0xff000000; sbus_writel(addr, lp->dregs + DMA_TEST); } lp->init_ring(dev); load_csrs(lp); dev->trans_start = jiffies; /* prevent tx timeout */ status = init_restart_lance(lp); return status; } static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len) { void __iomem *piobuf = dest; u32 *p32; u16 *p16; u8 *p8; switch ((unsigned long)src & 0x3) { case 0: p32 = (u32 *) src; while (len >= 4) { sbus_writel(*p32, piobuf); p32++; piobuf += 4; len -= 4; } src = (char *) p32; break; case 1: case 3: p8 = (u8 *) src; while (len >= 4) { u32 val; val = p8[0] << 24; val |= p8[1] << 16; val |= p8[2] << 8; val |= p8[3]; sbus_writel(val, piobuf); p8 += 4; piobuf += 4; len -= 4; } src = (char *) p8; break; case 2: p16 = (u16 *) src; while (len >= 4) { u32 val = p16[0]<<16 | p16[1]; sbus_writel(val, piobuf); p16 += 2; piobuf += 
4; len -= 4; } src = (char *) p16; break; } if (len >= 2) { u16 val = src[0] << 8 | src[1]; sbus_writew(val, piobuf); src += 2; piobuf += 2; len -= 2; } if (len >= 1) sbus_writeb(src[0], piobuf); } static void lance_piozero(void __iomem *dest, int len) { void __iomem *piobuf = dest; if ((unsigned long)piobuf & 1) { sbus_writeb(0, piobuf); piobuf += 1; len -= 1; if (len == 0) return; } if (len == 1) { sbus_writeb(0, piobuf); return; } if ((unsigned long)piobuf & 2) { sbus_writew(0, piobuf); piobuf += 2; len -= 2; if (len == 0) return; } while (len >= 4) { sbus_writel(0, piobuf); piobuf += 4; len -= 4; } if (len >= 2) { sbus_writew(0, piobuf); piobuf += 2; len -= 2; } if (len >= 1) sbus_writeb(0, piobuf); } static void lance_tx_timeout(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", dev->name, sbus_readw(lp->lregs + RDP)); lance_reset(dev); netif_wake_queue(dev); } static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, skblen, len; skblen = skb->len; len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; spin_lock_irq(&lp->lock); dev->stats.tx_bytes += len; entry = lp->tx_new & TX_RING_MOD_MASK; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length); sbus_writew(0, &ib->btx_ring[entry].misc); lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen); if (len != skblen) lance_piozero(&ib->tx_buf[entry][skblen], len - skblen); sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); } else { struct lance_init_block *ib = lp->init_block_mem; ib->btx_ring [entry].length = (-len) | 0xf000; ib->btx_ring [entry].misc = 0; skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); if (len != skblen) memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); } lp->tx_new = TX_NEXT(entry); if (TX_BUFFS_AVAIL <= 0) netif_stop_queue(dev); /* Kick the lance: transmit now */ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); /* Read back CSR to invalidate the E-Cache. * This is needed, because DMA_DSBL_WR_INV is set. 
*/ if (lp->dregs) sbus_readw(lp->lregs + RDP); spin_unlock_irq(&lp->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } /* taken from the depca driver */ static void lance_load_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct netdev_hw_addr *ha; u32 crc; u32 val; /* set all multicast bits */ if (dev->flags & IFF_ALLMULTI) val = ~0; else val = 0; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; sbus_writel(val, &ib->filter[0]); sbus_writel(val, &ib->filter[1]); } else { struct lance_init_block *ib = lp->init_block_mem; ib->filter [0] = val; ib->filter [1] = val; } if (dev->flags & IFF_ALLMULTI) return; /* Add addresses */ netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc = crc >> 26; if (lp->pio_buffer) { struct lance_init_block __iomem *ib = lp->init_block_iomem; u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter; u16 tmp = sbus_readw(&mcast_table[crc>>4]); tmp |= 1 << (crc & 0xf); sbus_writew(tmp, &mcast_table[crc>>4]); } else { struct lance_init_block *ib = lp->init_block_mem; u16 *mcast_table = (u16 *) &ib->filter; mcast_table [crc >> 4] |= 1 << (crc & 0xf); } } } static void lance_set_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_init_block *ib_mem = lp->init_block_mem; struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem; u16 mode; if (!netif_running(dev)) return; if (lp->tx_old != lp->tx_new) { mod_timer(&lp->multicast_timer, jiffies + 4); netif_wake_queue(dev); return; } netif_stop_queue(dev); STOP_LANCE(lp); lp->init_ring(dev); if (lp->pio_buffer) mode = sbus_readw(&ib_iomem->mode); else mode = ib_mem->mode; if (dev->flags & IFF_PROMISC) { mode |= LE_MO_PROM; if (lp->pio_buffer) sbus_writew(mode, &ib_iomem->mode); else ib_mem->mode = mode; } else { mode &= ~LE_MO_PROM; if (lp->pio_buffer) sbus_writew(mode, &ib_iomem->mode); else ib_mem->mode = mode; lance_load_multicast(dev); } load_csrs(lp); 
init_restart_lance(lp); netif_wake_queue(dev); } static void lance_set_multicast_retry(unsigned long _opaque) { struct net_device *dev = (struct net_device *) _opaque; lance_set_multicast(dev); } static void lance_free_hwresources(struct lance_private *lp) { if (lp->lregs) of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE); if (lp->dregs) { struct platform_device *ledma = lp->ledma; of_iounmap(&ledma->resource[0], lp->dregs, resource_size(&ledma->resource[0])); } if (lp->init_block_iomem) { of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem, sizeof(struct lance_init_block)); } else if (lp->init_block_mem) { dma_free_coherent(&lp->op->dev, sizeof(struct lance_init_block), lp->init_block_mem, lp->init_block_dvma); } } /* Ethtool support... */ static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, "sunlance", sizeof(info->driver)); strlcpy(info->version, "2.02", sizeof(info->version)); } static const struct ethtool_ops sparc_lance_ethtool_ops = { .get_drvinfo = sparc_lance_get_drvinfo, .get_link = ethtool_op_get_link, }; static const struct net_device_ops sparc_lance_ops = { .ndo_open = lance_open, .ndo_stop = lance_close, .ndo_start_xmit = lance_start_xmit, .ndo_set_rx_mode = lance_set_multicast, .ndo_tx_timeout = lance_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int sparc_lance_probe_one(struct platform_device *op, struct platform_device *ledma, struct platform_device *lebuffer) { struct device_node *dp = op->dev.of_node; static unsigned version_printed; struct lance_private *lp; struct net_device *dev; int i; dev = alloc_etherdev(sizeof(struct lance_private) + 8); if (!dev) return -ENOMEM; lp = netdev_priv(dev); if (sparc_lance_debug && version_printed++ == 0) printk (KERN_INFO "%s", version); spin_lock_init(&lp->lock); /* Copy the IDPROM ethernet address to the device structure, later we * will copy 
the address in the device structure to the lance * initialization block. */ for (i = 0; i < 6; i++) dev->dev_addr[i] = idprom->id_ethaddr[i]; /* Get the IO region */ lp->lregs = of_ioremap(&op->resource[0], 0, LANCE_REG_SIZE, lancestr); if (!lp->lregs) { printk(KERN_ERR "SunLance: Cannot map registers.\n"); goto fail; } lp->ledma = ledma; if (lp->ledma) { lp->dregs = of_ioremap(&ledma->resource[0], 0, resource_size(&ledma->resource[0]), "ledma"); if (!lp->dregs) { printk(KERN_ERR "SunLance: Cannot map " "ledma registers.\n"); goto fail; } } lp->op = op; lp->lebuffer = lebuffer; if (lebuffer) { /* sanity check */ if (lebuffer->resource[0].start & 7) { printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n"); goto fail; } lp->init_block_iomem = of_ioremap(&lebuffer->resource[0], 0, sizeof(struct lance_init_block), "lebuffer"); if (!lp->init_block_iomem) { printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n"); goto fail; } lp->init_block_dvma = 0; lp->pio_buffer = 1; lp->init_ring = lance_init_ring_pio; lp->rx = lance_rx_pio; lp->tx = lance_tx_pio; } else { lp->init_block_mem = dma_alloc_coherent(&op->dev, sizeof(struct lance_init_block), &lp->init_block_dvma, GFP_ATOMIC); if (!lp->init_block_mem) goto fail; lp->pio_buffer = 0; lp->init_ring = lance_init_ring_dvma; lp->rx = lance_rx_dvma; lp->tx = lance_tx_dvma; } lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval", (LE_C3_BSWP | LE_C3_ACON | LE_C3_BCON)); lp->name = lancestr; lp->burst_sizes = 0; if (lp->ledma) { struct device_node *ledma_dp = ledma->dev.of_node; struct device_node *sbus_dp; unsigned int sbmask; const char *prop; u32 csr; /* Find burst-size property for ledma */ lp->burst_sizes = of_getintprop_default(ledma_dp, "burst-sizes", 0); /* ledma may be capable of fast bursts, but sbus may not. 
*/ sbus_dp = ledma_dp->parent; sbmask = of_getintprop_default(sbus_dp, "burst-sizes", DMA_BURSTBITS); lp->burst_sizes &= sbmask; /* Get the cable-selection property */ prop = of_get_property(ledma_dp, "cable-selection", NULL); if (!prop || prop[0] == '\0') { struct device_node *nd; printk(KERN_INFO "SunLance: using " "auto-carrier-detection.\n"); nd = of_find_node_by_path("/options"); if (!nd) goto no_link_test; prop = of_get_property(nd, "tpe-link-test?", NULL); if (!prop) goto no_link_test; if (strcmp(prop, "true")) { printk(KERN_NOTICE "SunLance: warning: overriding option " "'tpe-link-test?'\n"); printk(KERN_NOTICE "SunLance: warning: mail any problems " "to ecd@skynet.be\n"); auxio_set_lte(AUXIO_LTE_ON); } no_link_test: lp->auto_select = 1; lp->tpe = 0; } else if (!strcmp(prop, "aui")) { lp->auto_select = 0; lp->tpe = 0; } else { lp->auto_select = 0; lp->tpe = 1; } /* Reset ledma */ csr = sbus_readl(lp->dregs + DMA_CSR); sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); udelay(200); sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); } else lp->dregs = NULL; lp->dev = dev; SET_NETDEV_DEV(dev, &op->dev); dev->watchdog_timeo = 5*HZ; dev->ethtool_ops = &sparc_lance_ethtool_ops; dev->netdev_ops = &sparc_lance_ops; dev->irq = op->archdata.irqs[0]; /* We cannot sleep if the chip is busy during a * multicast list update event, because such events * can occur from interrupts (ex. IPv6). So we * use a timer to try again later when necessary. 
-DaveM */ init_timer(&lp->multicast_timer); lp->multicast_timer.data = (unsigned long) dev; lp->multicast_timer.function = lance_set_multicast_retry; if (register_netdev(dev)) { printk(KERN_ERR "SunLance: Cannot register device.\n"); goto fail; } dev_set_drvdata(&op->dev, lp); printk(KERN_INFO "%s: LANCE %pM\n", dev->name, dev->dev_addr); return 0; fail: lance_free_hwresources(lp); free_netdev(dev); return -ENODEV; } static int sunlance_sbus_probe(struct platform_device *op) { struct platform_device *parent = to_platform_device(op->dev.parent); struct device_node *parent_dp = parent->dev.of_node; int err; if (!strcmp(parent_dp->name, "ledma")) { err = sparc_lance_probe_one(op, parent, NULL); } else if (!strcmp(parent_dp->name, "lebuffer")) { err = sparc_lance_probe_one(op, NULL, parent); } else err = sparc_lance_probe_one(op, NULL, NULL); return err; } static int sunlance_sbus_remove(struct platform_device *op) { struct lance_private *lp = dev_get_drvdata(&op->dev); struct net_device *net_dev = lp->dev; unregister_netdev(net_dev); lance_free_hwresources(lp); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id sunlance_sbus_match[] = { { .name = "le", }, {}, }; MODULE_DEVICE_TABLE(of, sunlance_sbus_match); static struct platform_driver sunlance_sbus_driver = { .driver = { .name = "sunlance", .owner = THIS_MODULE, .of_match_table = sunlance_sbus_match, }, .probe = sunlance_sbus_probe, .remove = sunlance_sbus_remove, }; module_platform_driver(sunlance_sbus_driver);
gpl-2.0
flar2/m7
arch/arm/mach-msm/msm_cpu_pwrctl.c
3621
1159
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/errno.h> static const phys_addr_t primary_cpu_pwrctl_phys = 0x2088004; static int __init msm_cpu_pwrctl_init(void) { void *pwrctl_ptr; unsigned int value; int rc = 0; pwrctl_ptr = ioremap_nocache(primary_cpu_pwrctl_phys, SZ_4K); if (unlikely(!pwrctl_ptr)) { pr_err("Failed to remap APCS_CPU_PWR_CTL register for CPU0\n"); rc = -EINVAL; goto done; } value = readl_relaxed(pwrctl_ptr); value |= 0x100; writel_relaxed(value, pwrctl_ptr); mb(); iounmap(pwrctl_ptr); done: return rc; } early_initcall(msm_cpu_pwrctl_init);
gpl-2.0
perillamint/android_kernel_casio_gzone
sound/pci/vx222/vx222.c
4901
7678
/* * Driver for Digigram VX222 V2/Mic PCI soundcards * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/tlv.h> #include "vx222.h" #define CARD_NAME "VX222" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("Digigram VX222 V2/Mic"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static bool mic[SNDRV_CARDS]; /* microphone */ static int ibl[SNDRV_CARDS]; /* microphone */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Digigram " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Digigram " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard."); module_param_array(mic, bool, NULL, 0444); MODULE_PARM_DESC(mic, "Enable Microphone."); 
module_param_array(ibl, int, NULL, 0444); MODULE_PARM_DESC(ibl, "Capture IBL size."); /* */ enum { VX_PCI_VX222_OLD, VX_PCI_VX222_NEW }; static DEFINE_PCI_DEVICE_TABLE(snd_vx222_ids) = { { 0x10b5, 0x9050, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_OLD, }, /* PLX */ { 0x10b5, 0x9030, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_NEW, }, /* PLX */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_vx222_ids); /* */ static const DECLARE_TLV_DB_SCALE(db_scale_old_vol, -11350, 50, 0); static const DECLARE_TLV_DB_SCALE(db_scale_akm, -7350, 50, 0); static struct snd_vx_hardware vx222_old_hw = { .name = "VX222/Old", .type = VX_TYPE_BOARD, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX_ANALOG_OUT_LEVEL_MAX, .output_level_db_scale = db_scale_old_vol, }; static struct snd_vx_hardware vx222_v2_hw = { .name = "VX222/v2", .type = VX_TYPE_V2, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; static struct snd_vx_hardware vx222_mic_hw = { .name = "VX222/Mic", .type = VX_TYPE_MIC, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; /* */ static int snd_vx222_free(struct vx_core *chip) { struct snd_vx222 *vx = (struct snd_vx222 *)chip; if (chip->irq >= 0) free_irq(chip->irq, (void*)chip); if (vx->port[0]) pci_release_regions(vx->pci); pci_disable_device(vx->pci); kfree(chip); return 0; } static int snd_vx222_dev_free(struct snd_device *device) { struct vx_core *chip = device->device_data; return snd_vx222_free(chip); } static int __devinit snd_vx222_create(struct snd_card *card, struct pci_dev *pci, struct snd_vx_hardware *hw, struct snd_vx222 **rchip) { struct vx_core *chip; struct snd_vx222 *vx; int i, err; static struct snd_device_ops ops = { .dev_free = snd_vx222_dev_free, }; struct snd_vx_ops *vx_ops; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; 
pci_set_master(pci); vx_ops = hw->type == VX_TYPE_BOARD ? &vx222_old_ops : &vx222_ops; chip = snd_vx_create(card, hw, vx_ops, sizeof(struct snd_vx222) - sizeof(struct vx_core)); if (! chip) { pci_disable_device(pci); return -ENOMEM; } vx = (struct snd_vx222 *)chip; vx->pci = pci; if ((err = pci_request_regions(pci, CARD_NAME)) < 0) { snd_vx222_free(chip); return err; } for (i = 0; i < 2; i++) vx->port[i] = pci_resource_start(pci, i + 1); if (request_irq(pci->irq, snd_vx_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_vx222_free(chip); return -EBUSY; } chip->irq = pci->irq; if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_vx222_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = vx; return 0; } static int __devinit snd_vx222_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_vx_hardware *hw; struct snd_vx222 *vx; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch ((int)pci_id->driver_data) { case VX_PCI_VX222_OLD: hw = &vx222_old_hw; break; case VX_PCI_VX222_NEW: default: if (mic[dev]) hw = &vx222_mic_hw; else hw = &vx222_v2_hw; break; } if ((err = snd_vx222_create(card, pci, hw, &vx)) < 0) { snd_card_free(card); return err; } card->private_data = vx; vx->core.ibl.size = ibl[dev]; sprintf(card->longname, "%s at 0x%lx & 0x%lx, irq %i", card->shortname, vx->port[0], vx->port[1], vx->core.irq); snd_printdd("%s at 0x%lx & 0x%lx, irq %i\n", card->shortname, vx->port[0], vx->port[1], vx->core.irq); #ifdef SND_VX_FW_LOADER vx->core.dev = &pci->dev; #endif if ((err = snd_vx_setup_firmware(&vx->core)) < 0) { snd_card_free(card); return err; } if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } 
/* Remove callback: freeing the card tears down all attached devices. */
static void __devexit snd_vx222_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}

#ifdef CONFIG_PM
/*
 * Suspend: quiesce the vx core first, then power the PCI device down.
 * The snd_vx_suspend() result is preserved and returned even though the
 * PCI teardown below is performed unconditionally.
 */
static int snd_vx222_suspend(struct pci_dev *pci, pm_message_t state)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct snd_vx222 *vx = card->private_data;
	int err;

	err = snd_vx_suspend(&vx->core, state);
	pci_disable_device(pci);
	pci_save_state(pci);
	pci_set_power_state(pci, pci_choose_state(pci, state));
	return err;
}

/*
 * Resume: restore PCI config space and re-enable the device. If the
 * device cannot be re-enabled, the card is disconnected rather than
 * left half-alive.
 */
static int snd_vx222_resume(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);
	struct snd_vx222 *vx = card->private_data;

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	if (pci_enable_device(pci) < 0) {
		printk(KERN_ERR "vx222: pci_enable_device failed, "
		       "disabling device\n");
		snd_card_disconnect(card);
		return -EIO;
	}
	pci_set_master(pci);
	return snd_vx_resume(&vx->core);
}
#endif

/* PCI driver glue binding the callbacks above to the vx222 id table. */
static struct pci_driver driver = {
	.name = KBUILD_MODNAME,
	.id_table = snd_vx222_ids,
	.probe = snd_vx222_probe,
	.remove = __devexit_p(snd_vx222_remove),
#ifdef CONFIG_PM
	.suspend = snd_vx222_suspend,
	.resume = snd_vx222_resume,
#endif
};

static int __init alsa_card_vx222_init(void)
{
	return pci_register_driver(&driver);
}

static void __exit alsa_card_vx222_exit(void)
{
	pci_unregister_driver(&driver);
}

module_init(alsa_card_vx222_init)
module_exit(alsa_card_vx222_exit)
gpl-2.0
sub77-twrp/android_kernel_samsung_matissewifi
net/sched/sch_multiq.c
4901
9512
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/*
 * Per-qdisc state: one child qdisc per hardware transmit queue ("band").
 * bands tracks the active count, max_bands the allocation size, and
 * curband the round-robin dequeue cursor.
 */
struct multiq_sched_data {
	u16 bands;
	u16 max_bands;
	u16 curband;
	struct tcf_proto *filter_list;
	struct Qdisc **queues;
};

/*
 * Map an skb to its band's child qdisc. The tc filter chain runs first
 * (its verdict may steal/drop the packet); the band itself comes from
 * the skb's queue mapping, falling back to band 0 if out of range.
 */
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}

/* Enqueue into the classified band; propagate drop accounting upward. */
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;
}

/*
 * Round-robin dequeue: advance curband each attempt and skip bands whose
 * hardware tx queue is currently stopped.
 */
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}

/* Like multiq_dequeue() but side-effect free: uses a local cursor copy. */
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;

}

/* Drop one packet, preferring the highest-numbered band first. */
static unsigned int multiq_drop(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;
	unsigned int len;
	struct Qdisc *qdisc;

	for (band = q->bands - 1; band >= 0; band--) {
		qdisc = q->queues[band];
		if (qdisc->ops->drop) {
			len = qdisc->ops->drop(qdisc);
			if (len != 0) {
				sch->q.qlen--;
				return len;
			}
		}
	}
	return 0;
}

/* Reset every child and rewind the round-robin cursor. */
static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	sch->q.qlen = 0;
	q->curband = 0;
}

/* Tear down the filter chain, all children, and the queues array. */
static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);

	kfree(q->queues);
}

/*
 * (Re)configure the band count. The user-supplied value is ignored:
 * bands always mirrors the device's real_num_tx_queues (and the new
 * value is written back into the netlink payload). Bands above the new
 * count are detached under the tree lock and destroyed; missing bands
 * get a fresh default pfifo child.
 */
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	/* qdisc_create_dflt() may sleep, so children are created with the
	 * tree lock dropped and only swapped in under the lock. */
	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_decrease_qlen(old,
								 old->q.qlen);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}

/*
 * Init: size the queues array to the device's tx queue count, point every
 * slot at noop_qdisc, then apply the initial configuration via
 * multiq_tune(). opt is mandatory.
 */
static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	err = multiq_tune(sch, opt);

	if (err)
		kfree(q->queues);

	return err;
}

/* Dump the current band configuration as a TCA_OPTIONS attribute. */
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/*
 * Replace the child of band (arg - 1) with "new", returning the old child
 * in *old. A NULL replacement installs noop_qdisc.
 */
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

/* Return the child qdisc of band (arg - 1). */
static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

/*
 * Translate a classid into a class handle (1-based band number);
 * 0 means "no such class".
 */
static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_get(sch, classid);
}


/* Classes are implicit (one per band); nothing to release. */
static void multiq_put(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	cl_q->qstats.qlen = cl_q->q.qlen;
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	return 0;
}

/* Walk all bands, honouring the walker's skip/count/stop protocol. */
static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

/* Filters attach only at the qdisc level (cl == 0), not per band. */
static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.get		=	multiq_get,
	.put		=	multiq_put,
	.walk		=	multiq_walk,
	.tcf_chain	=	multiq_find_tcf,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_put,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.drop		=	multiq_drop,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");
gpl-2.0
thypon/android_kernel_samsung_tuna
drivers/staging/line6/control.c
8229
44512
/* * Line6 Linux USB driver - 0.9.1beta * * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/usb.h> #include "control.h" #include "driver.h" #include "pod.h" #include "usbdefs.h" #include "variax.h" #define DEVICE_ATTR2(_name1, _name2, _mode, _show, _store) \ struct device_attribute dev_attr_##_name1 = __ATTR(_name2, _mode, _show, _store) #define LINE6_PARAM_R(PREFIX, prefix, type, param) \ static ssize_t prefix##_get_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ return prefix##_get_param_##type(dev, buf, PREFIX##_##param); \ } #define LINE6_PARAM_RW(PREFIX, prefix, type, param) \ LINE6_PARAM_R(PREFIX, prefix, type, param); \ static ssize_t prefix##_set_##param(struct device *dev, \ struct device_attribute *attr, const char *buf, size_t count) \ { \ return prefix##_set_param_##type(dev, buf, count, PREFIX##_##param); \ } #define POD_PARAM_R(type, param) LINE6_PARAM_R(POD, pod, type, param) #define POD_PARAM_RW(type, param) LINE6_PARAM_RW(POD, pod, type, param) #define VARIAX_PARAM_R(type, param) LINE6_PARAM_R(VARIAX, variax, type, param) #define VARIAX_PARAM_RW(type, param) LINE6_PARAM_RW(VARIAX, variax, type, param) static ssize_t pod_get_param_int(struct device *dev, char *buf, int param) { struct usb_interface *interface = to_usb_interface(dev); struct usb_line6_pod *pod = usb_get_intfdata(interface); int retval = line6_dump_wait_interruptible(&pod->dumpreq); if (retval < 0) return retval; return sprintf(buf, "%d\n", pod->prog_data.control[param]); } static ssize_t pod_set_param_int(struct device *dev, const char *buf, size_t count, int param) { struct usb_interface *interface = to_usb_interface(dev); struct usb_line6_pod *pod = usb_get_intfdata(interface); unsigned long value; int retval; retval = 
strict_strtoul(buf, 10, &value); if (retval) return retval; line6_pod_transmit_parameter(pod, param, value); return count; } static ssize_t variax_get_param_int(struct device *dev, char *buf, int param) { struct usb_interface *interface = to_usb_interface(dev); struct usb_line6_variax *variax = usb_get_intfdata(interface); int retval = line6_dump_wait_interruptible(&variax->dumpreq); if (retval < 0) return retval; return sprintf(buf, "%d\n", variax->model_data.control[param]); } static ssize_t variax_get_param_float(struct device *dev, char *buf, int param) { /* We do our own floating point handling here since at the time this code was written (Jan 2006) it was highly discouraged to use floating point arithmetic in the kernel. If you think that this no longer applies, feel free to replace this by generic floating point code. */ static const int BIAS = 0x7f; static const int OFFSET = 0xf; static const int PRECISION = 1000; int len = 0; unsigned part_int, part_frac; struct usb_interface *interface = to_usb_interface(dev); struct usb_line6_variax *variax = usb_get_intfdata(interface); const unsigned char *p = variax->model_data.control + param; int retval = line6_dump_wait_interruptible(&variax->dumpreq); if (retval < 0) return retval; if ((p[0] == 0) && (p[1] == 0) && (p[2] == 0)) part_int = part_frac = 0; else { int exponent = (((p[0] & 0x7f) << 1) | (p[1] >> 7)) - BIAS; unsigned mantissa = (p[1] << 8) | p[2] | 0x8000; exponent -= OFFSET; if (exponent >= 0) { part_int = mantissa << exponent; part_frac = 0; } else { part_int = mantissa >> -exponent; part_frac = (mantissa << (32 + exponent)) & 0xffffffff; } part_frac = (part_frac / ((1UL << 31) / (PRECISION / 2 * 10)) + 5) / 10; } len += sprintf(buf + len, "%s%d.%03d\n", ((p[0] & 0x80) ? 
"-" : ""), part_int, part_frac); return len; } POD_PARAM_RW(int, tweak); POD_PARAM_RW(int, wah_position); POD_PARAM_RW(int, compression_gain); POD_PARAM_RW(int, vol_pedal_position); POD_PARAM_RW(int, compression_threshold); POD_PARAM_RW(int, pan); POD_PARAM_RW(int, amp_model_setup); POD_PARAM_RW(int, amp_model); POD_PARAM_RW(int, drive); POD_PARAM_RW(int, bass); POD_PARAM_RW(int, mid); POD_PARAM_RW(int, lowmid); POD_PARAM_RW(int, treble); POD_PARAM_RW(int, highmid); POD_PARAM_RW(int, chan_vol); POD_PARAM_RW(int, reverb_mix); POD_PARAM_RW(int, effect_setup); POD_PARAM_RW(int, band_1_frequency); POD_PARAM_RW(int, presence); POD_PARAM_RW(int, treble__bass); POD_PARAM_RW(int, noise_gate_enable); POD_PARAM_RW(int, gate_threshold); POD_PARAM_RW(int, gate_decay_time); POD_PARAM_RW(int, stomp_enable); POD_PARAM_RW(int, comp_enable); POD_PARAM_RW(int, stomp_time); POD_PARAM_RW(int, delay_enable); POD_PARAM_RW(int, mod_param_1); POD_PARAM_RW(int, delay_param_1); POD_PARAM_RW(int, delay_param_1_note_value); POD_PARAM_RW(int, band_2_frequency__bass); POD_PARAM_RW(int, delay_param_2); POD_PARAM_RW(int, delay_volume_mix); POD_PARAM_RW(int, delay_param_3); POD_PARAM_RW(int, reverb_enable); POD_PARAM_RW(int, reverb_type); POD_PARAM_RW(int, reverb_decay); POD_PARAM_RW(int, reverb_tone); POD_PARAM_RW(int, reverb_pre_delay); POD_PARAM_RW(int, reverb_pre_post); POD_PARAM_RW(int, band_2_frequency); POD_PARAM_RW(int, band_3_frequency__bass); POD_PARAM_RW(int, wah_enable); POD_PARAM_RW(int, modulation_lo_cut); POD_PARAM_RW(int, delay_reverb_lo_cut); POD_PARAM_RW(int, volume_pedal_minimum); POD_PARAM_RW(int, eq_pre_post); POD_PARAM_RW(int, volume_pre_post); POD_PARAM_RW(int, di_model); POD_PARAM_RW(int, di_delay); POD_PARAM_RW(int, mod_enable); POD_PARAM_RW(int, mod_param_1_note_value); POD_PARAM_RW(int, mod_param_2); POD_PARAM_RW(int, mod_param_3); POD_PARAM_RW(int, mod_param_4); POD_PARAM_RW(int, mod_param_5); POD_PARAM_RW(int, mod_volume_mix); POD_PARAM_RW(int, mod_pre_post); 
POD_PARAM_RW(int, modulation_model); POD_PARAM_RW(int, band_3_frequency); POD_PARAM_RW(int, band_4_frequency__bass); POD_PARAM_RW(int, mod_param_1_double_precision); POD_PARAM_RW(int, delay_param_1_double_precision); POD_PARAM_RW(int, eq_enable); POD_PARAM_RW(int, tap); POD_PARAM_RW(int, volume_tweak_pedal_assign); POD_PARAM_RW(int, band_5_frequency); POD_PARAM_RW(int, tuner); POD_PARAM_RW(int, mic_selection); POD_PARAM_RW(int, cabinet_model); POD_PARAM_RW(int, stomp_model); POD_PARAM_RW(int, roomlevel); POD_PARAM_RW(int, band_4_frequency); POD_PARAM_RW(int, band_6_frequency); POD_PARAM_RW(int, stomp_param_1_note_value); POD_PARAM_RW(int, stomp_param_2); POD_PARAM_RW(int, stomp_param_3); POD_PARAM_RW(int, stomp_param_4); POD_PARAM_RW(int, stomp_param_5); POD_PARAM_RW(int, stomp_param_6); POD_PARAM_RW(int, amp_switch_select); POD_PARAM_RW(int, delay_param_4); POD_PARAM_RW(int, delay_param_5); POD_PARAM_RW(int, delay_pre_post); POD_PARAM_RW(int, delay_model); POD_PARAM_RW(int, delay_verb_model); POD_PARAM_RW(int, tempo_msb); POD_PARAM_RW(int, tempo_lsb); POD_PARAM_RW(int, wah_model); POD_PARAM_RW(int, bypass_volume); POD_PARAM_RW(int, fx_loop_on_off); POD_PARAM_RW(int, tweak_param_select); POD_PARAM_RW(int, amp1_engage); POD_PARAM_RW(int, band_1_gain); POD_PARAM_RW(int, band_2_gain__bass); POD_PARAM_RW(int, band_2_gain); POD_PARAM_RW(int, band_3_gain__bass); POD_PARAM_RW(int, band_3_gain); POD_PARAM_RW(int, band_4_gain__bass); POD_PARAM_RW(int, band_5_gain__bass); POD_PARAM_RW(int, band_4_gain); POD_PARAM_RW(int, band_6_gain__bass); VARIAX_PARAM_R(int, body); VARIAX_PARAM_R(int, pickup1_enable); VARIAX_PARAM_R(int, pickup1_type); VARIAX_PARAM_R(float, pickup1_position); VARIAX_PARAM_R(float, pickup1_angle); VARIAX_PARAM_R(float, pickup1_level); VARIAX_PARAM_R(int, pickup2_enable); VARIAX_PARAM_R(int, pickup2_type); VARIAX_PARAM_R(float, pickup2_position); VARIAX_PARAM_R(float, pickup2_angle); VARIAX_PARAM_R(float, pickup2_level); VARIAX_PARAM_R(int, pickup_phase); 
VARIAX_PARAM_R(float, capacitance); VARIAX_PARAM_R(float, tone_resistance); VARIAX_PARAM_R(float, volume_resistance); VARIAX_PARAM_R(int, taper); VARIAX_PARAM_R(float, tone_dump); VARIAX_PARAM_R(int, save_tone); VARIAX_PARAM_R(float, volume_dump); VARIAX_PARAM_R(int, tuning_enable); VARIAX_PARAM_R(int, tuning6); VARIAX_PARAM_R(int, tuning5); VARIAX_PARAM_R(int, tuning4); VARIAX_PARAM_R(int, tuning3); VARIAX_PARAM_R(int, tuning2); VARIAX_PARAM_R(int, tuning1); VARIAX_PARAM_R(float, detune6); VARIAX_PARAM_R(float, detune5); VARIAX_PARAM_R(float, detune4); VARIAX_PARAM_R(float, detune3); VARIAX_PARAM_R(float, detune2); VARIAX_PARAM_R(float, detune1); VARIAX_PARAM_R(float, mix6); VARIAX_PARAM_R(float, mix5); VARIAX_PARAM_R(float, mix4); VARIAX_PARAM_R(float, mix3); VARIAX_PARAM_R(float, mix2); VARIAX_PARAM_R(float, mix1); VARIAX_PARAM_R(int, pickup_wiring); static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak); static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO, pod_get_wah_position, pod_set_wah_position); static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO, pod_get_compression_gain, pod_set_compression_gain); static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO, pod_get_vol_pedal_position, pod_set_vol_pedal_position); static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO, pod_get_compression_threshold, pod_set_compression_threshold); static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan); static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO, pod_get_amp_model_setup, pod_set_amp_model_setup); static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO, pod_get_amp_model, pod_set_amp_model); static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive); static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass); static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid); static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid); static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO, 
pod_get_treble, pod_set_treble); static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO, pod_get_highmid, pod_set_highmid); static DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO, pod_get_chan_vol, pod_set_chan_vol); static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO, pod_get_reverb_mix, pod_set_reverb_mix); static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO, pod_get_effect_setup, pod_set_effect_setup); static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO, pod_get_band_1_frequency, pod_set_band_1_frequency); static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO, pod_get_presence, pod_set_presence); static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO, pod_get_treble__bass, pod_set_treble__bass); static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO, pod_get_noise_gate_enable, pod_set_noise_gate_enable); static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO, pod_get_gate_threshold, pod_set_gate_threshold); static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO, pod_get_gate_decay_time, pod_set_gate_decay_time); static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO, pod_get_stomp_enable, pod_set_stomp_enable); static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO, pod_get_comp_enable, pod_set_comp_enable); static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO, pod_get_stomp_time, pod_set_stomp_time); static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO, pod_get_delay_enable, pod_set_delay_enable); static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO, pod_get_mod_param_1, pod_set_mod_param_1); static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO, pod_get_delay_param_1, pod_set_delay_param_1); static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_delay_param_1_note_value, pod_set_delay_param_1_note_value); static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency__bass, pod_set_band_2_frequency__bass); static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO, pod_get_delay_param_2, pod_set_delay_param_2); static DEVICE_ATTR(delay_volume_mix, 
S_IWUSR | S_IRUGO, pod_get_delay_volume_mix, pod_set_delay_volume_mix); static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO, pod_get_delay_param_3, pod_set_delay_param_3); static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO, pod_get_reverb_enable, pod_set_reverb_enable); static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO, pod_get_reverb_type, pod_set_reverb_type); static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO, pod_get_reverb_decay, pod_set_reverb_decay); static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO, pod_get_reverb_tone, pod_set_reverb_tone); static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO, pod_get_reverb_pre_delay, pod_set_reverb_pre_delay); static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO, pod_get_reverb_pre_post, pod_set_reverb_pre_post); static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency, pod_set_band_2_frequency); static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency__bass, pod_set_band_3_frequency__bass); static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO, pod_get_wah_enable, pod_set_wah_enable); static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO, pod_get_modulation_lo_cut, pod_set_modulation_lo_cut); static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO, pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut); static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO, pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum); static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO, pod_get_eq_pre_post, pod_set_eq_pre_post); static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO, pod_get_volume_pre_post, pod_set_volume_pre_post); static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO, pod_get_di_model, pod_set_di_model); static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO, pod_get_di_delay, pod_set_di_delay); static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO, pod_get_mod_enable, pod_set_mod_enable); static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO, 
pod_get_mod_param_1_note_value, pod_set_mod_param_1_note_value); static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO, pod_get_mod_param_2, pod_set_mod_param_2); static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO, pod_get_mod_param_3, pod_set_mod_param_3); static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO, pod_get_mod_param_4, pod_set_mod_param_4); static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO, pod_get_mod_param_5, pod_set_mod_param_5); static DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO, pod_get_mod_volume_mix, pod_set_mod_volume_mix); static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO, pod_get_mod_pre_post, pod_set_mod_pre_post); static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO, pod_get_modulation_model, pod_set_modulation_model); static DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency, pod_set_band_3_frequency); static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency__bass, pod_set_band_4_frequency__bass); static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_mod_param_1_double_precision, pod_set_mod_param_1_double_precision); static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_delay_param_1_double_precision, pod_set_delay_param_1_double_precision); static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO, pod_get_eq_enable, pod_set_eq_enable); static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap); static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO, pod_get_volume_tweak_pedal_assign, pod_set_volume_tweak_pedal_assign); static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO, pod_get_band_5_frequency, pod_set_band_5_frequency); static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner); static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO, pod_get_mic_selection, pod_set_mic_selection); static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO, pod_get_cabinet_model, pod_set_cabinet_model); static 
DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO, pod_get_stomp_model, pod_set_stomp_model); static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO, pod_get_roomlevel, pod_set_roomlevel); static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency, pod_set_band_4_frequency); static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO, pod_get_band_6_frequency, pod_set_band_6_frequency); static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_stomp_param_1_note_value, pod_set_stomp_param_1_note_value); static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO, pod_get_stomp_param_2, pod_set_stomp_param_2); static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO, pod_get_stomp_param_3, pod_set_stomp_param_3); static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO, pod_get_stomp_param_4, pod_set_stomp_param_4); static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO, pod_get_stomp_param_5, pod_set_stomp_param_5); static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO, pod_get_stomp_param_6, pod_set_stomp_param_6); static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO, pod_get_amp_switch_select, pod_set_amp_switch_select); static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO, pod_get_delay_param_4, pod_set_delay_param_4); static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO, pod_get_delay_param_5, pod_set_delay_param_5); static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO, pod_get_delay_pre_post, pod_set_delay_pre_post); static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO, pod_get_delay_model, pod_set_delay_model); static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO, pod_get_delay_verb_model, pod_set_delay_verb_model); static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO, pod_get_tempo_msb, pod_set_tempo_msb); static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO, pod_get_tempo_lsb, pod_set_tempo_lsb); static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO, pod_get_wah_model, pod_set_wah_model); static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO, pod_get_bypass_volume, 
pod_set_bypass_volume);
static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO, pod_get_fx_loop_on_off, pod_set_fx_loop_on_off);
static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO, pod_get_tweak_param_select, pod_set_tweak_param_select);
static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO, pod_get_amp1_engage, pod_set_amp1_engage);
static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO, pod_get_band_1_gain, pod_set_band_1_gain);
static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain__bass, pod_set_band_2_gain__bass);
static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain, pod_set_band_2_gain);
static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain__bass, pod_set_band_3_gain__bass);
static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain, pod_set_band_3_gain);
static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain__bass, pod_set_band_4_gain__bass);
static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO, pod_get_band_5_gain__bass, pod_set_band_5_gain__bass);
static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain, pod_set_band_4_gain);
static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO, pod_get_band_6_gain__bass, pod_set_band_6_gain__bass);

/* Variax model attributes -- all read-only; writes are a no-op. */
static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write);
static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable, line6_nop_write);
static DEVICE_ATTR(pickup1_type, S_IRUGO, variax_get_pickup1_type, line6_nop_write);
static DEVICE_ATTR(pickup1_position, S_IRUGO, variax_get_pickup1_position, line6_nop_write);
static DEVICE_ATTR(pickup1_angle, S_IRUGO, variax_get_pickup1_angle, line6_nop_write);
static DEVICE_ATTR(pickup1_level, S_IRUGO, variax_get_pickup1_level, line6_nop_write);
static DEVICE_ATTR(pickup2_enable, S_IRUGO, variax_get_pickup2_enable, line6_nop_write);
static DEVICE_ATTR(pickup2_type, S_IRUGO, variax_get_pickup2_type, line6_nop_write);
static DEVICE_ATTR(pickup2_position, S_IRUGO, variax_get_pickup2_position, line6_nop_write);
static DEVICE_ATTR(pickup2_angle, S_IRUGO, variax_get_pickup2_angle, line6_nop_write);
static DEVICE_ATTR(pickup2_level, S_IRUGO, variax_get_pickup2_level, line6_nop_write);
static DEVICE_ATTR(pickup_phase, S_IRUGO, variax_get_pickup_phase, line6_nop_write);
static DEVICE_ATTR(capacitance, S_IRUGO, variax_get_capacitance, line6_nop_write);
static DEVICE_ATTR(tone_resistance, S_IRUGO, variax_get_tone_resistance, line6_nop_write);
static DEVICE_ATTR(volume_resistance, S_IRUGO, variax_get_volume_resistance, line6_nop_write);
static DEVICE_ATTR(taper, S_IRUGO, variax_get_taper, line6_nop_write);
static DEVICE_ATTR(tone_dump, S_IRUGO, variax_get_tone_dump, line6_nop_write);
static DEVICE_ATTR(save_tone, S_IRUGO, variax_get_save_tone, line6_nop_write);
static DEVICE_ATTR(volume_dump, S_IRUGO, variax_get_volume_dump, line6_nop_write);
static DEVICE_ATTR(tuning_enable, S_IRUGO, variax_get_tuning_enable, line6_nop_write);
static DEVICE_ATTR(tuning6, S_IRUGO, variax_get_tuning6, line6_nop_write);
static DEVICE_ATTR(tuning5, S_IRUGO, variax_get_tuning5, line6_nop_write);
static DEVICE_ATTR(tuning4, S_IRUGO, variax_get_tuning4, line6_nop_write);
static DEVICE_ATTR(tuning3, S_IRUGO, variax_get_tuning3, line6_nop_write);
static DEVICE_ATTR(tuning2, S_IRUGO, variax_get_tuning2, line6_nop_write);
static DEVICE_ATTR(tuning1, S_IRUGO, variax_get_tuning1, line6_nop_write);
static DEVICE_ATTR(detune6, S_IRUGO, variax_get_detune6, line6_nop_write);
static DEVICE_ATTR(detune5, S_IRUGO, variax_get_detune5, line6_nop_write);
static DEVICE_ATTR(detune4, S_IRUGO, variax_get_detune4, line6_nop_write);
static DEVICE_ATTR(detune3, S_IRUGO, variax_get_detune3, line6_nop_write);
static DEVICE_ATTR(detune2, S_IRUGO, variax_get_detune2, line6_nop_write);
static DEVICE_ATTR(detune1, S_IRUGO, variax_get_detune1, line6_nop_write);
static DEVICE_ATTR(mix6, S_IRUGO, variax_get_mix6, line6_nop_write);
static DEVICE_ATTR(mix5, S_IRUGO, variax_get_mix5, line6_nop_write);
static DEVICE_ATTR(mix4, S_IRUGO, variax_get_mix4, line6_nop_write);
static DEVICE_ATTR(mix3, S_IRUGO, variax_get_mix3, line6_nop_write);
static DEVICE_ATTR(mix2, S_IRUGO, variax_get_mix2, line6_nop_write);
static DEVICE_ATTR(mix1, S_IRUGO, variax_get_mix1, line6_nop_write);
static DEVICE_ATTR(pickup_wiring, S_IRUGO, variax_get_pickup_wiring, line6_nop_write);

/*
 * Register the POD control files in sysfs.  Which files appear depends on
 * the device variant (@type bits) and the reported @firmware version; the
 * CHECK_RETURN macro bails out with the error code of the first
 * device_create_file() that fails.
 */
int line6_pod_create_files(int firmware, int type, struct device *dev)
{
	int err;

	CHECK_RETURN(device_create_file(dev, &dev_attr_tweak));
	CHECK_RETURN(device_create_file(dev, &dev_attr_wah_position));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_compression_gain));
	CHECK_RETURN(device_create_file(dev, &dev_attr_vol_pedal_position));
	CHECK_RETURN(device_create_file(dev, &dev_attr_compression_threshold));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pan));
	CHECK_RETURN(device_create_file(dev, &dev_attr_amp_model_setup));
	if (firmware >= 200)
		CHECK_RETURN(device_create_file(dev, &dev_attr_amp_model));
	CHECK_RETURN(device_create_file(dev, &dev_attr_drive));
	CHECK_RETURN(device_create_file(dev, &dev_attr_bass));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_mid));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_lowmid));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_treble));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_highmid));
	CHECK_RETURN(device_create_file(dev, &dev_attr_chan_vol));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_mix));
	CHECK_RETURN(device_create_file(dev, &dev_attr_effect_setup));
	if (firmware >= 200)
		CHECK_RETURN(device_create_file(dev, &dev_attr_band_1_frequency));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_presence));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_treble__bass));
	CHECK_RETURN(device_create_file(dev, &dev_attr_noise_gate_enable));
	CHECK_RETURN(device_create_file(dev, &dev_attr_gate_threshold));
	CHECK_RETURN(device_create_file(dev, &dev_attr_gate_decay_time));
	CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_enable));
	CHECK_RETURN(device_create_file(dev, &dev_attr_comp_enable));
	CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_time));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_enable));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_1));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_1));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_1_note_value));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_2_frequency__bass));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_2));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_volume_mix));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_3));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_enable));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_type));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_decay));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_tone));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_pre_delay));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_pre_post));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_2_frequency));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_3_frequency__bass));
	CHECK_RETURN(device_create_file(dev, &dev_attr_wah_enable));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_modulation_lo_cut));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_delay_reverb_lo_cut));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_volume_pedal_minimum));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_eq_pre_post));
	CHECK_RETURN(device_create_file(dev, &dev_attr_volume_pre_post));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_di_model));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_di_delay));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mod_enable));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_1_note_value));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_2));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_3));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_4));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_5));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mod_volume_mix));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mod_pre_post));
	CHECK_RETURN(device_create_file(dev, &dev_attr_modulation_model));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_3_frequency));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_4_frequency__bass));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_1_double_precision));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_1_double_precision));
	if (firmware >= 200)
		CHECK_RETURN(device_create_file(dev, &dev_attr_eq_enable));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tap));
	CHECK_RETURN(device_create_file(dev, &dev_attr_volume_tweak_pedal_assign));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_5_frequency));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuner));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mic_selection));
	CHECK_RETURN(device_create_file(dev, &dev_attr_cabinet_model));
	CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_model));
	CHECK_RETURN(device_create_file(dev, &dev_attr_roomlevel));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_4_frequency));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_6_frequency));
	CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_1_note_value));
	CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_2));
	CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_3));
	CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_4));
	CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_5));
	CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_6));
	if ((type & (LINE6_BITS_LIVE)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_amp_switch_select));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_4));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_5));
	CHECK_RETURN(device_create_file(dev, &dev_attr_delay_pre_post));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_delay_model));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_delay_verb_model));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tempo_msb));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tempo_lsb));
	if (firmware >= 300)
		CHECK_RETURN(device_create_file(dev, &dev_attr_wah_model));
	if (firmware >= 214)
		CHECK_RETURN(device_create_file(dev, &dev_attr_bypass_volume));
	if ((type & (LINE6_BITS_PRO)) != 0)
		CHECK_RETURN(device_create_file(dev, &dev_attr_fx_loop_on_off));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tweak_param_select));
	CHECK_RETURN(device_create_file(dev, &dev_attr_amp1_engage));
	if (firmware >= 200)
		CHECK_RETURN(device_create_file(dev, &dev_attr_band_1_gain));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_2_gain__bass));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_2_gain));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_3_gain__bass));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_3_gain));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_4_gain__bass));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_5_gain__bass));
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_4_gain));
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			CHECK_RETURN(device_create_file(dev, &dev_attr_band_6_gain__bass));

	return 0;
}

/*
 * Remove the POD control files from sysfs.  The conditions must mirror
 * line6_pod_create_files() exactly so only files that were created get
 * removed.
 */
void line6_pod_remove_files(int firmware, int type, struct device *dev)
{
	device_remove_file(dev, &dev_attr_tweak);
	device_remove_file(dev, &dev_attr_wah_position);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_compression_gain);
	device_remove_file(dev, &dev_attr_vol_pedal_position);
	device_remove_file(dev, &dev_attr_compression_threshold);
	device_remove_file(dev, &dev_attr_pan);
	device_remove_file(dev, &dev_attr_amp_model_setup);
	if (firmware >= 200)
		device_remove_file(dev, &dev_attr_amp_model);
	device_remove_file(dev, &dev_attr_drive);
	device_remove_file(dev, &dev_attr_bass);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_mid);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_lowmid);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_treble);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_highmid);
	device_remove_file(dev, &dev_attr_chan_vol);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_reverb_mix);
	device_remove_file(dev, &dev_attr_effect_setup);
	if (firmware >= 200)
		device_remove_file(dev, &dev_attr_band_1_frequency);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_presence);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_treble__bass);
	device_remove_file(dev, &dev_attr_noise_gate_enable);
	device_remove_file(dev, &dev_attr_gate_threshold);
	device_remove_file(dev, &dev_attr_gate_decay_time);
	device_remove_file(dev, &dev_attr_stomp_enable);
	device_remove_file(dev, &dev_attr_comp_enable);
	device_remove_file(dev, &dev_attr_stomp_time);
	device_remove_file(dev, &dev_attr_delay_enable);
	device_remove_file(dev, &dev_attr_mod_param_1);
	device_remove_file(dev, &dev_attr_delay_param_1);
	device_remove_file(dev, &dev_attr_delay_param_1_note_value);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_2_frequency__bass);
	device_remove_file(dev, &dev_attr_delay_param_2);
	device_remove_file(dev, &dev_attr_delay_volume_mix);
	device_remove_file(dev, &dev_attr_delay_param_3);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_reverb_enable);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_reverb_type);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_reverb_decay);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_reverb_tone);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_reverb_pre_delay);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_reverb_pre_post);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_2_frequency);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_3_frequency__bass);
	device_remove_file(dev, &dev_attr_wah_enable);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_modulation_lo_cut);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_delay_reverb_lo_cut);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_volume_pedal_minimum);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_eq_pre_post);
	device_remove_file(dev, &dev_attr_volume_pre_post);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_di_model);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_di_delay);
	device_remove_file(dev, &dev_attr_mod_enable);
	device_remove_file(dev, &dev_attr_mod_param_1_note_value);
	device_remove_file(dev, &dev_attr_mod_param_2);
	device_remove_file(dev, &dev_attr_mod_param_3);
	device_remove_file(dev, &dev_attr_mod_param_4);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_mod_param_5);
	device_remove_file(dev, &dev_attr_mod_volume_mix);
	device_remove_file(dev, &dev_attr_mod_pre_post);
	device_remove_file(dev, &dev_attr_modulation_model);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_3_frequency);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_4_frequency__bass);
	device_remove_file(dev, &dev_attr_mod_param_1_double_precision);
	device_remove_file(dev, &dev_attr_delay_param_1_double_precision);
	if (firmware >= 200)
		device_remove_file(dev, &dev_attr_eq_enable);
	device_remove_file(dev, &dev_attr_tap);
	device_remove_file(dev, &dev_attr_volume_tweak_pedal_assign);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_5_frequency);
	device_remove_file(dev, &dev_attr_tuner);
	device_remove_file(dev, &dev_attr_mic_selection);
	device_remove_file(dev, &dev_attr_cabinet_model);
	device_remove_file(dev, &dev_attr_stomp_model);
	device_remove_file(dev, &dev_attr_roomlevel);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_4_frequency);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_6_frequency);
	device_remove_file(dev, &dev_attr_stomp_param_1_note_value);
	device_remove_file(dev, &dev_attr_stomp_param_2);
	device_remove_file(dev, &dev_attr_stomp_param_3);
	device_remove_file(dev, &dev_attr_stomp_param_4);
	device_remove_file(dev, &dev_attr_stomp_param_5);
	device_remove_file(dev, &dev_attr_stomp_param_6);
	if ((type & (LINE6_BITS_LIVE)) != 0)
		device_remove_file(dev, &dev_attr_amp_switch_select);
	device_remove_file(dev, &dev_attr_delay_param_4);
	device_remove_file(dev, &dev_attr_delay_param_5);
	device_remove_file(dev, &dev_attr_delay_pre_post);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_delay_model);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		device_remove_file(dev, &dev_attr_delay_verb_model);
	device_remove_file(dev, &dev_attr_tempo_msb);
	device_remove_file(dev, &dev_attr_tempo_lsb);
	if (firmware >= 300)
		device_remove_file(dev, &dev_attr_wah_model);
	if (firmware >= 214)
		device_remove_file(dev, &dev_attr_bypass_volume);
	if ((type & (LINE6_BITS_PRO)) != 0)
		device_remove_file(dev, &dev_attr_fx_loop_on_off);
	device_remove_file(dev, &dev_attr_tweak_param_select);
	device_remove_file(dev, &dev_attr_amp1_engage);
	if (firmware >= 200)
		device_remove_file(dev, &dev_attr_band_1_gain);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_2_gain__bass);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_2_gain);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_3_gain__bass);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_3_gain);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_4_gain__bass);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_5_gain__bass);
	if ((type & (LINE6_BITS_PODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_4_gain);
	if ((type & (LINE6_BITS_BASSPODXTALL)) != 0)
		if (firmware >= 200)
			device_remove_file(dev, &dev_attr_band_6_gain__bass);
}

/*
 * Register the Variax control files in sysfs.  These are unconditional:
 * neither @firmware nor @type is consulted.
 */
int line6_variax_create_files(int firmware, int type, struct device *dev)
{
	int err;

	CHECK_RETURN(device_create_file(dev, &dev_attr_body));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_enable));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_type));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_position));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_angle));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_level));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_enable));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_type));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_position));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_angle));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_level));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup_phase));
	CHECK_RETURN(device_create_file(dev, &dev_attr_capacitance));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tone_resistance));
	CHECK_RETURN(device_create_file(dev, &dev_attr_volume_resistance));
	CHECK_RETURN(device_create_file(dev, &dev_attr_taper));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tone_dump));
	CHECK_RETURN(device_create_file(dev, &dev_attr_save_tone));
	CHECK_RETURN(device_create_file(dev, &dev_attr_volume_dump));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuning_enable));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuning6));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuning5));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuning4));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuning3));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuning2));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuning1));
	CHECK_RETURN(device_create_file(dev, &dev_attr_detune6));
	CHECK_RETURN(device_create_file(dev, &dev_attr_detune5));
	CHECK_RETURN(device_create_file(dev, &dev_attr_detune4));
	CHECK_RETURN(device_create_file(dev, &dev_attr_detune3));
	CHECK_RETURN(device_create_file(dev, &dev_attr_detune2));
	CHECK_RETURN(device_create_file(dev, &dev_attr_detune1));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mix6));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mix5));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mix4));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mix3));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mix2));
	CHECK_RETURN(device_create_file(dev, &dev_attr_mix1));
	CHECK_RETURN(device_create_file(dev, &dev_attr_pickup_wiring));

	return 0;
}

/* Remove the Variax control files; mirrors line6_variax_create_files(). */
void line6_variax_remove_files(int firmware, int type, struct device *dev)
{
	device_remove_file(dev, &dev_attr_body);
	device_remove_file(dev, &dev_attr_pickup1_enable);
	device_remove_file(dev, &dev_attr_pickup1_type);
	device_remove_file(dev, &dev_attr_pickup1_position);
	device_remove_file(dev, &dev_attr_pickup1_angle);
	device_remove_file(dev, &dev_attr_pickup1_level);
	device_remove_file(dev, &dev_attr_pickup2_enable);
	device_remove_file(dev, &dev_attr_pickup2_type);
	device_remove_file(dev, &dev_attr_pickup2_position);
	device_remove_file(dev, &dev_attr_pickup2_angle);
	device_remove_file(dev, &dev_attr_pickup2_level);
	device_remove_file(dev, &dev_attr_pickup_phase);
	device_remove_file(dev, &dev_attr_capacitance);
	device_remove_file(dev, &dev_attr_tone_resistance);
	device_remove_file(dev, &dev_attr_volume_resistance);
	device_remove_file(dev, &dev_attr_taper);
	device_remove_file(dev, &dev_attr_tone_dump);
	device_remove_file(dev, &dev_attr_save_tone);
	device_remove_file(dev, &dev_attr_volume_dump);
	device_remove_file(dev, &dev_attr_tuning_enable);
	device_remove_file(dev, &dev_attr_tuning6);
	device_remove_file(dev, &dev_attr_tuning5);
	device_remove_file(dev, &dev_attr_tuning4);
	device_remove_file(dev, &dev_attr_tuning3);
	device_remove_file(dev, &dev_attr_tuning2);
	device_remove_file(dev, &dev_attr_tuning1);
	device_remove_file(dev, &dev_attr_detune6);
	device_remove_file(dev, &dev_attr_detune5);
	device_remove_file(dev, &dev_attr_detune4);
	device_remove_file(dev, &dev_attr_detune3);
	device_remove_file(dev, &dev_attr_detune2);
	device_remove_file(dev, &dev_attr_detune1);
	device_remove_file(dev, &dev_attr_mix6);
	device_remove_file(dev, &dev_attr_mix5);
	device_remove_file(dev, &dev_attr_mix4);
	device_remove_file(dev, &dev_attr_mix3);
	device_remove_file(dev, &dev_attr_mix2);
	device_remove_file(dev, &dev_attr_mix1);
	device_remove_file(dev, &dev_attr_pickup_wiring);
}
gpl-2.0
hwoarang/linux
drivers/net/phy/mdio-bitbang.c
8741
5751
/* * Bitbanged MDIO support. * * Author: Scott Wood <scottwood@freescale.com> * Copyright (c) 2007 Freescale Semiconductor * * Based on CPM2 MDIO code which is: * * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/module.h> #include <linux/mdio-bitbang.h> #include <linux/types.h> #include <linux/delay.h> #define MDIO_READ 2 #define MDIO_WRITE 1 #define MDIO_C45 (1<<15) #define MDIO_C45_ADDR (MDIO_C45 | 0) #define MDIO_C45_READ (MDIO_C45 | 3) #define MDIO_C45_WRITE (MDIO_C45 | 1) #define MDIO_SETUP_TIME 10 #define MDIO_HOLD_TIME 10 /* Minimum MDC period is 400 ns, plus some margin for error. MDIO_DELAY * is done twice per period. */ #define MDIO_DELAY 250 /* The PHY may take up to 300 ns to produce data, plus some margin * for error. */ #define MDIO_READ_DELAY 350 /* MDIO must already be configured as output. */ static void mdiobb_send_bit(struct mdiobb_ctrl *ctrl, int val) { const struct mdiobb_ops *ops = ctrl->ops; ops->set_mdio_data(ctrl, val); ndelay(MDIO_DELAY); ops->set_mdc(ctrl, 1); ndelay(MDIO_DELAY); ops->set_mdc(ctrl, 0); } /* MDIO must already be configured as input. */ static int mdiobb_get_bit(struct mdiobb_ctrl *ctrl) { const struct mdiobb_ops *ops = ctrl->ops; ndelay(MDIO_DELAY); ops->set_mdc(ctrl, 1); ndelay(MDIO_READ_DELAY); ops->set_mdc(ctrl, 0); return ops->get_mdio_data(ctrl); } /* MDIO must already be configured as output. */ static void mdiobb_send_num(struct mdiobb_ctrl *ctrl, u16 val, int bits) { int i; for (i = bits - 1; i >= 0; i--) mdiobb_send_bit(ctrl, (val >> i) & 1); } /* MDIO must already be configured as input. 
*/ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits) { int i; u16 ret = 0; for (i = bits - 1; i >= 0; i--) { ret <<= 1; ret |= mdiobb_get_bit(ctrl); } return ret; } /* Utility to send the preamble, address, and * register (common to read and write). */ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg) { const struct mdiobb_ops *ops = ctrl->ops; int i; ops->set_mdio_dir(ctrl, 1); /* * Send a 32 bit preamble ('1's) with an extra '1' bit for good * measure. The IEEE spec says this is a PHY optional * requirement. The AMD 79C874 requires one after power up and * one after a MII communications error. This means that we are * doing more preambles than we need, but it is safer and will be * much more robust. */ for (i = 0; i < 32; i++) mdiobb_send_bit(ctrl, 1); /* send the start bit (01) and the read opcode (10) or write (10). Clause 45 operation uses 00 for the start and 11, 10 for read/write */ mdiobb_send_bit(ctrl, 0); if (op & MDIO_C45) mdiobb_send_bit(ctrl, 0); else mdiobb_send_bit(ctrl, 1); mdiobb_send_bit(ctrl, (op >> 1) & 1); mdiobb_send_bit(ctrl, (op >> 0) & 1); mdiobb_send_num(ctrl, phy, 5); mdiobb_send_num(ctrl, reg, 5); } /* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the lower 16 bits of the 21 bit address. This transfer is done identically to a MDIO_WRITE except for a different code. To enable clause 45 mode or MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices can exist on the same bus. Normal devices should ignore the MDIO_ADDR phase. 
*/ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr) { unsigned int dev_addr = (addr >> 16) & 0x1F; unsigned int reg = addr & 0xFFFF; mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr); /* send the turnaround (10) */ mdiobb_send_bit(ctrl, 1); mdiobb_send_bit(ctrl, 0); mdiobb_send_num(ctrl, reg, 16); ctrl->ops->set_mdio_dir(ctrl, 0); mdiobb_get_bit(ctrl); return dev_addr; } static int mdiobb_read(struct mii_bus *bus, int phy, int reg) { struct mdiobb_ctrl *ctrl = bus->priv; int ret, i; if (reg & MII_ADDR_C45) { reg = mdiobb_cmd_addr(ctrl, phy, reg); mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg); } else mdiobb_cmd(ctrl, MDIO_READ, phy, reg); ctrl->ops->set_mdio_dir(ctrl, 0); /* check the turnaround bit: the PHY should be driving it to zero */ if (mdiobb_get_bit(ctrl) != 0) { /* PHY didn't drive TA low -- flush any bits it * may be trying to send. */ for (i = 0; i < 32; i++) mdiobb_get_bit(ctrl); return 0xffff; } ret = mdiobb_get_num(ctrl, 16); mdiobb_get_bit(ctrl); return ret; } static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct mdiobb_ctrl *ctrl = bus->priv; if (reg & MII_ADDR_C45) { reg = mdiobb_cmd_addr(ctrl, phy, reg); mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg); } else mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); /* send the turnaround (10) */ mdiobb_send_bit(ctrl, 1); mdiobb_send_bit(ctrl, 0); mdiobb_send_num(ctrl, val, 16); ctrl->ops->set_mdio_dir(ctrl, 0); mdiobb_get_bit(ctrl); return 0; } static int mdiobb_reset(struct mii_bus *bus) { struct mdiobb_ctrl *ctrl = bus->priv; if (ctrl->reset) ctrl->reset(bus); return 0; } struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) { struct mii_bus *bus; bus = mdiobus_alloc(); if (!bus) return NULL; __module_get(ctrl->ops->owner); bus->read = mdiobb_read; bus->write = mdiobb_write; bus->reset = mdiobb_reset; bus->priv = ctrl; return bus; } EXPORT_SYMBOL(alloc_mdio_bitbang); void free_mdio_bitbang(struct mii_bus *bus) { struct mdiobb_ctrl *ctrl = bus->priv; 
module_put(ctrl->ops->owner); mdiobus_free(bus); } EXPORT_SYMBOL(free_mdio_bitbang); MODULE_LICENSE("GPL");
gpl-2.0
mixianghang/mptcp
drivers/net/phy/mdio-bitbang.c
8741
5751
/* * Bitbanged MDIO support. * * Author: Scott Wood <scottwood@freescale.com> * Copyright (c) 2007 Freescale Semiconductor * * Based on CPM2 MDIO code which is: * * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/module.h> #include <linux/mdio-bitbang.h> #include <linux/types.h> #include <linux/delay.h> #define MDIO_READ 2 #define MDIO_WRITE 1 #define MDIO_C45 (1<<15) #define MDIO_C45_ADDR (MDIO_C45 | 0) #define MDIO_C45_READ (MDIO_C45 | 3) #define MDIO_C45_WRITE (MDIO_C45 | 1) #define MDIO_SETUP_TIME 10 #define MDIO_HOLD_TIME 10 /* Minimum MDC period is 400 ns, plus some margin for error. MDIO_DELAY * is done twice per period. */ #define MDIO_DELAY 250 /* The PHY may take up to 300 ns to produce data, plus some margin * for error. */ #define MDIO_READ_DELAY 350 /* MDIO must already be configured as output. */ static void mdiobb_send_bit(struct mdiobb_ctrl *ctrl, int val) { const struct mdiobb_ops *ops = ctrl->ops; ops->set_mdio_data(ctrl, val); ndelay(MDIO_DELAY); ops->set_mdc(ctrl, 1); ndelay(MDIO_DELAY); ops->set_mdc(ctrl, 0); } /* MDIO must already be configured as input. */ static int mdiobb_get_bit(struct mdiobb_ctrl *ctrl) { const struct mdiobb_ops *ops = ctrl->ops; ndelay(MDIO_DELAY); ops->set_mdc(ctrl, 1); ndelay(MDIO_READ_DELAY); ops->set_mdc(ctrl, 0); return ops->get_mdio_data(ctrl); } /* MDIO must already be configured as output. */ static void mdiobb_send_num(struct mdiobb_ctrl *ctrl, u16 val, int bits) { int i; for (i = bits - 1; i >= 0; i--) mdiobb_send_bit(ctrl, (val >> i) & 1); } /* MDIO must already be configured as input. 
*/ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits) { int i; u16 ret = 0; for (i = bits - 1; i >= 0; i--) { ret <<= 1; ret |= mdiobb_get_bit(ctrl); } return ret; } /* Utility to send the preamble, address, and * register (common to read and write). */ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg) { const struct mdiobb_ops *ops = ctrl->ops; int i; ops->set_mdio_dir(ctrl, 1); /* * Send a 32 bit preamble ('1's) with an extra '1' bit for good * measure. The IEEE spec says this is a PHY optional * requirement. The AMD 79C874 requires one after power up and * one after a MII communications error. This means that we are * doing more preambles than we need, but it is safer and will be * much more robust. */ for (i = 0; i < 32; i++) mdiobb_send_bit(ctrl, 1); /* send the start bit (01) and the read opcode (10) or write (10). Clause 45 operation uses 00 for the start and 11, 10 for read/write */ mdiobb_send_bit(ctrl, 0); if (op & MDIO_C45) mdiobb_send_bit(ctrl, 0); else mdiobb_send_bit(ctrl, 1); mdiobb_send_bit(ctrl, (op >> 1) & 1); mdiobb_send_bit(ctrl, (op >> 0) & 1); mdiobb_send_num(ctrl, phy, 5); mdiobb_send_num(ctrl, reg, 5); } /* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the lower 16 bits of the 21 bit address. This transfer is done identically to a MDIO_WRITE except for a different code. To enable clause 45 mode or MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices can exist on the same bus. Normal devices should ignore the MDIO_ADDR phase. 
*/ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr) { unsigned int dev_addr = (addr >> 16) & 0x1F; unsigned int reg = addr & 0xFFFF; mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr); /* send the turnaround (10) */ mdiobb_send_bit(ctrl, 1); mdiobb_send_bit(ctrl, 0); mdiobb_send_num(ctrl, reg, 16); ctrl->ops->set_mdio_dir(ctrl, 0); mdiobb_get_bit(ctrl); return dev_addr; } static int mdiobb_read(struct mii_bus *bus, int phy, int reg) { struct mdiobb_ctrl *ctrl = bus->priv; int ret, i; if (reg & MII_ADDR_C45) { reg = mdiobb_cmd_addr(ctrl, phy, reg); mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg); } else mdiobb_cmd(ctrl, MDIO_READ, phy, reg); ctrl->ops->set_mdio_dir(ctrl, 0); /* check the turnaround bit: the PHY should be driving it to zero */ if (mdiobb_get_bit(ctrl) != 0) { /* PHY didn't drive TA low -- flush any bits it * may be trying to send. */ for (i = 0; i < 32; i++) mdiobb_get_bit(ctrl); return 0xffff; } ret = mdiobb_get_num(ctrl, 16); mdiobb_get_bit(ctrl); return ret; } static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct mdiobb_ctrl *ctrl = bus->priv; if (reg & MII_ADDR_C45) { reg = mdiobb_cmd_addr(ctrl, phy, reg); mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg); } else mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); /* send the turnaround (10) */ mdiobb_send_bit(ctrl, 1); mdiobb_send_bit(ctrl, 0); mdiobb_send_num(ctrl, val, 16); ctrl->ops->set_mdio_dir(ctrl, 0); mdiobb_get_bit(ctrl); return 0; } static int mdiobb_reset(struct mii_bus *bus) { struct mdiobb_ctrl *ctrl = bus->priv; if (ctrl->reset) ctrl->reset(bus); return 0; } struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) { struct mii_bus *bus; bus = mdiobus_alloc(); if (!bus) return NULL; __module_get(ctrl->ops->owner); bus->read = mdiobb_read; bus->write = mdiobb_write; bus->reset = mdiobb_reset; bus->priv = ctrl; return bus; } EXPORT_SYMBOL(alloc_mdio_bitbang); void free_mdio_bitbang(struct mii_bus *bus) { struct mdiobb_ctrl *ctrl = bus->priv; 
module_put(ctrl->ops->owner); mdiobus_free(bus); } EXPORT_SYMBOL(free_mdio_bitbang); MODULE_LICENSE("GPL");
gpl-2.0
ChangYeoun/10.1
arch/parisc/math-emu/fpudispatch.c
8741
39225
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/fp/fpudispatch.c $Revision: 1.1 $ * * Purpose: * <<please update with a synopsis of the functionality provided by this file>> * * External Interfaces: * <<the following list was autogenerated, please review>> * emfpudispatch(ir, dummy1, dummy2, fpregs) * fpudispatch(ir, excp_code, holder, fpregs) * * Internal Interfaces: * <<the following list was autogenerated, please review>> * static u_int decode_06(u_int, u_int *) * static u_int decode_0c(u_int, u_int, u_int, u_int *) * static u_int decode_0e(u_int, u_int, u_int, u_int *) * static u_int decode_26(u_int, u_int *) * static u_int decode_2e(u_int, u_int *) * static void update_status_cbit(u_int *, u_int, u_int, u_int) * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #define FPUDEBUG 0 #include "float.h" #include <linux/bug.h> #include <linux/kernel.h> #include <asm/processor.h> /* #include <sys/debug.h> */ /* #include <machine/sys/mdep_private.h> */ #define COPR_INST 0x30000000 /* * definition of extru macro. 
If pos and len are constants, the compiler * will generate an extru instruction when optimized */ #define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1)) /* definitions of bit field locations in the instruction */ #define fpmajorpos 5 #define fpr1pos 10 #define fpr2pos 15 #define fptpos 31 #define fpsubpos 18 #define fpclass1subpos 16 #define fpclasspos 22 #define fpfmtpos 20 #define fpdfpos 18 #define fpnulpos 26 /* * the following are the extra bits for the 0E major op */ #define fpxr1pos 24 #define fpxr2pos 19 #define fpxtpos 25 #define fpxpos 23 #define fp0efmtpos 20 /* * the following are for the multi-ops */ #define fprm1pos 10 #define fprm2pos 15 #define fptmpos 31 #define fprapos 25 #define fptapos 20 #define fpmultifmt 26 /* * the following are for the fused FP instructions */ /* fprm1pos 10 */ /* fprm2pos 15 */ #define fpraupos 18 #define fpxrm2pos 19 /* fpfmtpos 20 */ #define fpralpos 23 #define fpxrm1pos 24 /* fpxtpos 25 */ #define fpfusedsubop 26 /* fptpos 31 */ /* * offset to constant zero in the FP emulation registers */ #define fpzeroreg (32*sizeof(double)/sizeof(u_int)) /* * extract the major opcode from the instruction */ #define get_major(op) extru(op,fpmajorpos,6) /* * extract the two bit class field from the FP instruction. The class is at bit * positions 21-22 */ #define get_class(op) extru(op,fpclasspos,2) /* * extract the 3 bit subop field. For all but class 1 instructions, it is * located at bit positions 16-18 */ #define get_subop(op) extru(op,fpsubpos,3) /* * extract the 2 or 3 bit subop field from class 1 instructions. 
It is located * at bit positions 15-16 (PA1.1) or 14-16 (PA2.0) */ #define get_subop1_PA1_1(op) extru(op,fpclass1subpos,2) /* PA89 (1.1) fmt */ #define get_subop1_PA2_0(op) extru(op,fpclass1subpos,3) /* PA 2.0 fmt */ /* definitions of unimplemented exceptions */ #define MAJOR_0C_EXCP 0x09 #define MAJOR_0E_EXCP 0x0b #define MAJOR_06_EXCP 0x03 #define MAJOR_26_EXCP 0x23 #define MAJOR_2E_EXCP 0x2b #define PA83_UNIMP_EXCP 0x01 /* * Special Defines for TIMEX specific code */ #define FPU_TYPE_FLAG_POS (EM_FPU_TYPE_OFFSET>>2) #define TIMEX_ROLEX_FPU_MASK (TIMEX_EXTEN_FLAG|ROLEX_EXTEN_FLAG) /* * Static function definitions */ #define _PROTOTYPES #if defined(_PROTOTYPES) || defined(_lint) static u_int decode_0c(u_int, u_int, u_int, u_int *); static u_int decode_0e(u_int, u_int, u_int, u_int *); static u_int decode_06(u_int, u_int *); static u_int decode_26(u_int, u_int *); static u_int decode_2e(u_int, u_int *); static void update_status_cbit(u_int *, u_int, u_int, u_int); #else /* !_PROTOTYPES&&!_lint */ static u_int decode_0c(); static u_int decode_0e(); static u_int decode_06(); static u_int decode_26(); static u_int decode_2e(); static void update_status_cbit(); #endif /* _PROTOTYPES&&!_lint */ #define VASSERT(x) static void parisc_linux_get_fpu_type(u_int fpregs[]) { /* on pa-linux the fpu type is not filled in by the * caller; it is constructed here */ if (boot_cpu_data.cpu_type == pcxs) fpregs[FPU_TYPE_FLAG_POS] = TIMEX_EXTEN_FLAG; else if (boot_cpu_data.cpu_type == pcxt || boot_cpu_data.cpu_type == pcxt_) fpregs[FPU_TYPE_FLAG_POS] = ROLEX_EXTEN_FLAG; else if (boot_cpu_data.cpu_type >= pcxu) fpregs[FPU_TYPE_FLAG_POS] = PA2_0_FPU_FLAG; } /* * this routine will decode the excepting floating point instruction and * call the approiate emulation routine. 
* It is called by decode_fpu with the following parameters: * fpudispatch(current_ir, unimplemented_code, 0, &Fpu_register) * where current_ir is the instruction to be emulated, * unimplemented_code is the exception_code that the hardware generated * and &Fpu_register is the address of emulated FP reg 0. */ u_int fpudispatch(u_int ir, u_int excp_code, u_int holder, u_int fpregs[]) { u_int class, subop; u_int fpu_type_flags; /* All FP emulation code assumes that ints are 4-bytes in length */ VASSERT(sizeof(int) == 4); parisc_linux_get_fpu_type(fpregs); fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ class = get_class(ir); if (class == 1) { if (fpu_type_flags & PA2_0_FPU_FLAG) subop = get_subop1_PA2_0(ir); else subop = get_subop1_PA1_1(ir); } else subop = get_subop(ir); if (FPUDEBUG) printk("class %d subop %d\n", class, subop); switch (excp_code) { case MAJOR_0C_EXCP: case PA83_UNIMP_EXCP: return(decode_0c(ir,class,subop,fpregs)); case MAJOR_0E_EXCP: return(decode_0e(ir,class,subop,fpregs)); case MAJOR_06_EXCP: return(decode_06(ir,fpregs)); case MAJOR_26_EXCP: return(decode_26(ir,fpregs)); case MAJOR_2E_EXCP: return(decode_2e(ir,fpregs)); default: /* "crashme Night Gallery painting nr 2. (asm_crash.s). * This was fixed for multi-user kernels, but * workstation kernels had a panic here. This allowed * any arbitrary user to panic the kernel by executing * setting the FP exception registers to strange values * and generating an emulation trap. The emulation and * exception code must never be able to panic the * kernel. 
*/ return(UNIMPLEMENTEDEXCEPTION); } } /* * this routine is called by $emulation_trap to emulate a coprocessor * instruction if one doesn't exist */ u_int emfpudispatch(u_int ir, u_int dummy1, u_int dummy2, u_int fpregs[]) { u_int class, subop, major; u_int fpu_type_flags; /* All FP emulation code assumes that ints are 4-bytes in length */ VASSERT(sizeof(int) == 4); fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ major = get_major(ir); class = get_class(ir); if (class == 1) { if (fpu_type_flags & PA2_0_FPU_FLAG) subop = get_subop1_PA2_0(ir); else subop = get_subop1_PA1_1(ir); } else subop = get_subop(ir); switch (major) { case 0x0C: return(decode_0c(ir,class,subop,fpregs)); case 0x0E: return(decode_0e(ir,class,subop,fpregs)); case 0x06: return(decode_06(ir,fpregs)); case 0x26: return(decode_26(ir,fpregs)); case 0x2E: return(decode_2e(ir,fpregs)); default: return(PA83_UNIMP_EXCP); } } static u_int decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) { u_int r1,r2,t; /* operand register offsets */ u_int fmt; /* also sf for class 1 conversions */ u_int df; /* for class 1 conversions */ u_int *status; u_int retval, local_status; u_int fpu_type_flags; if (ir == COPR_INST) { fpregs[0] = EMULATION_VERSION << 11; return(NOEXCEPTION); } status = &fpregs[0]; /* fp status register */ local_status = fpregs[0]; /* and local copy */ r1 = extru(ir,fpr1pos,5) * sizeof(double)/sizeof(u_int); if (r1 == 0) /* map fr0 source to constant zero */ r1 = fpzeroreg; t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int); if (t == 0 && class != 2) /* don't allow fr0 as a dest */ return(MAJOR_0C_EXCP); fmt = extru(ir,fpfmtpos,2); /* get fmt completer */ switch (class) { case 0: switch (subop) { case 0: /* COPR 0,0 emulated above*/ case 1: return(MAJOR_0C_EXCP); case 2: /* FCPY */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: 
/* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1]; return(NOEXCEPTION); } case 3: /* FABS */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ /* copy and clear sign bit */ fpregs[t] = fpregs[r1] & 0x7fffffff; return(NOEXCEPTION); } case 6: /* FNEG */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ /* copy and invert sign bit */ fpregs[t] = fpregs[r1] ^ 0x80000000; return(NOEXCEPTION); } case 7: /* FNEGABS */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ /* copy and set sign bit */ fpregs[t] = fpregs[r1] | 0x80000000; return(NOEXCEPTION); } case 4: /* FSQRT */ switch (fmt) { case 0: return(sgl_fsqrt(&fpregs[r1],0, &fpregs[t],status)); case 1: return(dbl_fsqrt(&fpregs[r1],0, &fpregs[t],status)); case 2: case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 5: /* FRND */ switch (fmt) { case 0: return(sgl_frnd(&fpregs[r1],0, &fpregs[t],status)); case 1: return(dbl_frnd(&fpregs[r1],0, &fpregs[t],status)); case 2: case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } } /* end of switch (subop) */ case 1: /* class 1 */ df = extru(ir,fpdfpos,2); /* get dest format */ if ((df & 2) || (fmt & 2)) { /* * fmt's 2 and 3 are illegal of not implemented * quad conversions */ return(MAJOR_0C_EXCP); } /* * encode source and dest formats into 2 bits. * high bit is source, low bit is dest. 
* bit = 1 --> double precision */ fmt = (fmt << 1) | df; switch (subop) { case 0: /* FCNVFF */ switch(fmt) { case 0: /* sgl/sgl */ return(MAJOR_0C_EXCP); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(MAJOR_0C_EXCP); } case 1: /* FCNVXF */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); } case 2: /* FCNVFX */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); } case 3: /* FCNVFXT */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); } case 5: /* FCNVUF (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); } case 6: /* FCNVFU (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 1: 
/* sgl/dbl */ return(sgl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); } case 7: /* FCNVFUT (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); } case 4: /* undefined */ return(MAJOR_0C_EXCP); } /* end of switch subop */ case 2: /* class 2 */ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int); if (r2 == 0) r2 = fpzeroreg; if (fpu_type_flags & PA2_0_FPU_FLAG) { /* FTEST if nullify bit set, otherwise FCMP */ if (extru(ir, fpnulpos, 1)) { /* FTEST */ switch (fmt) { case 0: /* * arg0 is not used * second param is the t field used for * ftest,acc and ftest,rej * third param is the subop (y-field) */ BUG(); /* Unsupported * return(ftest(0L,extru(ir,fptpos,5), * &fpregs[0],subop)); */ case 1: case 2: case 3: return(MAJOR_0C_EXCP); } } else { /* FCMP */ switch (fmt) { case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } } } /* end of if for PA2.0 */ else { /* PA1.0 & PA1.1 */ switch (subop) { case 2: case 3: case 4: case 5: case 6: case 7: return(MAJOR_0C_EXCP); case 0: /* FCMP */ switch (fmt) { case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); 
update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 1: /* FTEST */ switch (fmt) { case 0: /* * arg0 is not used * second param is the t field used for * ftest,acc and ftest,rej * third param is the subop (y-field) */ BUG(); /* unsupported * return(ftest(0L,extru(ir,fptpos,5), * &fpregs[0],subop)); */ case 1: case 2: case 3: return(MAJOR_0C_EXCP); } } /* end of switch subop */ } /* end of else for PA1.0 & PA1.1 */ case 3: /* class 3 */ r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int); if (r2 == 0) r2 = fpzeroreg; switch (subop) { case 5: case 6: case 7: return(MAJOR_0C_EXCP); case 0: /* FADD */ switch (fmt) { case 0: return(sgl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 1: /* FSUB */ switch (fmt) { case 0: return(sgl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 2: /* FMPY */ switch (fmt) { case 0: return(sgl_fmpy(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fmpy(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 3: /* FDIV */ switch (fmt) { case 0: return(sgl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 4: /* FREM */ switch (fmt) { case 0: return(sgl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); 
case 1: return(dbl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } } /* end of class 3 switch */ } /* end of switch(class) */ /* If we get here, something is really wrong! */ return(MAJOR_0C_EXCP); } static u_int decode_0e(ir,class,subop,fpregs) u_int ir,class,subop; u_int fpregs[]; { u_int r1,r2,t; /* operand register offsets */ u_int fmt; /* also sf for class 1 conversions */ u_int df; /* dest format for class 1 conversions */ u_int *status; u_int retval, local_status; u_int fpu_type_flags; status = &fpregs[0]; local_status = fpregs[0]; r1 = ((extru(ir,fpr1pos,5)<<1)|(extru(ir,fpxr1pos,1))); if (r1 == 0) r1 = fpzeroreg; t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1))); if (t == 0 && class != 2) return(MAJOR_0E_EXCP); if (class < 2) /* class 0 or 1 has 2 bit fmt */ fmt = extru(ir,fpfmtpos,2); else /* class 2 and 3 have 1 bit fmt */ fmt = extru(ir,fp0efmtpos,1); /* * An undefined combination, double precision accessing the * right half of a FPR, can get us into trouble. * Let's just force proper alignment on it. 
*/ if (fmt == DBL) { r1 &= ~1; if (class != 1) t &= ~1; } switch (class) { case 0: switch (subop) { case 0: /* unimplemented */ case 1: return(MAJOR_0E_EXCP); case 2: /* FCPY */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1]; return(NOEXCEPTION); } case 3: /* FABS */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1] & 0x7fffffff; return(NOEXCEPTION); } case 6: /* FNEG */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1] ^ 0x80000000; return(NOEXCEPTION); } case 7: /* FNEGABS */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1] | 0x80000000; return(NOEXCEPTION); } case 4: /* FSQRT */ switch (fmt) { case 0: return(sgl_fsqrt(&fpregs[r1],0, &fpregs[t], status)); case 1: return(dbl_fsqrt(&fpregs[r1],0, &fpregs[t], status)); case 2: case 3: return(MAJOR_0E_EXCP); } case 5: /* FRMD */ switch (fmt) { case 0: return(sgl_frnd(&fpregs[r1],0, &fpregs[t], status)); case 1: return(dbl_frnd(&fpregs[r1],0, &fpregs[t], status)); case 2: case 3: return(MAJOR_0E_EXCP); } } /* end of switch (subop */ case 1: /* class 1 */ df = extru(ir,fpdfpos,2); /* get dest format */ /* * Fix Crashme problem (writing to 31R in double precision) * here too. 
*/ if (df == DBL) { t &= ~1; } if ((df & 2) || (fmt & 2)) return(MAJOR_0E_EXCP); fmt = (fmt << 1) | df; switch (subop) { case 0: /* FCNVFF */ switch(fmt) { case 0: /* sgl/sgl */ return(MAJOR_0E_EXCP); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(MAJOR_0E_EXCP); } case 1: /* FCNVXF */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); } case 2: /* FCNVFX */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); } case 3: /* FCNVFXT */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); } case 5: /* FCNVUF (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); } case 6: /* FCNVFU (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ 
return(sgl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); } case 7: /* FCNVFUT (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); } case 4: /* undefined */ return(MAJOR_0C_EXCP); } /* end of switch subop */ case 2: /* class 2 */ /* * Be careful out there. * Crashme can generate cases where FR31R is specified * as the source or target of a double precision operation. * Since we just pass the address of the floating-point * register to the emulation routines, this can cause * corruption of fpzeroreg. 
*/ if (fmt == DBL) r2 = (extru(ir,fpr2pos,5)<<1); else r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1))); fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; if (r2 == 0) r2 = fpzeroreg; if (fpu_type_flags & PA2_0_FPU_FLAG) { /* FTEST if nullify bit set, otherwise FCMP */ if (extru(ir, fpnulpos, 1)) { /* FTEST */ /* not legal */ return(MAJOR_0E_EXCP); } else { /* FCMP */ switch (fmt) { /* * fmt is only 1 bit long */ case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); } } } /* end of if for PA2.0 */ else { /* PA1.0 & PA1.1 */ switch (subop) { case 1: case 2: case 3: case 4: case 5: case 6: case 7: return(MAJOR_0E_EXCP); case 0: /* FCMP */ switch (fmt) { /* * fmt is only 1 bit long */ case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); } } /* end of switch subop */ } /* end of else for PA1.0 & PA1.1 */ case 3: /* class 3 */ /* * Be careful out there. * Crashme can generate cases where FR31R is specified * as the source or target of a double precision operation. * Since we just pass the address of the floating-point * register to the emulation routines, this can cause * corruption of fpzeroreg. 
*/ if (fmt == DBL) r2 = (extru(ir,fpr2pos,5)<<1); else r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1))); if (r2 == 0) r2 = fpzeroreg; switch (subop) { case 5: case 6: case 7: return(MAJOR_0E_EXCP); /* * Note that fmt is only 1 bit for class 3 */ case 0: /* FADD */ switch (fmt) { case 0: return(sgl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } case 1: /* FSUB */ switch (fmt) { case 0: return(sgl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } case 2: /* FMPY or XMPYU */ /* * check for integer multiply (x bit set) */ if (extru(ir,fpxpos,1)) { /* * emulate XMPYU */ switch (fmt) { case 0: /* * bad instruction if t specifies * the right half of a register */ if (t & 1) return(MAJOR_0E_EXCP); BUG(); /* unsupported * impyu(&fpregs[r1],&fpregs[r2], * &fpregs[t]); */ return(NOEXCEPTION); case 1: return(MAJOR_0E_EXCP); } } else { /* FMPY */ switch (fmt) { case 0: return(sgl_fmpy(&fpregs[r1], &fpregs[r2],&fpregs[t],status)); case 1: return(dbl_fmpy(&fpregs[r1], &fpregs[r2],&fpregs[t],status)); } } case 3: /* FDIV */ switch (fmt) { case 0: return(sgl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } case 4: /* FREM */ switch (fmt) { case 0: return(sgl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } } /* end of class 3 switch */ } /* end of switch(class) */ /* If we get here, something is really wrong! 
*/ return(MAJOR_0E_EXCP); } /* * routine to decode the 06 (FMPYADD and FMPYCFXT) instruction */ static u_int decode_06(ir,fpregs) u_int ir; u_int fpregs[]; { u_int rm1, rm2, tm, ra, ta; /* operands */ u_int fmt; u_int error = 0; u_int status; u_int fpu_type_flags; union { double dbl; float flt; struct { u_int i1; u_int i2; } ints; } mtmp, atmp; status = fpregs[0]; /* use a local copy of status reg */ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */ if (fmt == 0) { /* DBL */ rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int); if (rm1 == 0) rm1 = fpzeroreg; rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int); if (rm2 == 0) rm2 = fpzeroreg; tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int); if (tm == 0) return(MAJOR_06_EXCP); ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int); ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int); if (ta == 0) return(MAJOR_06_EXCP); if (fpu_type_flags & TIMEX_ROLEX_FPU_MASK) { if (ra == 0) { /* special case FMPYCFXT, see sgl case below */ if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2], &mtmp.ints.i1,&status)) error = 1; if (dbl_to_sgl_fcnvfxt(&fpregs[ta], &atmp.ints.i1,&atmp.ints.i1,&status)) error = 1; } else { if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1, &status)) error = 1; } } else { if (ra == 0) ra = fpzeroreg; if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1, &status)) error = 1; } if (error) return(MAJOR_06_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[tm+1] = mtmp.ints.i2; fpregs[ta] = atmp.ints.i1; fpregs[ta+1] = atmp.ints.i2; fpregs[0] = status; return(NOEXCEPTION); } } else { /* SGL */ /* * calculate offsets for single precision numbers * See table 6-14 in PA-89 architecture for mapping */ rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get 
offset */ rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */ rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */ rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */ tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */ tm |= extru(ir,fptmpos-4,1); /* add right word offset */ ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */ ra |= extru(ir,fprapos-4,1); /* add right word offset */ ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */ ta |= extru(ir,fptapos-4,1); /* add right word offset */ if (ra == 0x20 &&(fpu_type_flags & TIMEX_ROLEX_FPU_MASK)) { /* special case FMPYCFXT (really 0) * This instruction is only present on the Timex and * Rolex fpu's in so if it is the special case and * one of these fpu's we run the FMPYCFXT instruction */ if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (sgl_to_sgl_fcnvfxt(&fpregs[ta],&atmp.ints.i1, &atmp.ints.i1,&status)) error = 1; } else { if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (sgl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1, &status)) error = 1; } if (error) return(MAJOR_06_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[ta] = atmp.ints.i1; fpregs[0] = status; return(NOEXCEPTION); } } } /* * routine to decode the 26 (FMPYSUB) instruction */ static u_int decode_26(ir,fpregs) u_int ir; u_int fpregs[]; { u_int rm1, rm2, tm, ra, ta; /* operands */ u_int fmt; u_int error = 0; u_int status; union { double dbl; float flt; struct { u_int i1; u_int i2; } ints; } mtmp, atmp; status = fpregs[0]; fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */ if (fmt == 0) { /* DBL */ rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int); if (rm1 == 0) rm1 = fpzeroreg; rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int); if (rm2 == 0) rm2 = fpzeroreg; tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int); if (tm == 0) return(MAJOR_26_EXCP); ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int); if (ra 
== 0) return(MAJOR_26_EXCP); ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int); if (ta == 0) return(MAJOR_26_EXCP); if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status)) error = 1; if (dbl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status)) error = 1; if (error) return(MAJOR_26_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[tm+1] = mtmp.ints.i2; fpregs[ta] = atmp.ints.i1; fpregs[ta+1] = atmp.ints.i2; fpregs[0] = status; return(NOEXCEPTION); } } else { /* SGL */ /* * calculate offsets for single precision numbers * See table 6-14 in PA-89 architecture for mapping */ rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get offset */ rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */ rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */ rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */ tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */ tm |= extru(ir,fptmpos-4,1); /* add right word offset */ ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */ ra |= extru(ir,fprapos-4,1); /* add right word offset */ ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */ ta |= extru(ir,fptapos-4,1); /* add right word offset */ if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status)) error = 1; if (sgl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status)) error = 1; if (error) return(MAJOR_26_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[ta] = atmp.ints.i1; fpregs[0] = status; return(NOEXCEPTION); } } } /* * routine to decode the 2E (FMPYFADD,FMPYNFADD) instructions */ static u_int decode_2e(ir,fpregs) u_int ir; u_int fpregs[]; { u_int rm1, rm2, ra, t; /* operands */ u_int fmt; fmt = extru(ir,fpfmtpos,1); /* get fmt completer */ if (fmt == DBL) { /* DBL */ rm1 = extru(ir,fprm1pos,5) * sizeof(double)/sizeof(u_int); if (rm1 == 0) rm1 = fpzeroreg; rm2 = extru(ir,fprm2pos,5) * sizeof(double)/sizeof(u_int); if (rm2 == 0) rm2 = fpzeroreg; ra = 
((extru(ir,fpraupos,3)<<2)|(extru(ir,fpralpos,3)>>1)) * sizeof(double)/sizeof(u_int); if (ra == 0) ra = fpzeroreg; t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int); if (t == 0) return(MAJOR_2E_EXCP); if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */ return(dbl_fmpynfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } else { return(dbl_fmpyfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } } /* end DBL */ else { /* SGL */ rm1 = (extru(ir,fprm1pos,5)<<1)|(extru(ir,fpxrm1pos,1)); if (rm1 == 0) rm1 = fpzeroreg; rm2 = (extru(ir,fprm2pos,5)<<1)|(extru(ir,fpxrm2pos,1)); if (rm2 == 0) rm2 = fpzeroreg; ra = (extru(ir,fpraupos,3)<<3)|extru(ir,fpralpos,3); if (ra == 0) ra = fpzeroreg; t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1))); if (t == 0) return(MAJOR_2E_EXCP); if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */ return(sgl_fmpynfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } else { return(sgl_fmpyfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } } /* end SGL */ } /* * update_status_cbit * * This routine returns the correct FP status register value in * *status, based on the C-bit & V-bit returned by the FCMP * emulation routine in new_status. The architecture type * (PA83, PA89 or PA2.0) is available in fpu_type. The y_field * and the architecture type are used to determine what flavor * of FCMP is being emulated. */ static void update_status_cbit(status, new_status, fpu_type, y_field) u_int *status, new_status; u_int fpu_type; u_int y_field; { /* * For PA89 FPU's which implement the Compare Queue and * for PA2.0 FPU's, update the Compare Queue if the y-field = 0, * otherwise update the specified bit in the Compare Array. * Note that the y-field will always be 0 for non-PA2.0 FPU's. 
*/ if ((fpu_type & TIMEX_EXTEN_FLAG) || (fpu_type & ROLEX_EXTEN_FLAG) || (fpu_type & PA2_0_FPU_FLAG)) { if (y_field == 0) { *status = ((*status & 0x04000000) >> 5) | /* old Cbit */ ((*status & 0x003ff000) >> 1) | /* old CQ */ (new_status & 0xffc007ff); /* all other bits*/ } else { *status = (*status & 0x04000000) | /* old Cbit */ ((new_status & 0x04000000) >> (y_field+4)) | (new_status & ~0x04000000 & /* other bits */ ~(0x04000000 >> (y_field+4))); } } /* if PA83, just update the C-bit */ else { *status = new_status; } }
gpl-2.0
brymaster5000/m7wlv_4.2.2
drivers/net/can/sja1000/ems_pci.c
8997
10343
/* * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com> * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com> * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/can/dev.h> #include <linux/io.h> #include "sja1000.h" #define DRV_NAME "ems_pci" MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards"); MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe/104P CAN card"); MODULE_LICENSE("GPL v2"); #define EMS_PCI_V1_MAX_CHAN 2 #define EMS_PCI_V2_MAX_CHAN 4 #define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN struct ems_pci_card { int version; int channels; struct pci_dev *pci_dev; struct net_device *net_dev[EMS_PCI_MAX_CHAN]; void __iomem *conf_addr; void __iomem *base_addr; }; #define EMS_PCI_CAN_CLOCK (16000000 / 2) /* * Register definitions and descriptions are from LinCAN 0.3.3. 
* * PSB4610 PITA-2 bridge control registers */ #define PITA2_ICR 0x00 /* Interrupt Control Register */ #define PITA2_ICR_INT0 0x00000002 /* [RC] INT0 Active/Clear */ #define PITA2_ICR_INT0_EN 0x00020000 /* [RW] Enable INT0 */ #define PITA2_MISC 0x1c /* Miscellaneous Register */ #define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */ /* * Register definitions for the PLX 9030 */ #define PLX_ICSR 0x4c /* Interrupt Control/Status register */ #define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */ #define PLX_ICSR_PCIINT_ENA 0x0040 /* PCI Interrupt Enable */ #define PLX_ICSR_LINTI1_CLR 0x0400 /* Local Edge Triggerable Interrupt Clear */ #define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \ PLX_ICSR_LINTI1_CLR) /* * The board configuration is probably following: * RX1 is connected to ground. * TX1 is not connected. * CLKO is not connected. * Setting the OCR register to 0xDA is a good idea. * This means normal output mode, push-pull and the correct polarity. */ #define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL) /* * In the CDR register, you should set CBP to 1. * You will probably also want to set the clock divider value to 7 * (meaning direct oscillator output) because the second SJA1000 chip * is driven by the first one CLKOUT output. 
*/ #define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) #define EMS_PCI_V1_BASE_BAR 1 #define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */ #define EMS_PCI_V2_BASE_BAR 2 #define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */ #define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */ #define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */ #define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = { /* CPC-PCI v1 */ {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, /* CPC-PCI v2 */ {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000}, /* CPC-104P v2 */ {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002}, {0,} }; MODULE_DEVICE_TABLE(pci, ems_pci_tbl); /* * Helper to read internal registers from card logic (not CAN) */ static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port) { return readb(card->base_addr + (port * 4)); } static u8 ems_pci_v1_read_reg(const struct sja1000_priv *priv, int port) { return readb(priv->reg_base + (port * 4)); } static void ems_pci_v1_write_reg(const struct sja1000_priv *priv, int port, u8 val) { writeb(val, priv->reg_base + (port * 4)); } static void ems_pci_v1_post_irq(const struct sja1000_priv *priv) { struct ems_pci_card *card = (struct ems_pci_card *)priv->priv; /* reset int flag of pita */ writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, card->conf_addr + PITA2_ICR); } static u8 ems_pci_v2_read_reg(const struct sja1000_priv *priv, int port) { return readb(priv->reg_base + port); } static void ems_pci_v2_write_reg(const struct sja1000_priv *priv, int port, u8 val) { writeb(val, priv->reg_base + port); } static void ems_pci_v2_post_irq(const struct sja1000_priv *priv) { struct ems_pci_card *card = (struct ems_pci_card *)priv->priv; writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR); } /* * Check if a CAN controller is present at the specified location * by trying to set 'em into 
the PeliCAN mode */ static inline int ems_pci_check_chan(const struct sja1000_priv *priv) { unsigned char res; /* Make sure SJA1000 is in reset mode */ priv->write_reg(priv, REG_MOD, 1); priv->write_reg(priv, REG_CDR, CDR_PELICAN); /* read reset-values */ res = priv->read_reg(priv, REG_CDR); if (res == CDR_PELICAN) return 1; return 0; } static void ems_pci_del_card(struct pci_dev *pdev) { struct ems_pci_card *card = pci_get_drvdata(pdev); struct net_device *dev; int i = 0; for (i = 0; i < card->channels; i++) { dev = card->net_dev[i]; if (!dev) continue; dev_info(&pdev->dev, "Removing %s.\n", dev->name); unregister_sja1000dev(dev); free_sja1000dev(dev); } if (card->base_addr != NULL) pci_iounmap(card->pci_dev, card->base_addr); if (card->conf_addr != NULL) pci_iounmap(card->pci_dev, card->conf_addr); kfree(card); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } static void ems_pci_card_reset(struct ems_pci_card *card) { /* Request board reset */ writeb(0, card->base_addr); } /* * Probe PCI device for EMS CAN signature and register each available * CAN channel to SJA1000 Socket-CAN subsystem. */ static int __devinit ems_pci_add_card(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct net_device *dev; struct ems_pci_card *card; int max_chan, conf_size, base_bar; int err, i; /* Enabling PCI device */ if (pci_enable_device(pdev) < 0) { dev_err(&pdev->dev, "Enabling PCI device failed\n"); return -ENODEV; } /* Allocating card structures to hold addresses, ... 
*/ card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL); if (card == NULL) { dev_err(&pdev->dev, "Unable to allocate memory\n"); pci_disable_device(pdev); return -ENOMEM; } pci_set_drvdata(pdev, card); card->pci_dev = pdev; card->channels = 0; if (pdev->vendor == PCI_VENDOR_ID_PLX) { card->version = 2; /* CPC-PCI v2 */ max_chan = EMS_PCI_V2_MAX_CHAN; base_bar = EMS_PCI_V2_BASE_BAR; conf_size = EMS_PCI_V2_CONF_SIZE; } else { card->version = 1; /* CPC-PCI v1 */ max_chan = EMS_PCI_V1_MAX_CHAN; base_bar = EMS_PCI_V1_BASE_BAR; conf_size = EMS_PCI_V1_CONF_SIZE; } /* Remap configuration space and controller memory area */ card->conf_addr = pci_iomap(pdev, 0, conf_size); if (card->conf_addr == NULL) { err = -ENOMEM; goto failure_cleanup; } card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE); if (card->base_addr == NULL) { err = -ENOMEM; goto failure_cleanup; } if (card->version == 1) { /* Configure PITA-2 parallel interface (enable MUX) */ writel(PITA2_MISC_CONFIG, card->conf_addr + PITA2_MISC); /* Check for unique EMS CAN signature */ if (ems_pci_v1_readb(card, 0) != 0x55 || ems_pci_v1_readb(card, 1) != 0xAA || ems_pci_v1_readb(card, 2) != 0x01 || ems_pci_v1_readb(card, 3) != 0xCB || ems_pci_v1_readb(card, 4) != 0x11) { dev_err(&pdev->dev, "Not EMS Dr. 
Thomas Wuensche interface\n"); err = -ENODEV; goto failure_cleanup; } } ems_pci_card_reset(card); /* Detect available channels */ for (i = 0; i < max_chan; i++) { dev = alloc_sja1000dev(0); if (dev == NULL) { err = -ENOMEM; goto failure_cleanup; } card->net_dev[i] = dev; priv = netdev_priv(dev); priv->priv = card; priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET + (i * EMS_PCI_CAN_CTRL_SIZE); if (card->version == 1) { priv->read_reg = ems_pci_v1_read_reg; priv->write_reg = ems_pci_v1_write_reg; priv->post_irq = ems_pci_v1_post_irq; } else { priv->read_reg = ems_pci_v2_read_reg; priv->write_reg = ems_pci_v2_write_reg; priv->post_irq = ems_pci_v2_post_irq; } /* Check if channel is present */ if (ems_pci_check_chan(priv)) { priv->can.clock.freq = EMS_PCI_CAN_CLOCK; priv->ocr = EMS_PCI_OCR; priv->cdr = EMS_PCI_CDR; SET_NETDEV_DEV(dev, &pdev->dev); if (card->version == 1) /* reset int flag of pita */ writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, card->conf_addr + PITA2_ICR); else /* enable IRQ in PLX 9030 */ writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR); /* Register SJA1000 device */ err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "Registering device failed " "(err=%d)\n", err); free_sja1000dev(dev); goto failure_cleanup; } card->channels++; dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n", i + 1, priv->reg_base, dev->irq); } else { free_sja1000dev(dev); } } return 0; failure_cleanup: dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err); ems_pci_del_card(pdev); return err; } static struct pci_driver ems_pci_driver = { .name = DRV_NAME, .id_table = ems_pci_tbl, .probe = ems_pci_add_card, .remove = ems_pci_del_card, }; static int __init ems_pci_init(void) { return pci_register_driver(&ems_pci_driver); } static void __exit ems_pci_exit(void) { pci_unregister_driver(&ems_pci_driver); } module_init(ems_pci_init); module_exit(ems_pci_exit);
gpl-2.0
kuba160/tf300t-kernel
fs/file_table.c
550
13523
/* * linux/fs/file_table.c * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/string.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/init.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/security.h> #include <linux/eventpoll.h> #include <linux/rcupdate.h> #include <linux/mount.h> #include <linux/capability.h> #include <linux/cdev.h> #include <linux/fsnotify.h> #include <linux/sysctl.h> #include <linux/lglock.h> #include <linux/percpu_counter.h> #include <linux/percpu.h> #include <linux/ima.h> #include <linux/atomic.h> #include "internal.h" /* sysctl tunables... */ struct files_stat_struct files_stat = { .max_files = NR_FILE }; DECLARE_LGLOCK(files_lglock); DEFINE_LGLOCK(files_lglock); /* SLAB cache for file structures */ static struct kmem_cache *filp_cachep __read_mostly; static struct percpu_counter nr_files __cacheline_aligned_in_smp; static inline void file_free_rcu(struct rcu_head *head) { struct file *f = container_of(head, struct file, f_u.fu_rcuhead); put_cred(f->f_cred); kmem_cache_free(filp_cachep, f); } static inline void file_free(struct file *f) { percpu_counter_dec(&nr_files); file_check_state(f); call_rcu(&f->f_u.fu_rcuhead, file_free_rcu); } /* * Return the total number of open files in the system */ static long get_nr_files(void) { return percpu_counter_read_positive(&nr_files); } /* * Return the maximum number of open files in the system */ unsigned long get_max_files(void) { return files_stat.max_files; } EXPORT_SYMBOL_GPL(get_max_files); /* * Handle nr_files sysctl */ #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) int proc_nr_files(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { files_stat.nr_files = get_nr_files(); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } #else int proc_nr_files(ctl_table *table, int write, void __user *buffer, size_t *lenp, 
loff_t *ppos) { return -ENOSYS; } #endif /* Find an unused file structure and return a pointer to it. * Returns NULL, if there are no more free file structures or * we run out of memory. * * Be very careful using this. You are responsible for * getting write access to any mount that you might assign * to this filp, if it is opened for write. If this is not * done, you will imbalance int the mount's writer count * and a warning at __fput() time. */ struct file *get_empty_filp(void) { const struct cred *cred = current_cred(); static long old_max; struct file * f; /* * Privileged users can go above max_files */ if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) { /* * percpu_counters are inaccurate. Do an expensive check before * we go and fail. */ if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files) goto over; } f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL); if (f == NULL) goto fail; percpu_counter_inc(&nr_files); f->f_cred = get_cred(cred); if (security_file_alloc(f)) goto fail_sec; INIT_LIST_HEAD(&f->f_u.fu_list); atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); spin_lock_init(&f->f_lock); eventpoll_init_file(f); /* f->f_version: 0 */ return f; over: /* Ran out of filps - report that */ if (get_nr_files() > old_max) { pr_info("VFS: file-max limit %lu reached\n", get_max_files()); old_max = get_nr_files(); } goto fail; fail_sec: file_free(f); fail: return NULL; } /** * alloc_file - allocate and initialize a 'struct file' * @mnt: the vfsmount on which the file will reside * @dentry: the dentry representing the new file * @mode: the mode with which the new file will be opened * @fop: the 'struct file_operations' for the new file * * Use this instead of get_empty_filp() to get a new * 'struct file'. Do so because of the same initialization * pitfalls reasons listed for init_file(). This is a * preferred interface to using init_file(). 
* * If all the callers of init_file() are eliminated, its * code should be moved into this function. */ struct file *alloc_file(struct path *path, fmode_t mode, const struct file_operations *fop) { struct file *file; file = get_empty_filp(); if (!file) return NULL; file->f_path = *path; file->f_mapping = path->dentry->d_inode->i_mapping; file->f_mode = mode; file->f_op = fop; /* * These mounts don't really matter in practice * for r/o bind mounts. They aren't userspace- * visible. We do this for consistency, and so * that we can do debugging checks at __fput() */ if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) { file_take_write(file); WARN_ON(mnt_clone_write(path->mnt)); } if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_inc(path->dentry->d_inode); return file; } EXPORT_SYMBOL(alloc_file); /** * drop_file_write_access - give up ability to write to a file * @file: the file to which we will stop writing * * This is a central place which will give up the ability * to write to @file, along with access to write through * its vfsmount. */ void drop_file_write_access(struct file *file) { struct vfsmount *mnt = file->f_path.mnt; struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; put_write_access(inode); if (special_file(inode->i_mode)) return; if (file_check_writeable(file) != 0) return; mnt_drop_write(mnt); file_release_write(file); } EXPORT_SYMBOL_GPL(drop_file_write_access); /* the real guts of fput() - releasing the last reference to file */ static void __fput(struct file *file) { struct dentry *dentry = file->f_path.dentry; struct vfsmount *mnt = file->f_path.mnt; struct inode *inode = dentry->d_inode; might_sleep(); fsnotify_close(file); /* * The function eventpoll_release() should be the first called * in the file cleanup chain. 
*/ eventpoll_release(file); locks_remove_flock(file); if (unlikely(file->f_flags & FASYNC)) { if (file->f_op && file->f_op->fasync) file->f_op->fasync(-1, file, 0); } if (file->f_op && file->f_op->release) file->f_op->release(inode, file); security_file_free(file); ima_file_free(file); if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL && !(file->f_mode & FMODE_PATH))) { cdev_put(inode->i_cdev); } fops_put(file->f_op); put_pid(file->f_owner.pid); file_sb_list_del(file); if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_dec(inode); if (file->f_mode & FMODE_WRITE) drop_file_write_access(file); file->f_path.dentry = NULL; file->f_path.mnt = NULL; file_free(file); dput(dentry); mntput(mnt); } void fput(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) __fput(file); } EXPORT_SYMBOL(fput); struct file *fget(unsigned int fd) { struct file *file; struct files_struct *files = current->files; rcu_read_lock(); file = fcheck_files(files, fd); if (file) { /* File object ref couldn't be taken */ if (file->f_mode & FMODE_PATH || !atomic_long_inc_not_zero(&file->f_count)) file = NULL; } rcu_read_unlock(); return file; } EXPORT_SYMBOL(fget); struct file *fget_raw(unsigned int fd) { struct file *file; struct files_struct *files = current->files; rcu_read_lock(); file = fcheck_files(files, fd); if (file) { /* File object ref couldn't be taken */ if (!atomic_long_inc_not_zero(&file->f_count)) file = NULL; } rcu_read_unlock(); return file; } EXPORT_SYMBOL(fget_raw); /* * Lightweight file lookup - no refcnt increment if fd table isn't shared. * * You can use this instead of fget if you satisfy all of the following * conditions: * 1) You must call fput_light before exiting the syscall and returning control * to userspace (i.e. you cannot remember the returned struct file * after * returning to userspace). * 2) You must not call filp_close on the returned struct file * in between * calls to fget_light and fput_light. 
* 3) You must not clone the current task in between the calls to fget_light * and fput_light. * * The fput_needed flag returned by fget_light should be passed to the * corresponding fput_light. */ struct file *fget_light(unsigned int fd, int *fput_needed) { struct file *file; struct files_struct *files = current->files; *fput_needed = 0; if (atomic_read(&files->count) == 1) { file = fcheck_files(files, fd); if (file && (file->f_mode & FMODE_PATH)) file = NULL; } else { rcu_read_lock(); file = fcheck_files(files, fd); if (file) { if (!(file->f_mode & FMODE_PATH) && atomic_long_inc_not_zero(&file->f_count)) *fput_needed = 1; else /* Didn't get the reference, someone's freed */ file = NULL; } rcu_read_unlock(); } return file; } struct file *fget_raw_light(unsigned int fd, int *fput_needed) { struct file *file; struct files_struct *files = current->files; *fput_needed = 0; if (atomic_read(&files->count) == 1) { file = fcheck_files(files, fd); } else { rcu_read_lock(); file = fcheck_files(files, fd); if (file) { if (atomic_long_inc_not_zero(&file->f_count)) *fput_needed = 1; else /* Didn't get the reference, someone's freed */ file = NULL; } rcu_read_unlock(); } return file; } void put_filp(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) { security_file_free(file); file_sb_list_del(file); file_free(file); } } static inline int file_list_cpu(struct file *file) { #ifdef CONFIG_SMP return file->f_sb_list_cpu; #else return smp_processor_id(); #endif } /* helper for file_sb_list_add to reduce ifdefs */ static inline void __file_sb_list_add(struct file *file, struct super_block *sb) { struct list_head *list; #ifdef CONFIG_SMP int cpu; cpu = smp_processor_id(); file->f_sb_list_cpu = cpu; list = per_cpu_ptr(sb->s_files, cpu); #else list = &sb->s_files; #endif list_add(&file->f_u.fu_list, list); } /** * file_sb_list_add - add a file to the sb's file list * @file: file to add * @sb: sb to add it to * * Use this function to associate a file with the superblock 
of the inode it * refers to. */ void file_sb_list_add(struct file *file, struct super_block *sb) { lg_local_lock(files_lglock); __file_sb_list_add(file, sb); lg_local_unlock(files_lglock); } /** * file_sb_list_del - remove a file from the sb's file list * @file: file to remove * @sb: sb to remove it from * * Use this function to remove a file from its superblock. */ void file_sb_list_del(struct file *file) { if (!list_empty(&file->f_u.fu_list)) { lg_local_lock_cpu(files_lglock, file_list_cpu(file)); list_del_init(&file->f_u.fu_list); lg_local_unlock_cpu(files_lglock, file_list_cpu(file)); } } #ifdef CONFIG_SMP /* * These macros iterate all files on all CPUs for a given superblock. * files_lglock must be held globally. */ #define do_file_list_for_each_entry(__sb, __file) \ { \ int i; \ for_each_possible_cpu(i) { \ struct list_head *list; \ list = per_cpu_ptr((__sb)->s_files, i); \ list_for_each_entry((__file), list, f_u.fu_list) #define while_file_list_for_each_entry \ } \ } #else #define do_file_list_for_each_entry(__sb, __file) \ { \ struct list_head *list; \ list = &(sb)->s_files; \ list_for_each_entry((__file), list, f_u.fu_list) #define while_file_list_for_each_entry \ } #endif int fs_may_remount_ro(struct super_block *sb) { struct file *file; /* Check that no files are currently opened for writing. */ lg_global_lock(files_lglock); do_file_list_for_each_entry(sb, file) { struct inode *inode = file->f_path.dentry->d_inode; /* File with pending delete? */ if (inode->i_nlink == 0) goto too_bad; /* Writeable file? */ if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE)) goto too_bad; } while_file_list_for_each_entry; lg_global_unlock(files_lglock); return 1; /* Tis' cool bro. */ too_bad: lg_global_unlock(files_lglock); return 0; } /** * mark_files_ro - mark all files read-only * @sb: superblock in question * * All files are marked read-only. We don't care about pending * delete files so this should be used in 'force' mode only. 
*/ void mark_files_ro(struct super_block *sb) { struct file *f; retry: lg_global_lock(files_lglock); do_file_list_for_each_entry(sb, f) { struct vfsmount *mnt; if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) continue; if (!file_count(f)) continue; if (!(f->f_mode & FMODE_WRITE)) continue; spin_lock(&f->f_lock); f->f_mode &= ~FMODE_WRITE; spin_unlock(&f->f_lock); if (file_check_writeable(f) != 0) continue; file_release_write(f); mnt = mntget(f->f_path.mnt); /* This can sleep, so we can't hold the spinlock. */ lg_global_unlock(files_lglock); mnt_drop_write(mnt); mntput(mnt); goto retry; } while_file_list_for_each_entry; lg_global_unlock(files_lglock); } void __init files_init(unsigned long mempages) { unsigned long n; filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); /* * One file with associated inode and dcache is very roughly 1K. * Per default don't use more than 10% of our memory for files. */ n = (mempages * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = max_t(unsigned long, n, NR_FILE); files_defer_init(); lg_lock_init(files_lglock); percpu_counter_init(&nr_files, 0); }
gpl-2.0
AnesHadzi/linux-socfpga
drivers/char/xillybus/xillybus_core.c
806
53662
/* * linux/drivers/misc/xillybus_core.c * * Copyright 2011 Xillybus Ltd, http://xillybus.com * * Driver for the Xillybus FPGA/host framework. * * This driver interfaces with a special IP core in an FPGA, setting up * a pipe between a hardware FIFO in the programmable logic and a device * file in the host. The number of such pipes and their attributes are * set up on the logic. This driver detects these automatically and * creates the device files accordingly. * * This program is free software; you can redistribute it and/or modify * it under the smems of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. */ #include <linux/list.h> #include <linux/device.h> #include <linux/module.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/crc32.h> #include <linux/poll.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "xillybus.h" MODULE_DESCRIPTION("Xillybus core functions"); MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); MODULE_VERSION("1.07"); MODULE_ALIAS("xillybus_core"); MODULE_LICENSE("GPL v2"); /* General timeout is 100 ms, rx timeout is 10 ms */ #define XILLY_RX_TIMEOUT (10*HZ/1000) #define XILLY_TIMEOUT (100*HZ/1000) #define fpga_msg_ctrl_reg 0x0008 #define fpga_dma_control_reg 0x0020 #define fpga_dma_bufno_reg 0x0024 #define fpga_dma_bufaddr_lowaddr_reg 0x0028 #define fpga_dma_bufaddr_highaddr_reg 0x002c #define fpga_buf_ctrl_reg 0x0030 #define fpga_buf_offset_reg 0x0034 #define fpga_endian_reg 0x0040 #define XILLYMSG_OPCODE_RELEASEBUF 1 #define XILLYMSG_OPCODE_QUIESCEACK 2 #define XILLYMSG_OPCODE_FIFOEOF 3 #define XILLYMSG_OPCODE_FATAL_ERROR 4 #define XILLYMSG_OPCODE_NONEMPTY 5 static const char xillyname[] = "xillybus"; static struct class *xillybus_class; /* * ep_list_lock is the last lock to be 
taken; No other lock requests are * allowed while holding it. It merely protects list_of_endpoints, and not * the endpoints listed in it. */ static LIST_HEAD(list_of_endpoints); static struct mutex ep_list_lock; static struct workqueue_struct *xillybus_wq; /* * Locking scheme: Mutexes protect invocations of character device methods. * If both locks are taken, wr_mutex is taken first, rd_mutex second. * * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the * buffers' end_offset fields against changes made by IRQ handler (and in * theory, other file request handlers, but the mutex handles that). Nothing * else. * They are held for short direct memory manipulations. Needless to say, * no mutex locking is allowed when a spinlock is held. * * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset. * * register_mutex is endpoint-specific, and is held when non-atomic * register operations are performed. wr_mutex and rd_mutex may be * held when register_mutex is taken, but none of the spinlocks. Note that * register_mutex doesn't protect against sporadic buf_ctrl_reg writes * which are unrelated to buf_offset_reg, since they are harmless. * * Blocking on the wait queues is allowed with mutexes held, but not with * spinlocks. * * Only interruptible blocking is allowed on mutexes and wait queues. 
* * All in all, the locking order goes (with skips allowed, of course): * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock */ static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf) { int opcode; int msg_channel, msg_bufno, msg_data, msg_dir; opcode = (buf[0] >> 24) & 0xff; msg_dir = buf[0] & 1; msg_channel = (buf[0] >> 1) & 0x7ff; msg_bufno = (buf[0] >> 12) & 0x3ff; msg_data = buf[1] & 0xfffffff; dev_warn(endpoint->dev, "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n", opcode, msg_channel, msg_dir, msg_bufno, msg_data); } /* * xillybus_isr assumes the interrupt is allocated exclusively to it, * which is the natural case MSI and several other hardware-oriented * interrupts. Sharing is not allowed. */ irqreturn_t xillybus_isr(int irq, void *data) { struct xilly_endpoint *ep = data; u32 *buf; unsigned int buf_size; int i; int opcode; unsigned int msg_channel, msg_bufno, msg_data, msg_dir; struct xilly_channel *channel; buf = ep->msgbuf_addr; buf_size = ep->msg_buf_size/sizeof(u32); ep->ephw->hw_sync_sgl_for_cpu(ep, ep->msgbuf_dma_addr, ep->msg_buf_size, DMA_FROM_DEVICE); for (i = 0; i < buf_size; i += 2) { if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) { malformed_message(ep, &buf[i]); dev_warn(ep->dev, "Sending a NACK on counter %x (instead of %x) on entry %d\n", ((buf[i+1] >> 28) & 0xf), ep->msg_counter, i/2); if (++ep->failed_messages > 10) { dev_err(ep->dev, "Lost sync with interrupt messages. Stopping.\n"); } else { ep->ephw->hw_sync_sgl_for_device( ep, ep->msgbuf_dma_addr, ep->msg_buf_size, DMA_FROM_DEVICE); iowrite32(0x01, /* Message NACK */ ep->registers + fpga_msg_ctrl_reg); } return IRQ_HANDLED; } else if (buf[i] & (1 << 22)) /* Last message */ break; } if (i >= buf_size) { dev_err(ep->dev, "Bad interrupt message. 
Stopping.\n"); return IRQ_HANDLED; } buf_size = i + 2; for (i = 0; i < buf_size; i += 2) { /* Scan through messages */ opcode = (buf[i] >> 24) & 0xff; msg_dir = buf[i] & 1; msg_channel = (buf[i] >> 1) & 0x7ff; msg_bufno = (buf[i] >> 12) & 0x3ff; msg_data = buf[i+1] & 0xfffffff; switch (opcode) { case XILLYMSG_OPCODE_RELEASEBUF: if ((msg_channel > ep->num_channels) || (msg_channel == 0)) { malformed_message(ep, &buf[i]); break; } channel = ep->channels[msg_channel]; if (msg_dir) { /* Write channel */ if (msg_bufno >= channel->num_wr_buffers) { malformed_message(ep, &buf[i]); break; } spin_lock(&channel->wr_spinlock); channel->wr_buffers[msg_bufno]->end_offset = msg_data; channel->wr_fpga_buf_idx = msg_bufno; channel->wr_empty = 0; channel->wr_sleepy = 0; spin_unlock(&channel->wr_spinlock); wake_up_interruptible(&channel->wr_wait); } else { /* Read channel */ if (msg_bufno >= channel->num_rd_buffers) { malformed_message(ep, &buf[i]); break; } spin_lock(&channel->rd_spinlock); channel->rd_fpga_buf_idx = msg_bufno; channel->rd_full = 0; spin_unlock(&channel->rd_spinlock); wake_up_interruptible(&channel->rd_wait); if (!channel->rd_synchronous) queue_delayed_work( xillybus_wq, &channel->rd_workitem, XILLY_RX_TIMEOUT); } break; case XILLYMSG_OPCODE_NONEMPTY: if ((msg_channel > ep->num_channels) || (msg_channel == 0) || (!msg_dir) || !ep->channels[msg_channel]->wr_supports_nonempty) { malformed_message(ep, &buf[i]); break; } channel = ep->channels[msg_channel]; if (msg_bufno >= channel->num_wr_buffers) { malformed_message(ep, &buf[i]); break; } spin_lock(&channel->wr_spinlock); if (msg_bufno == channel->wr_host_buf_idx) channel->wr_ready = 1; spin_unlock(&channel->wr_spinlock); wake_up_interruptible(&channel->wr_ready_wait); break; case XILLYMSG_OPCODE_QUIESCEACK: ep->idtlen = msg_data; wake_up_interruptible(&ep->ep_wait); break; case XILLYMSG_OPCODE_FIFOEOF: if ((msg_channel > ep->num_channels) || (msg_channel == 0) || (!msg_dir) || 
!ep->channels[msg_channel]->num_wr_buffers) { malformed_message(ep, &buf[i]); break; } channel = ep->channels[msg_channel]; spin_lock(&channel->wr_spinlock); channel->wr_eof = msg_bufno; channel->wr_sleepy = 0; channel->wr_hangup = channel->wr_empty && (channel->wr_host_buf_idx == msg_bufno); spin_unlock(&channel->wr_spinlock); wake_up_interruptible(&channel->wr_wait); break; case XILLYMSG_OPCODE_FATAL_ERROR: ep->fatal_error = 1; wake_up_interruptible(&ep->ep_wait); /* For select() */ dev_err(ep->dev, "FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n"); break; default: malformed_message(ep, &buf[i]); break; } } ep->ephw->hw_sync_sgl_for_device(ep, ep->msgbuf_dma_addr, ep->msg_buf_size, DMA_FROM_DEVICE); ep->msg_counter = (ep->msg_counter + 1) & 0xf; ep->failed_messages = 0; iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */ return IRQ_HANDLED; } EXPORT_SYMBOL(xillybus_isr); /* * A few trivial memory management functions. * NOTE: These functions are used only on probe and remove, and therefore * no locks are applied! 
*/

/* Forward declaration: delayed-work handler, defined further below */
static void xillybus_autoflush(struct work_struct *work);

/*
 * Bookkeeping for carving DMA buffers out of page allocations ("salami"):
 * @salami:         current page allocation being sliced into buffers
 * @left_of_salami: bytes still unassigned in the current allocation
 * @nbuffer:        next buffer index to announce to the FPGA
 * @direction:      DMA direction for all buffers cut from this state
 * @regdirection:   direction bit(s) OR'ed into fpga_dma_bufno_reg writes
 */
struct xilly_alloc_state {
	void *salami;
	int left_of_salami;
	int nbuffer;
	enum dma_data_direction direction;
	u32 regdirection;
};

/*
 * Allocate @bufnum DMA buffers of @bytebufsize bytes each, carving them
 * out of whole-page allocations tracked in @s, and announce each buffer's
 * DMA address and index to the FPGA through the bufaddr/bufno registers.
 * @buffers == NULL means this is the single message buffer, which is
 * recorded in the endpoint instead of a buffer array.
 * Returns 0 on success or a negative errno. All memory is devres-managed,
 * so nothing is explicitly freed on the error paths.
 */
static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
				 struct xilly_alloc_state *s,
				 struct xilly_buffer **buffers,
				 int bufnum, int bytebufsize)
{
	int i, rc;
	dma_addr_t dma_addr;
	struct device *dev = ep->dev;
	struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */

	if (buffers) { /* Not the message buffer */
		this_buffer = devm_kcalloc(dev, bufnum,
					   sizeof(struct xilly_buffer),
					   GFP_KERNEL);
		if (!this_buffer)
			return -ENOMEM;
	}

	for (i = 0; i < bufnum; i++) {
		/*
		 * Buffers are expected in descending size order, so there
		 * is either enough space for this buffer or none at all.
		 */

		if ((s->left_of_salami < bytebufsize) &&
		    (s->left_of_salami > 0)) {
			dev_err(ep->dev,
				"Corrupt buffer allocation in IDT. Aborting.\n");
			return -ENODEV;
		}

		if (s->left_of_salami == 0) {
			int allocorder, allocsize;

			/* Smallest power-of-two page order that fits one buffer */
			allocsize = PAGE_SIZE;
			allocorder = 0;
			while (bytebufsize > allocsize) {
				allocsize *= 2;
				allocorder++;
			}

			s->salami = (void *) devm_get_free_pages(
				dev,
				GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO,
				allocorder);
			if (!s->salami)
				return -ENOMEM;

			s->left_of_salami = allocsize;
		}

		rc = ep->ephw->map_single(ep, s->salami,
					  bytebufsize, s->direction,
					  &dma_addr);
		if (rc)
			return rc;

		/* Hand the 64-bit DMA address to the FPGA in two 32-bit halves */
		iowrite32((u32) (dma_addr & 0xffffffff),
			  ep->registers + fpga_dma_bufaddr_lowaddr_reg);
		iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)),
			  ep->registers + fpga_dma_bufaddr_highaddr_reg);

		if (buffers) { /* Not the message buffer */
			this_buffer->addr = s->salami;
			this_buffer->dma_addr = dma_addr;
			buffers[i] = this_buffer++;

			iowrite32(s->regdirection | s->nbuffer++,
				  ep->registers + fpga_dma_bufno_reg);
		} else {
			ep->msgbuf_addr = s->salami;
			ep->msgbuf_dma_addr = dma_addr;
			ep->msg_buf_size = bytebufsize;

			iowrite32(s->regdirection,
				  ep->registers + fpga_dma_bufno_reg);
		}

		s->left_of_salami -= bytebufsize;
		s->salami += bytebufsize;
	}

	return 0;
}

/*
 * Parse @entries 4-byte channel descriptors from the IDT (@chandesc),
 * allocate the per-channel structures and their DMA buffers, and set each
 * channel's attribute flags. Returns 0 or a negative errno.
 */
static int xilly_setupchannels(struct xilly_endpoint *ep,
			       unsigned char *chandesc,
			       int entries)
{
	struct device *dev = ep->dev;
	int i, entry, rc;
	struct xilly_channel *channel;
	int channelnum, bufnum, bufsize, format, is_writebuf;
	int bytebufsize;
	int synchronous, allowpartial, exclusive_open, seekable;
	int supports_nonempty;
	int msg_buf_done = 0;

	/* FPGA-to-host buffers are "wr" (host reads), host-to-FPGA are "rd" */
	struct xilly_alloc_state rd_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_TO_DEVICE,
		.regdirection = 0,
	};

	struct xilly_alloc_state wr_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_FROM_DEVICE,
		.regdirection = 0x80000000,
	};

	channel = devm_kcalloc(dev, ep->num_channels,
			       sizeof(struct xilly_channel), GFP_KERNEL);
	if (!channel)
		return -ENOMEM;

	ep->channels = devm_kcalloc(dev, ep->num_channels + 1,
				    sizeof(struct xilly_channel *),
				    GFP_KERNEL);
	if (!ep->channels)
		return -ENOMEM;

	ep->channels[0] = NULL; /* Channel 0 is message buf. */

	/* Initialize all channels with defaults */

	for (i = 1; i <= ep->num_channels; i++) {
		channel->wr_buffers = NULL;
		channel->rd_buffers = NULL;
		channel->num_wr_buffers = 0;
		channel->num_rd_buffers = 0;
		channel->wr_fpga_buf_idx = -1;
		channel->wr_host_buf_idx = 0;
		channel->wr_host_buf_pos = 0;
		channel->wr_empty = 1;
		channel->wr_ready = 0;
		channel->wr_sleepy = 1;
		channel->rd_fpga_buf_idx = 0;
		channel->rd_host_buf_idx = 0;
		channel->rd_host_buf_pos = 0;
		channel->rd_full = 0;
		channel->wr_ref_count = 0;
		channel->rd_ref_count = 0;

		spin_lock_init(&channel->wr_spinlock);
		spin_lock_init(&channel->rd_spinlock);
		mutex_init(&channel->wr_mutex);
		mutex_init(&channel->rd_mutex);
		init_waitqueue_head(&channel->rd_wait);
		init_waitqueue_head(&channel->wr_wait);
		init_waitqueue_head(&channel->wr_ready_wait);

		INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);

		channel->endpoint = ep;
		channel->chan_num = i;

		channel->log2_element_size = 0;

		ep->channels[i] = channel++;
	}

	/* Decode one 4-byte descriptor per iteration */
	for (entry = 0; entry < entries; entry++, chandesc += 4)
{
		struct xilly_buffer **buffers = NULL;

		/* Unpack the bit fields of the 4-byte channel descriptor */
		is_writebuf = chandesc[0] & 0x01;
		channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
		format = (chandesc[1] >> 4) & 0x03;
		allowpartial = (chandesc[1] >> 6) & 0x01;
		synchronous = (chandesc[1] >> 7) & 0x01;
		bufsize = 1 << (chandesc[2] & 0x1f);
		bufnum = 1 << (chandesc[3] & 0x0f);
		exclusive_open = (chandesc[2] >> 7) & 0x01;
		seekable = (chandesc[2] >> 6) & 0x01;
		supports_nonempty = (chandesc[2] >> 5) & 0x01;

		if ((channelnum > ep->num_channels) ||
		    ((channelnum == 0) && !is_writebuf)) {
			dev_err(ep->dev,
				"IDT requests channel out of range. Aborting.\n");
			return -ENODEV;
		}

		channel = ep->channels[channelnum]; /* NULL for msg channel */

		if (!is_writebuf || channelnum > 0) {
			/* Element size is 1, 2 or 4 bytes (format capped at 2) */
			channel->log2_element_size = ((format > 2) ?
						      2 : format);

			bytebufsize = channel->rd_buf_size = bufsize *
				(1 << channel->log2_element_size);

			buffers = devm_kcalloc(dev, bufnum,
					       sizeof(struct xilly_buffer *),
					       GFP_KERNEL);
			if (!buffers)
				return -ENOMEM;
		} else {
			bytebufsize = bufsize << 2;
		}

		if (!is_writebuf) {
			channel->num_rd_buffers = bufnum;
			channel->rd_allow_partial = allowpartial;
			channel->rd_synchronous = synchronous;
			channel->rd_exclusive_open = exclusive_open;
			channel->seekable = seekable;

			channel->rd_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers,
						   bufnum, bytebufsize);
		} else if (channelnum > 0) {
			channel->num_wr_buffers = bufnum;
			channel->seekable = seekable;
			channel->wr_supports_nonempty = supports_nonempty;

			channel->wr_allow_partial = allowpartial;
			channel->wr_synchronous = synchronous;
			channel->wr_exclusive_open = exclusive_open;

			channel->wr_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers,
						   bufnum, bytebufsize);
		} else {
			/* channelnum == 0 and is_writebuf: the message buffer */
			rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL,
						   bufnum, bytebufsize);
			msg_buf_done++;
		}

		if (rc)
			return -ENOMEM;
	}

	if (!msg_buf_done) {
		dev_err(ep->dev,
			"Corrupt IDT: No message buffer. Aborting.\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Walk the IDT blob (already received into channel 1's first buffer):
 * count the NUL-terminated device-name strings to obtain the number of
 * channels, and locate the channel descriptor table that follows them.
 * Fills @idt_handle and endpoint->num_channels. Returns 0 or -ENODEV on
 * a malformed IDT.
 */
static int xilly_scan_idt(struct xilly_endpoint *endpoint,
			  struct xilly_idt_handle *idt_handle)
{
	int count = 0;
	unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
	unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
	unsigned char *scan;
	int len;

	scan = idt;
	idt_handle->idt = idt;

	scan++; /* Skip version number */

	while ((scan <= end_of_idt) && *scan) {
		while ((scan <= end_of_idt) && *scan++)
			/* Do nothing, just scan thru string */;
		count++;
	}

	scan++;

	if (scan > end_of_idt) {
		dev_err(endpoint->dev,
			"IDT device name list overflow. Aborting.\n");
		return -ENODEV;
	}

	idt_handle->chandesc = scan;

	len = endpoint->idtlen - (3 + ((int) (scan - idt)));

	/* Descriptor table must be a whole number of 4-byte entries */
	if (len & 0x03) {
		dev_err(endpoint->dev,
			"Corrupt IDT device name list. Aborting.\n");
		return -ENODEV;
	}

	idt_handle->entries = len >> 2;

	endpoint->num_channels = count;

	return 0;
}

/*
 * Ask the FPGA to send the IDT (opcode 3 on channel 0), wait for it to
 * arrive in channel 1's buffer, then validate its length, CRC and version.
 * Returns 0, -EIO on fatal hardware error, or -ENODEV otherwise.
 */
static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
{
	struct xilly_channel *channel;
	unsigned char *version;
	long t;

	channel = endpoint->channels[1]; /* This should be generated ad-hoc */

	channel->wr_sleepy = 1;

	iowrite32(1 |
		  (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
		  endpoint->registers + fpga_buf_ctrl_reg);

	t = wait_event_interruptible_timeout(channel->wr_wait,
					     (!channel->wr_sleepy),
					     XILLY_TIMEOUT);

	if (t <= 0) {
		dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");

		if (endpoint->fatal_error)
			return -EIO;

		return -ENODEV;
	}

	endpoint->ephw->hw_sync_sgl_for_cpu(
		channel->endpoint,
		channel->wr_buffers[0]->dma_addr,
		channel->wr_buf_size,
		DMA_FROM_DEVICE);

	if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
		dev_err(endpoint->dev,
			"IDT length mismatch (%d != %d). Aborting.\n",
			channel->wr_buffers[0]->end_offset,
			endpoint->idtlen);
		return -ENODEV;
	}

	if (crc32_le(~0, channel->wr_buffers[0]->addr,
		     endpoint->idtlen+1) != 0) {
		dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
		return -ENODEV;
	}

	version = channel->wr_buffers[0]->addr;

	/* Check version number. Accept anything below 0x82 for now. */
	/* NOTE(review): "upgarde" typo in the user-visible message below;
	 * fix as a separate string change.
	 */
	if (*version > 0x82) {
		dev_err(endpoint->dev,
			"No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgarde. Aborting.\n",
			*version);
		return -ENODEV;
	}

	return 0;
}

/*
 * read() for an FPGA-to-host ("wr") channel: copy out whatever the DMA
 * buffers hold, and when they run dry, request/await more from the FPGA,
 * honoring O_NONBLOCK, partial-read policy and a receive timeout.
 */
static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
			     size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	int no_time_left = 0;
	long deadline, left_to_sleep;
	struct xilly_channel *channel = filp->private_data;

	int empty, reached_eof, exhausted, ready;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int waiting_bufidx;

	if (channel->endpoint->fatal_error)
		return -EIO;

	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;

	rc = mutex_lock_interruptible(&channel->wr_mutex);
	if (rc)
		return rc;

	while (1) { /* Note that we may drop mutex within this loop */
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->wr_spinlock, flags);

		empty = channel->wr_empty;
		ready = !empty || channel->wr_ready;

		if (!empty) {
			bufidx = channel->wr_host_buf_idx;
			bufpos = channel->wr_host_buf_pos;
			howmany = ((channel->wr_buffers[bufidx]->end_offset
				    + 1) << channel->log2_element_size)
				- bufpos;

			/* Update wr_host_* to its post-operation state */
			if (howmany > bytes_to_do) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->wr_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				channel->wr_host_buf_pos = 0;

				if (bufidx == channel->wr_fpga_buf_idx) {
					channel->wr_empty = 1;
					channel->wr_sleepy = 1;
					channel->wr_ready = 0;
				}

				if (bufidx >= (channel->num_wr_buffers - 1))
					channel->wr_host_buf_idx = 0;
				else
					channel->wr_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
*
		 * empty = empty before change
		 * exhausted = empty after possible change
		 */

		reached_eof = channel->wr_empty &&
			(channel->wr_host_buf_idx == channel->wr_eof);

		channel->wr_hangup = reached_eof;
		exhausted = channel->wr_empty;
		waiting_bufidx = channel->wr_host_buf_idx;

		spin_unlock_irqrestore(&channel->wr_spinlock, flags);

		if (!empty) { /* Go on, now without the spinlock */
			if (bufpos == 0) /* Position zero means it's virgin */
				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
					channel->endpoint,
					channel->wr_buffers[bufidx]->dma_addr,
					channel->wr_buf_size,
					DMA_FROM_DEVICE);

			if (copy_to_user(
				    userbuf,
				    channel->wr_buffers[bufidx]->addr
				    + bufpos, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				channel->endpoint->ephw->
					hw_sync_sgl_for_device(
						channel->endpoint,
						channel->wr_buffers[bufidx]->
						dma_addr,
						channel->wr_buf_size,
						DMA_FROM_DEVICE);

				/*
				 * Tell FPGA the buffer is done with. It's an
				 * atomic operation to the FPGA, so what
				 * happens with other channels doesn't matter,
				 * and the certain channel is protected with
				 * the channel-specific mutex.
				 */

				iowrite32(1 | (channel->chan_num << 1) |
					  (bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);
			}

			if (rc) {
				/* rc can only be -EFAULT from copy_to_user */
				mutex_unlock(&channel->wr_mutex);
				return rc;
			}
		}

		/* This includes a zero-count return = EOF */
		if ((bytes_done >= count) || reached_eof)
			break;

		if (!exhausted)
			continue; /* More in RAM buffer(s)? Just go on. */

		if ((bytes_done > 0) &&
		    (no_time_left ||
		     (channel->wr_synchronous && channel->wr_allow_partial)))
			break;

		/*
		 * Nonblocking read: The "ready" flag tells us that the FPGA
		 * has data to send. In non-blocking mode, if it isn't on,
		 * just return. But if there is, we jump directly to the point
		 * where we ask for the FPGA to send all it has, and wait
		 * until that data arrives. So in a sense, we *do* block in
		 * nonblocking mode, but only for a very short time.
		 */

		if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
			if (bytes_done > 0)
				break;

			if (ready)
				goto desperate;

			rc = -EAGAIN;
			break;
		}

		if (!no_time_left || (bytes_done > 0)) {
			/*
			 * Note that in case of an element-misaligned read
			 * request, offsetlimit will include the last element,
			 * which will be partially read from.
			 */
			int offsetlimit = ((count - bytes_done) - 1) >>
				channel->log2_element_size;
			int buf_elements = channel->wr_buf_size >>
				channel->log2_element_size;

			/*
			 * In synchronous mode, always send an offset limit.
			 * Just don't send a value too big.
			 */

			if (channel->wr_synchronous) {
				/* Don't request more than one buffer */
				if (channel->wr_allow_partial &&
				    (offsetlimit >= buf_elements))
					offsetlimit = buf_elements - 1;

				/* Don't request more than all buffers */
				if (!channel->wr_allow_partial &&
				    (offsetlimit >=
				     (buf_elements *
				      channel->num_wr_buffers)))
					offsetlimit = buf_elements *
						channel->num_wr_buffers - 1;
			}

			/*
			 * In asynchronous mode, force early flush of a buffer
			 * only if that will allow returning a full count. The
			 * "offsetlimit < ( ... )" rather than "<=" excludes
			 * requesting a full buffer, which would obviously
			 * cause a buffer transmission anyhow
			 */

			if (channel->wr_synchronous ||
			    (offsetlimit < (buf_elements - 1))) {
				mutex_lock(&channel->endpoint->
					   register_mutex);

				iowrite32(offsetlimit,
					  channel->endpoint->registers +
					  fpga_buf_offset_reg);

				iowrite32(1 | (channel->chan_num << 1) |
					  (2 << 24) |  /* 2 = offset limit */
					  (waiting_bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);

				mutex_unlock(&channel->endpoint->
					     register_mutex);
			}
		}

		/*
		 * If partial completion is disallowed, there is no point in
		 * timeout sleeping. Neither if no_time_left is set and
		 * there's no data.
		 */

		if (!channel->wr_allow_partial ||
		    (no_time_left && (bytes_done == 0))) {
			/*
			 * This do-loop will run more than once if another
			 * thread reasserted wr_sleepy before we got the mutex
			 * back, so we try again.
			 */

			do {
				mutex_unlock(&channel->wr_mutex);

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					goto interrupted;

				if (mutex_lock_interruptible(
					    &channel->wr_mutex))
					goto interrupted;
			} while (channel->wr_sleepy);

			continue;

interrupted: /* Mutex is not held if got here */
			if (channel->endpoint->fatal_error)
				return -EIO;
			if (bytes_done)
				return bytes_done;
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN; /* Don't admit snoozing */
			return -EINTR;
		}

		left_to_sleep = deadline - ((long) jiffies);

		/*
		 * If our time is out, skip the waiting. We may miss wr_sleepy
		 * being deasserted but hey, almost missing the train is like
		 * missing it.
		 */

		if (left_to_sleep > 0) {
			left_to_sleep =
				wait_event_interruptible_timeout(
					channel->wr_wait,
					(!channel->wr_sleepy),
					left_to_sleep);

			if (left_to_sleep > 0) /* wr_sleepy deasserted */
				continue;

			if (left_to_sleep < 0) { /* Interrupt */
				mutex_unlock(&channel->wr_mutex);
				if (channel->endpoint->fatal_error)
					return -EIO;
				if (bytes_done)
					return bytes_done;
				return -EINTR;
			}
		}

desperate:
		no_time_left = 1; /* We're out of sleeping time. Desperate! */

		if (bytes_done == 0) {
			/*
			 * Reaching here means that we allow partial return,
			 * that we've run out of time, and that we have
			 * nothing to return.
			 * So tell the FPGA to send anything it has or gets.
			 */

			iowrite32(1 | (channel->chan_num << 1) |
				  (3 << 24) |  /* Opcode 3, flush it all! */
				  (waiting_bufidx << 12),
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		/*
		 * Reaching here means that we *do* have data in the buffer,
		 * but the "partial" flag disallows returning less than
		 * required. And we don't have as much. So loop again,
		 * which is likely to end up blocking indefinitely until
		 * enough data has arrived.
*/
	}

	mutex_unlock(&channel->wr_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (rc)
		return rc;

	return bytes_done;
}

/*
 * The timeout argument takes values as follows:
 *  >0 : Flush with timeout
 * ==0 : Flush, and wait indefinitely for the flush to complete
 *  <0 : Autoflush: Flush only if there's a single buffer occupied
 *
 * Submits the partially filled host-to-FPGA buffer (if nonempty) and,
 * unless autoflushing, waits for the FPGA to drain all submitted buffers.
 * Returns 0, or -EIO/-EINTR/-ETIMEDOUT/negative errno on failure.
 */
static int xillybus_myflush(struct xilly_channel *channel, long timeout)
{
	int rc;
	unsigned long flags;

	int end_offset_plus1;
	int bufidx, bufidx_minus1;
	int i;
	int empty;
	int new_rd_host_buf_pos;

	if (channel->endpoint->fatal_error)
		return -EIO;

	rc = mutex_lock_interruptible(&channel->rd_mutex);
	if (rc)
		return rc;

	/*
	 * Don't flush a closed channel. This can happen when the work queued
	 * autoflush thread fires off after the file has closed. This is not
	 * an error, just something to dismiss.
	 */

	if (!channel->rd_ref_count)
		goto done;

	bufidx = channel->rd_host_buf_idx;

	bufidx_minus1 = (bufidx == 0) ?
		channel->num_rd_buffers - 1 :
		bufidx - 1;

	/* Whole elements to submit; leftover bytes stay on the host side */
	end_offset_plus1 = channel->rd_host_buf_pos >>
		channel->log2_element_size;

	new_rd_host_buf_pos = channel->rd_host_buf_pos -
		(end_offset_plus1 << channel->log2_element_size);

	/* Submit the current buffer if it's nonempty */
	if (end_offset_plus1) {
		unsigned char *tail = channel->rd_buffers[bufidx]->addr +
			(end_offset_plus1 << channel->log2_element_size);

		/* Copy unflushed data, so we can put it in next buffer */
		for (i = 0; i < new_rd_host_buf_pos; i++)
			channel->rd_leftovers[i] = *tail++;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		/* Autoflush only if a single buffer is occupied */

		if ((timeout < 0) &&
		    (channel->rd_full ||
		     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
			/*
			 * A new work item may be queued by the ISR exactly
			 * now, since the execution of a work item allows the
			 * queuing of a new one while it's running.
			 */
			goto done;
		}

		/* The 4th element is never needed for data, so it's a flag */
		channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);

		/* Set up rd_full to reflect a certain moment's state */

		if (bufidx == channel->rd_fpga_buf_idx)
			channel->rd_full = 1;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (bufidx >= (channel->num_rd_buffers - 1))
			channel->rd_host_buf_idx = 0;
		else
			channel->rd_host_buf_idx++;

		channel->endpoint->ephw->hw_sync_sgl_for_device(
			channel->endpoint,
			channel->rd_buffers[bufidx]->dma_addr,
			channel->rd_buf_size,
			DMA_TO_DEVICE);

		mutex_lock(&channel->endpoint->register_mutex);

		iowrite32(end_offset_plus1 - 1,
			  channel->endpoint->registers + fpga_buf_offset_reg);

		iowrite32((channel->chan_num << 1) | /* Channel ID */
			  (2 << 24) |  /* Opcode 2, submit buffer */
			  (bufidx << 12),
			  channel->endpoint->registers + fpga_buf_ctrl_reg);

		mutex_unlock(&channel->endpoint->register_mutex);
	} else if (bufidx == 0) {
		bufidx = channel->num_rd_buffers - 1;
	} else {
		bufidx--;
	}

	channel->rd_host_buf_pos = new_rd_host_buf_pos;

	if (timeout < 0)
		goto done; /* Autoflush */

	/*
	 * bufidx is now the last buffer written to (or equal to
	 * rd_fpga_buf_idx if buffer was never written to), and
	 * channel->rd_host_buf_idx the one after it.
	 *
	 * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
	 */

	while (1) { /* Loop waiting for draining of buffers */
		spin_lock_irqsave(&channel->rd_spinlock, flags);

		if (bufidx != channel->rd_fpga_buf_idx)
			channel->rd_full = 1; /*
					       * Not really full,
					       * but needs waiting.
					       */

		empty = !channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (empty)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing user should not be surprised if open() for write
		 * sleeps.
*/
		if (timeout == 0)
			wait_event_interruptible(channel->rd_wait,
						 (!channel->rd_full));
		else if (wait_event_interruptible_timeout(
				 channel->rd_wait,
				 (!channel->rd_full),
				 timeout) == 0) {
			dev_warn(channel->endpoint->dev,
				 "Timed out while flushing. Output data may be lost.\n");

			rc = -ETIMEDOUT;
			break;
		}

		/* Still full here means the wait was interrupted by a signal */
		if (channel->rd_full) {
			rc = -EINTR;
			break;
		}
	}

done:
	mutex_unlock(&channel->rd_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	return rc;
}

/* flush() file operation: flush write data with a 1-second timeout */
static int xillybus_flush(struct file *filp, fl_owner_t id)
{
	if (!(filp->f_mode & FMODE_WRITE))
		return 0;

	return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
}

/* Delayed-work handler: autoflush (timeout < 0) a write channel */
static void xillybus_autoflush(struct work_struct *work)
{
	struct delayed_work *workitem = container_of(
		work, struct delayed_work, work);
	struct xilly_channel *channel = container_of(
		workitem, struct xilly_channel, rd_workitem);
	int rc;

	rc = xillybus_myflush(channel, -1);
	if (rc == -EINTR)
		dev_warn(channel->endpoint->dev,
			 "Autoflush failed because work queue thread got a signal.\n");
	else if (rc)
		dev_err(channel->endpoint->dev,
			"Autoflush failed under weird circumstances.\n");
}

/*
 * write() for a host-to-FPGA ("rd") channel: copy user data into DMA
 * buffers, submitting each buffer to the FPGA as it fills. Honors
 * O_NONBLOCK and the partial-write policy; synchronous channels are
 * flushed before returning.
 */
static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
			      size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	struct xilly_channel *channel = filp->private_data;

	int full, exhausted;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int end_offset_plus1 = 0;

	if (channel->endpoint->fatal_error)
		return -EIO;

	rc = mutex_lock_interruptible(&channel->rd_mutex);
	if (rc)
		return rc;

	while (1) {
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		full = channel->rd_full;

		if (!full) {
			bufidx = channel->rd_host_buf_idx;
			bufpos = channel->rd_host_buf_pos;
			howmany = channel->rd_buf_size - bufpos;

			/*
			 * Update rd_host_* to its state after this operation.
			 * count=0 means committing the buffer immediately,
			 * which is like flushing, but not necessarily block.
			 */

			if ((howmany > bytes_to_do) &&
			    (count ||
			     ((bufpos >> channel->log2_element_size) == 0))) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->rd_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				if (count) {
					end_offset_plus1 =
						channel->rd_buf_size >>
						channel->log2_element_size;
					channel->rd_host_buf_pos = 0;
				} else {
					unsigned char *tail;
					int i;

					howmany = 0;

					end_offset_plus1 = bufpos >>
						channel->log2_element_size;

					channel->rd_host_buf_pos -=
						end_offset_plus1 <<
						channel->log2_element_size;

					/* Save sub-element leftovers for later */
					tail = channel->
						rd_buffers[bufidx]->addr +
						(end_offset_plus1 <<
						 channel->log2_element_size);

					for (i = 0;
					     i < channel->rd_host_buf_pos;
					     i++)
						channel->rd_leftovers[i] =
							*tail++;
				}

				if (bufidx == channel->rd_fpga_buf_idx)
					channel->rd_full = 1;

				if (bufidx >= (channel->num_rd_buffers - 1))
					channel->rd_host_buf_idx = 0;
				else
					channel->rd_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * full = full before change
		 * exhausted = full after possible change
		 */

		exhausted = channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (!full) { /* Go on, now without the spinlock */
			unsigned char *head =
				channel->rd_buffers[bufidx]->addr;
			int i;

			if ((bufpos == 0) || /* Zero means it's virgin */
			    (channel->rd_leftovers[3] != 0)) {
				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
					channel->endpoint,
					channel->rd_buffers[bufidx]->dma_addr,
					channel->rd_buf_size,
					DMA_TO_DEVICE);

				/* Virgin, but leftovers are due */
				for (i = 0; i < bufpos; i++)
					*head++ = channel->rd_leftovers[i];

				channel->rd_leftovers[3] = 0; /* Clear flag */
			}

			if (copy_from_user(
				    channel->rd_buffers[bufidx]->addr +
				    bufpos, userbuf, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				channel->endpoint->ephw->
					hw_sync_sgl_for_device(
						channel->endpoint,
						channel->rd_buffers[bufidx]->
						dma_addr,
						channel->rd_buf_size,
						DMA_TO_DEVICE);

				mutex_lock(&channel->endpoint->
					   register_mutex);

				iowrite32(end_offset_plus1 - 1,
					  channel->endpoint->registers +
					  fpga_buf_offset_reg);

				iowrite32((channel->chan_num << 1) |
					  (2 << 24) |  /* 2 = submit buffer */
					  (bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);

				mutex_unlock(&channel->endpoint->
					     register_mutex);

				channel->rd_leftovers[3] =
					(channel->rd_host_buf_pos != 0);
			}

			if (rc) {
				mutex_unlock(&channel->rd_mutex);

				if (channel->endpoint->fatal_error)
					return -EIO;

				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);

				return rc;
			}
		}

		if (bytes_done >= count)
			break;

		if (!exhausted)
			continue; /* If there's more space, just go on */

		if ((bytes_done > 0) && channel->rd_allow_partial)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing, user should not be surprised if open() for write
		 * sleeps.
*/
		if (filp->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			break;
		}

		if (wait_event_interruptible(channel->rd_wait,
					     (!channel->rd_full))) {
			mutex_unlock(&channel->rd_mutex);

			if (channel->endpoint->fatal_error)
				return -EIO;

			if (bytes_done)
				return bytes_done;
			return -EINTR;
		}
	}

	mutex_unlock(&channel->rd_mutex);

	/* Asynchronous channels get an autoflush scheduled after a while */
	if (!channel->rd_synchronous)
		queue_delayed_work(xillybus_wq,
				   &channel->rd_workitem,
				   XILLY_RX_TIMEOUT);

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (rc)
		return rc;

	if ((channel->rd_synchronous) && (bytes_done > 0)) {
		rc = xillybus_myflush(filp->private_data, 0); /* No timeout */

		if (rc && (rc != -EINTR))
			return rc;
	}

	return bytes_done;
}

/*
 * open() file operation: locate the endpoint/channel from major/minor,
 * enforce direction availability, O_NONBLOCK restrictions and exclusive
 * opens, and on first open of a direction reset its state and tell the
 * FPGA to open the channel (opcode 4).
 */
static int xillybus_open(struct inode *inode, struct file *filp)
{
	int rc = 0;
	unsigned long flags;
	int minor = iminor(inode);
	int major = imajor(inode);
	struct xilly_endpoint *ep_iter, *endpoint = NULL;
	struct xilly_channel *channel;

	mutex_lock(&ep_list_lock);

	list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) {
		if ((ep_iter->major == major) &&
		    (minor >= ep_iter->lowest_minor) &&
		    (minor < (ep_iter->lowest_minor +
			      ep_iter->num_channels))) {
			endpoint = ep_iter;
			break;
		}
	}
	mutex_unlock(&ep_list_lock);

	if (!endpoint) {
		pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n",
		       major, minor);
		return -ENODEV;
	}

	if (endpoint->fatal_error)
		return -EIO;

	channel = endpoint->channels[1 + minor - endpoint->lowest_minor];
	filp->private_data = channel;

	/*
	 * It gets complicated because:
	 * 1. We don't want to take a mutex we don't have to
	 * 2. We don't want to open one direction if the other will fail.
channel->wr_ref_count++;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (channel->rd_ref_count == 0) { /* First open of file */
			/* Move the host to first buffer */
			spin_lock_irqsave(&channel->rd_spinlock, flags);
			channel->rd_host_buf_idx = 0;
			channel->rd_host_buf_pos = 0;
			channel->rd_leftovers[3] = 0; /* No leftovers. */
			channel->rd_fpga_buf_idx =
				channel->num_rd_buffers - 1;
			channel->rd_full = 0;

			spin_unlock_irqrestore(&channel->rd_spinlock, flags);

			iowrite32((channel->chan_num << 1) |
				  (4 << 24),   /* Opcode 4, open channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		channel->rd_ref_count++;
	}

unlock:
	if (filp->f_mode & FMODE_WRITE)
		mutex_unlock(&channel->rd_mutex);
unlock_wr:
	if (filp->f_mode & FMODE_READ)
		mutex_unlock(&channel->wr_mutex);

	if (!rc && (!channel->seekable))
		return nonseekable_open(inode, filp);

	return rc;
}

/*
 * release() file operation: drop the per-direction reference counts and,
 * on last close of a direction, tell the FPGA to close the channel
 * (opcode 5). For read channels, additionally wait until EOF confirms no
 * buffers are still pending from the FPGA.
 */
static int xillybus_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct xilly_channel *channel = filp->private_data;

	int buf_idx;
	int eof;

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (filp->f_mode & FMODE_WRITE) {
		mutex_lock(&channel->rd_mutex);

		channel->rd_ref_count--;

		if (channel->rd_ref_count == 0) {
			/*
			 * We rely on the kernel calling flush()
			 * before we get here.
			 */

			iowrite32((channel->chan_num << 1) | /* Channel ID */
				  (5 << 24),  /* Opcode 5, close channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		mutex_unlock(&channel->rd_mutex);
	}

	if (filp->f_mode & FMODE_READ) {
		mutex_lock(&channel->wr_mutex);

		channel->wr_ref_count--;

		if (channel->wr_ref_count == 0) {
			iowrite32(1 | (channel->chan_num << 1) |
				  (5 << 24),  /* Opcode 5, close channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);

			/*
			 * This is crazily cautious: We make sure that not
			 * only that we got an EOF (be it because we closed
			 * the channel or because of a user's EOF), but verify
			 * that it's one beyond the last buffer arrived, so
			 * we have no leftover buffers pending before wrapping
			 * up (which can only happen in asynchronous channels,
			 * BTW)
			 */

			while (1) {
				spin_lock_irqsave(&channel->wr_spinlock,
						  flags);
				buf_idx = channel->wr_fpga_buf_idx;
				eof = channel->wr_eof;
				channel->wr_sleepy = 1;
				spin_unlock_irqrestore(&channel->wr_spinlock,
						       flags);

				/*
				 * Check if eof points at the buffer after
				 * the last one the FPGA submitted. Note that
				 * no EOF is marked by negative eof.
				 */

				buf_idx++;
				if (buf_idx == channel->num_wr_buffers)
					buf_idx = 0;

				if (buf_idx == eof)
					break;

				/*
				 * Steal extra 100 ms if awaken by interrupt.
				 * This is a simple workaround for an
				 * interrupt pending when entering, which would
				 * otherwise result in declaring the hardware
				 * non-responsive.
*/
				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					msleep(100);

				if (channel->wr_sleepy) {
					mutex_unlock(&channel->wr_mutex);
					dev_warn(channel->endpoint->dev,
						 "Hardware failed to respond to close command, therefore left in messy state.\n");
					return -EINTR;
				}
			}
		}

		mutex_unlock(&channel->wr_mutex);
	}

	return 0;
}

/*
 * llseek() file operation: validate the target position against the
 * channel's element size and program it into the FPGA (opcode 6, set
 * address). Only meaningful on seekable (synchronous) channels.
 */
static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
{
	struct xilly_channel *channel = filp->private_data;
	loff_t pos = filp->f_pos;
	int rc = 0;

	/*
	 * Take both mutexes not allowing interrupts, since it seems like
	 * common applications don't expect an -EINTR here. Besides, multiple
	 * access to a single file descriptor on seekable devices is a mess
	 * anyhow.
	 */

	if (channel->endpoint->fatal_error)
		return -EIO;

	mutex_lock(&channel->wr_mutex);
	mutex_lock(&channel->rd_mutex);

	switch (whence) {
	case SEEK_SET:
		pos = offset;
		break;
	case SEEK_CUR:
		pos += offset;
		break;
	case SEEK_END:
		pos = offset; /* Going to the end => to the beginning */
		break;
	default:
		rc = -EINVAL;
		goto end;
	}

	/* In any case, we must finish on an element boundary */
	if (pos & ((1 << channel->log2_element_size) - 1)) {
		rc = -EINVAL;
		goto end;
	}

	mutex_lock(&channel->endpoint->register_mutex);

	iowrite32(pos >> channel->log2_element_size,
		  channel->endpoint->registers + fpga_buf_offset_reg);

	iowrite32((channel->chan_num << 1) |
		  (6 << 24),  /* Opcode 6, set address */
		  channel->endpoint->registers + fpga_buf_ctrl_reg);

	mutex_unlock(&channel->endpoint->register_mutex);

end:
	mutex_unlock(&channel->rd_mutex);
	mutex_unlock(&channel->wr_mutex);

	if (rc) /* Return error after releasing mutexes */
		return rc;

	filp->f_pos = pos;

	/*
	 * Since seekable devices are allowed only when the channel is
	 * synchronous, we assume that there is no data pending in either
	 * direction (which holds true as long as no concurrent access on the
	 * file descriptor takes place).
	 * The only thing we may need to throw away is leftovers from partial
	 * write() flush.
	 */

	channel->rd_leftovers[3] = 0;

	return pos;
}

/*
 * poll() file operation: report readability on asynchronous read channels
 * that support the nonempty message, and writability on write channels
 * that allow partial writes. Other configurations are deliberately left
 * out (see comments below).
 */
static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
{
	struct xilly_channel *channel = filp->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	poll_wait(filp, &channel->endpoint->ep_wait, wait);

	/*
	 * poll() won't play ball regarding read() channels which
	 * aren't asynchronous and support the nonempty message. Allowing
	 * that will create situations where data has been delivered at
	 * the FPGA, and users expecting select() to wake up, which it may
	 * not.
	 */

	if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
		poll_wait(filp, &channel->wr_wait, wait);
		poll_wait(filp, &channel->wr_ready_wait, wait);

		spin_lock_irqsave(&channel->wr_spinlock, flags);
		if (!channel->wr_empty || channel->wr_ready)
			mask |= POLLIN | POLLRDNORM;

		if (channel->wr_hangup)
			/*
			 * Not POLLHUP, because its behavior is in the
			 * mist, and POLLIN does what we want: Wake up
			 * the read file descriptor so it sees EOF.
			 */
			mask |= POLLIN | POLLRDNORM;
		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
	}

	/*
	 * If partial data write is disallowed on a write() channel,
	 * it's pointless to ever signal OK to write, because it could
	 * block despite some space being available.
*/

	if (channel->rd_allow_partial) {
		poll_wait(filp, &channel->rd_wait, wait);

		spin_lock_irqsave(&channel->rd_spinlock, flags);
		if (!channel->rd_full)
			mask |= POLLOUT | POLLWRNORM;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
	}

	if (channel->endpoint->fatal_error)
		mask |= POLLERR;

	return mask;
}

static const struct file_operations xillybus_fops = {
	.owner      = THIS_MODULE,
	.read       = xillybus_read,
	.write      = xillybus_write,
	.open       = xillybus_open,
	.flush      = xillybus_flush,
	.release    = xillybus_release,
	.llseek     = xillybus_llseek,
	.poll       = xillybus_poll,
};

/*
 * Register the character device region and create one device node per
 * channel, named "xillybus_<name>" after the NUL-terminated name strings
 * in @idt (which starts at the IDT's version byte, hence the idt++).
 * Returns 0 or a negative errno, unrolling partial registration on error.
 */
static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
				const unsigned char *idt)
{
	int rc;
	dev_t dev;
	int devnum, i, minor, major;
	char devname[48];
	struct device *device;

	rc = alloc_chrdev_region(&dev, 0, /* minor start */
				 endpoint->num_channels,
				 xillyname);
	if (rc) {
		dev_warn(endpoint->dev, "Failed to obtain major/minors");
		return rc;
	}

	endpoint->major = major = MAJOR(dev);
	endpoint->lowest_minor = minor = MINOR(dev);

	cdev_init(&endpoint->cdev, &xillybus_fops);
	endpoint->cdev.owner = endpoint->ephw->owner;

	rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
		      endpoint->num_channels);
	if (rc) {
		dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n");
		goto unregister_chrdev;
	}

	idt++; /* Skip the version byte, landing on the first name string */

	for (i = minor, devnum = 0; devnum < endpoint->num_channels;
	     devnum++, i++) {
		snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);

		devname[sizeof(devname)-1] = 0; /* Should never matter */

		while (*idt++)
			/* Skip to next */;

		device = device_create(xillybus_class,
				       NULL,
				       MKDEV(major, i),
				       NULL,
				       "%s", devname);

		if (IS_ERR(device)) {
			dev_warn(endpoint->dev,
				 "Failed to create %s device. Aborting.\n",
				 devname);
			rc = -ENODEV;
			goto unroll_device_create;
		}
	}

	dev_info(endpoint->dev, "Created %d device files.\n",
		 endpoint->num_channels);
	return 0; /* succeed */

unroll_device_create:
	devnum--; i--;
	for (; devnum >= 0; devnum--, i--)
		device_destroy(xillybus_class, MKDEV(major, i));

	cdev_del(&endpoint->cdev);
unregister_chrdev:
	unregister_chrdev_region(MKDEV(major, minor),
				 endpoint->num_channels);

	return rc;
}

/* Undo xillybus_init_chrdev(): destroy device nodes and unregister */
static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
{
	int minor;

	for (minor = endpoint->lowest_minor;
	     minor < (endpoint->lowest_minor + endpoint->num_channels);
	     minor++)
		device_destroy(xillybus_class,
			       MKDEV(endpoint->major, minor));
	cdev_del(&endpoint->cdev);
	unregister_chrdev_region(MKDEV(endpoint->major,
				       endpoint->lowest_minor),
				 endpoint->num_channels);

	dev_info(endpoint->dev, "Removed %d device files.\n",
		 endpoint->num_channels);
}

/*
 * Allocate (devres-managed) and minimally initialize an endpoint
 * structure for a hardware backend (PCIe or OF). Returns NULL on
 * allocation failure.
 */
struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
					      struct device *dev,
					      struct xilly_endpoint_hardware
					      *ephw)
{
	struct xilly_endpoint *endpoint;

	endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL);
	if (!endpoint)
		return NULL;

	endpoint->pdev = pdev;
	endpoint->dev = dev;
	endpoint->ephw = ephw;
	endpoint->msg_counter = 0x0b;
	endpoint->failed_messages = 0;
	endpoint->fatal_error = 0;

	init_waitqueue_head(&endpoint->ep_wait);
	mutex_init(&endpoint->register_mutex);

	return endpoint;
}
EXPORT_SYMBOL(xillybus_init_endpoint);

/*
 * Quiesce the FPGA on the way out: write the DMA control register and
 * wait (with timeout) for the resulting message to set idtlen >= 0.
 * Returns 0 or -ENODEV on timeout/interrupt.
 */
static int xilly_quiesce(struct xilly_endpoint *endpoint)
{
	long t;

	endpoint->idtlen = -1;

	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
		  endpoint->registers + fpga_dma_control_reg);

	t = wait_event_interruptible_timeout(endpoint->ep_wait,
					     (endpoint->idtlen >= 0),
					     XILLY_TIMEOUT);
	if (t <= 0) {
		dev_err(endpoint->dev,
			"Failed to quiesce the device on exit.\n");
		return -ENODEV;
	}
	return 0;
}

int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
{
	int rc;
	long t;

	void *bootstrap_resources;
	int idtbuffersize = (1 << PAGE_SHIFT);
	struct device *dev =
endpoint->dev; /* * The bogus IDT is used during bootstrap for allocating the initial * message buffer, and then the message buffer and space for the IDT * itself. The initial message buffer is of a single page's size, but * it's soon replaced with a more modest one (and memory is freed). */ unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0, 3, 192, PAGE_SHIFT, 0 }; struct xilly_idt_handle idt_handle; /* * Writing the value 0x00000001 to Endianness register signals which * endianness this processor is using, so the FPGA can swap words as * necessary. */ iowrite32(1, endpoint->registers + fpga_endian_reg); /* Bootstrap phase I: Allocate temporary message buffer */ bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL); if (!bootstrap_resources) return -ENOMEM; endpoint->num_channels = 0; rc = xilly_setupchannels(endpoint, bogus_idt, 1); if (rc) return rc; /* Clear the message subsystem (and counter in particular) */ iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg); endpoint->idtlen = -1; /* * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT * buffer size. */ iowrite32((u32) (endpoint->dma_using_dac & 0x0001), endpoint->registers + fpga_dma_control_reg); t = wait_event_interruptible_timeout(endpoint->ep_wait, (endpoint->idtlen >= 0), XILLY_TIMEOUT); if (t <= 0) { dev_err(endpoint->dev, "No response from FPGA. 
Aborting.\n"); return -ENODEV; } /* Enable DMA */ iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)), endpoint->registers + fpga_dma_control_reg); /* Bootstrap phase II: Allocate buffer for IDT and obtain it */ while (endpoint->idtlen >= idtbuffersize) { idtbuffersize *= 2; bogus_idt[6]++; } endpoint->num_channels = 1; rc = xilly_setupchannels(endpoint, bogus_idt, 2); if (rc) goto failed_idt; rc = xilly_obtain_idt(endpoint); if (rc) goto failed_idt; rc = xilly_scan_idt(endpoint, &idt_handle); if (rc) goto failed_idt; devres_close_group(dev, bootstrap_resources); /* Bootstrap phase III: Allocate buffers according to IDT */ rc = xilly_setupchannels(endpoint, idt_handle.chandesc, idt_handle.entries); if (rc) goto failed_idt; /* * endpoint is now completely configured. We put it on the list * available to open() before registering the char device(s) */ mutex_lock(&ep_list_lock); list_add_tail(&endpoint->ep_list, &list_of_endpoints); mutex_unlock(&ep_list_lock); rc = xillybus_init_chrdev(endpoint, idt_handle.idt); if (rc) goto failed_chrdevs; devres_release_group(dev, bootstrap_resources); return 0; failed_chrdevs: mutex_lock(&ep_list_lock); list_del(&endpoint->ep_list); mutex_unlock(&ep_list_lock); failed_idt: xilly_quiesce(endpoint); flush_workqueue(xillybus_wq); return rc; } EXPORT_SYMBOL(xillybus_endpoint_discovery); void xillybus_endpoint_remove(struct xilly_endpoint *endpoint) { xillybus_cleanup_chrdev(endpoint); mutex_lock(&ep_list_lock); list_del(&endpoint->ep_list); mutex_unlock(&ep_list_lock); xilly_quiesce(endpoint); /* * Flushing is done upon endpoint release to prevent access to memory * just about to be released. This makes the quiesce complete. 
*/ flush_workqueue(xillybus_wq); } EXPORT_SYMBOL(xillybus_endpoint_remove); static int __init xillybus_init(void) { mutex_init(&ep_list_lock); xillybus_class = class_create(THIS_MODULE, xillyname); if (IS_ERR(xillybus_class)) return PTR_ERR(xillybus_class); xillybus_wq = alloc_workqueue(xillyname, 0, 0); if (!xillybus_wq) { class_destroy(xillybus_class); return -ENOMEM; } return 0; } static void __exit xillybus_exit(void) { /* flush_workqueue() was called for each endpoint released */ destroy_workqueue(xillybus_wq); class_destroy(xillybus_class); } module_init(xillybus_init); module_exit(xillybus_exit);
gpl-2.0
PureNexusProject/android_kernel_google_molly
drivers/usb/core/hcd-pci.c
806
16063
/* * (C) Copyright David Brownell 2000-2002 * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <asm/io.h> #include <asm/irq.h> #ifdef CONFIG_PPC_PMAC #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/pci-bridge.h> #include <asm/prom.h> #endif #include "usb.h" /* PCI-based HCs are common, but plenty of non-PCI HCs are used too */ #ifdef CONFIG_PM_SLEEP /* Coordinate handoffs between EHCI and companion controllers * during system resume */ static DEFINE_MUTEX(companions_mutex); #define CL_UHCI PCI_CLASS_SERIAL_USB_UHCI #define CL_OHCI PCI_CLASS_SERIAL_USB_OHCI #define CL_EHCI PCI_CLASS_SERIAL_USB_EHCI enum companion_action { SET_HS_COMPANION, CLEAR_HS_COMPANION, WAIT_FOR_COMPANIONS }; static void companion_common(struct pci_dev *pdev, struct usb_hcd *hcd, enum companion_action action) { struct pci_dev *companion; struct usb_hcd *companion_hcd; unsigned int slot = PCI_SLOT(pdev->devfn); /* Iterate through other PCI functions in the same slot. * If pdev is OHCI or UHCI then we are looking for EHCI, and * vice versa. 
*/ companion = NULL; for_each_pci_dev(companion) { if (companion->bus != pdev->bus || PCI_SLOT(companion->devfn) != slot) continue; companion_hcd = pci_get_drvdata(companion); if (!companion_hcd) continue; /* For SET_HS_COMPANION, store a pointer to the EHCI bus in * the OHCI/UHCI companion bus structure. * For CLEAR_HS_COMPANION, clear the pointer to the EHCI bus * in the OHCI/UHCI companion bus structure. * For WAIT_FOR_COMPANIONS, wait until the OHCI/UHCI * companion controllers have fully resumed. */ if ((pdev->class == CL_OHCI || pdev->class == CL_UHCI) && companion->class == CL_EHCI) { /* action must be SET_HS_COMPANION */ dev_dbg(&companion->dev, "HS companion for %s\n", dev_name(&pdev->dev)); hcd->self.hs_companion = &companion_hcd->self; } else if (pdev->class == CL_EHCI && (companion->class == CL_OHCI || companion->class == CL_UHCI)) { switch (action) { case SET_HS_COMPANION: dev_dbg(&pdev->dev, "HS companion for %s\n", dev_name(&companion->dev)); companion_hcd->self.hs_companion = &hcd->self; break; case CLEAR_HS_COMPANION: companion_hcd->self.hs_companion = NULL; break; case WAIT_FOR_COMPANIONS: device_pm_wait_for_dev(&pdev->dev, &companion->dev); break; } } } } static void set_hs_companion(struct pci_dev *pdev, struct usb_hcd *hcd) { mutex_lock(&companions_mutex); dev_set_drvdata(&pdev->dev, hcd); companion_common(pdev, hcd, SET_HS_COMPANION); mutex_unlock(&companions_mutex); } static void clear_hs_companion(struct pci_dev *pdev, struct usb_hcd *hcd) { mutex_lock(&companions_mutex); dev_set_drvdata(&pdev->dev, NULL); /* If pdev is OHCI or UHCI, just clear its hs_companion pointer */ if (pdev->class == CL_OHCI || pdev->class == CL_UHCI) hcd->self.hs_companion = NULL; /* Otherwise search for companion buses and clear their pointers */ else companion_common(pdev, hcd, CLEAR_HS_COMPANION); mutex_unlock(&companions_mutex); } static void wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd) { /* Only EHCI controllers need to wait. 
* No locking is needed because a controller cannot be resumed * while one of its companions is getting unbound. */ if (pdev->class == CL_EHCI) companion_common(pdev, hcd, WAIT_FOR_COMPANIONS); } #else /* !CONFIG_PM_SLEEP */ static inline void set_hs_companion(struct pci_dev *d, struct usb_hcd *h) {} static inline void clear_hs_companion(struct pci_dev *d, struct usb_hcd *h) {} static inline void wait_for_companions(struct pci_dev *d, struct usb_hcd *h) {} #endif /* !CONFIG_PM_SLEEP */ /*-------------------------------------------------------------------------*/ /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ /** * usb_hcd_pci_probe - initialize PCI-based HCDs * @dev: USB Host Controller being probed * @id: pci hotplug id connecting controller to HCD framework * Context: !in_interrupt() * * Allocates basic PCI resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. * * Store this function in the HCD's struct pci_driver as probe(). */ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct hc_driver *driver; struct usb_hcd *hcd; int retval; if (usb_disabled()) return -ENODEV; if (!id) return -EINVAL; driver = (struct hc_driver *)id->driver_data; if (!driver) return -EINVAL; if (pci_enable_device(dev) < 0) return -ENODEV; dev->current_state = PCI_D0; /* The xHCI driver supports MSI and MSI-X, * so don't fail if the BIOS doesn't provide a legacy IRQ. */ if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) { dev_err(&dev->dev, "Found HC with no IRQ. 
Check BIOS/PCI %s setup!\n", pci_name(dev)); retval = -ENODEV; goto disable_pci; } hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev)); if (!hcd) { retval = -ENOMEM; goto disable_pci; } if (driver->flags & HCD_MEMORY) { /* EHCI, OHCI */ hcd->rsrc_start = pci_resource_start(dev, 0); hcd->rsrc_len = pci_resource_len(dev, 0); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, driver->description)) { dev_dbg(&dev->dev, "controller already in use\n"); retval = -EBUSY; goto clear_companion; } hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); if (hcd->regs == NULL) { dev_dbg(&dev->dev, "error mapping memory\n"); retval = -EFAULT; goto release_mem_region; } } else { /* UHCI */ int region; for (region = 0; region < PCI_ROM_RESOURCE; region++) { if (!(pci_resource_flags(dev, region) & IORESOURCE_IO)) continue; hcd->rsrc_start = pci_resource_start(dev, region); hcd->rsrc_len = pci_resource_len(dev, region); if (request_region(hcd->rsrc_start, hcd->rsrc_len, driver->description)) break; } if (region == PCI_ROM_RESOURCE) { dev_dbg(&dev->dev, "no i/o regions available\n"); retval = -EBUSY; goto clear_companion; } } pci_set_master(dev); retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED); if (retval != 0) goto unmap_registers; set_hs_companion(dev, hcd); if (pci_dev_run_wake(dev)) pm_runtime_put_noidle(&dev->dev); return retval; unmap_registers: if (driver->flags & HCD_MEMORY) { iounmap(hcd->regs); release_mem_region: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); } else release_region(hcd->rsrc_start, hcd->rsrc_len); clear_companion: clear_hs_companion(dev, hcd); usb_put_hcd(hcd); disable_pci: pci_disable_device(dev); dev_err(&dev->dev, "init %s fail, %d\n", pci_name(dev), retval); return retval; } EXPORT_SYMBOL_GPL(usb_hcd_pci_probe); /* may be called without controller electrically present */ /* may be called with controller, bus, and devices active */ /** * usb_hcd_pci_remove - shutdown processing for PCI-based HCDs * @dev: USB Host Controller being 
removed * Context: !in_interrupt() * * Reverses the effect of usb_hcd_pci_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, normally "rmmod", "apmd", or something similar. * * Store this function in the HCD's struct pci_driver as remove(). */ void usb_hcd_pci_remove(struct pci_dev *dev) { struct usb_hcd *hcd; hcd = pci_get_drvdata(dev); if (!hcd) return; if (pci_dev_run_wake(dev)) pm_runtime_get_noresume(&dev->dev); /* Fake an interrupt request in order to give the driver a chance * to test whether the controller hardware has been removed (e.g., * cardbus physical eject). */ local_irq_disable(); usb_hcd_irq(0, hcd); local_irq_enable(); usb_remove_hcd(hcd); if (hcd->driver->flags & HCD_MEMORY) { iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); } else { release_region(hcd->rsrc_start, hcd->rsrc_len); } clear_hs_companion(dev, hcd); usb_put_hcd(hcd); pci_disable_device(dev); } EXPORT_SYMBOL_GPL(usb_hcd_pci_remove); /** * usb_hcd_pci_shutdown - shutdown host controller * @dev: USB Host Controller being shutdown */ void usb_hcd_pci_shutdown(struct pci_dev *dev) { struct usb_hcd *hcd; hcd = pci_get_drvdata(dev); if (!hcd) return; if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) && hcd->driver->shutdown) { hcd->driver->shutdown(hcd); pci_disable_device(dev); } } EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown); #ifdef CONFIG_PM #ifdef CONFIG_PPC_PMAC static void powermac_set_asic(struct pci_dev *pci_dev, int enable) { /* Enanble or disable ASIC clocks for USB */ if (machine_is(powermac)) { struct device_node *of_node; of_node = pci_device_to_OF_node(pci_dev); if (of_node) pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, enable); } } #else static inline void powermac_set_asic(struct pci_dev *pci_dev, int enable) {} #endif /* CONFIG_PPC_PMAC */ static int check_root_hub_suspended(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct usb_hcd *hcd = pci_get_drvdata(pci_dev); if 
(HCD_RH_RUNNING(hcd)) { dev_warn(dev, "Root hub is not suspended\n"); return -EBUSY; } if (hcd->shared_hcd) { hcd = hcd->shared_hcd; if (HCD_RH_RUNNING(hcd)) { dev_warn(dev, "Secondary root hub is not suspended\n"); return -EBUSY; } } return 0; } #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME) static int suspend_common(struct device *dev, bool do_wakeup) { struct pci_dev *pci_dev = to_pci_dev(dev); struct usb_hcd *hcd = pci_get_drvdata(pci_dev); int retval; /* Root hub suspend should have stopped all downstream traffic, * and all bus master traffic. And done so for both the interface * and the stub usb_device (which we check here). But maybe it * didn't; writing sysfs power/state files ignores such rules... */ retval = check_root_hub_suspended(dev); if (retval) return retval; if (hcd->driver->pci_suspend && !HCD_DEAD(hcd)) { /* Optimization: Don't suspend if a root-hub wakeup is * pending and it would cause the HCD to wake up anyway. */ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) return -EBUSY; if (do_wakeup && hcd->shared_hcd && HCD_WAKEUP_PENDING(hcd->shared_hcd)) return -EBUSY; retval = hcd->driver->pci_suspend(hcd, do_wakeup); suspend_report_result(hcd->driver->pci_suspend, retval); /* Check again in case wakeup raced with pci_suspend */ if ((retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) || (retval == 0 && do_wakeup && hcd->shared_hcd && HCD_WAKEUP_PENDING(hcd->shared_hcd))) { if (hcd->driver->pci_resume) hcd->driver->pci_resume(hcd, false); retval = -EBUSY; } if (retval) return retval; } /* If MSI-X is enabled, the driver will have synchronized all vectors * in pci_suspend(). If MSI or legacy PCI is enabled, that will be * synchronized here. */ if (!hcd->msix_enabled) synchronize_irq(pci_dev->irq); /* Downstream ports from this root hub should already be quiesced, so * there will be no DMA activity. Now we can shut down the upstream * link (except maybe for PME# resume signaling). 
We'll enter a * low power state during suspend_noirq, if the hardware allows. */ pci_disable_device(pci_dev); return retval; } static int resume_common(struct device *dev, int event) { struct pci_dev *pci_dev = to_pci_dev(dev); struct usb_hcd *hcd = pci_get_drvdata(pci_dev); int retval; if (HCD_RH_RUNNING(hcd) || (hcd->shared_hcd && HCD_RH_RUNNING(hcd->shared_hcd))) { dev_dbg(dev, "can't resume, not suspended!\n"); return 0; } retval = pci_enable_device(pci_dev); if (retval < 0) { dev_err(dev, "can't re-enable after resume, %d!\n", retval); return retval; } pci_set_master(pci_dev); if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) { if (event != PM_EVENT_AUTO_RESUME) wait_for_companions(pci_dev, hcd); retval = hcd->driver->pci_resume(hcd, event == PM_EVENT_RESTORE); if (retval) { dev_err(dev, "PCI post-resume error %d!\n", retval); if (hcd->shared_hcd) usb_hc_died(hcd->shared_hcd); usb_hc_died(hcd); } } return retval; } #endif /* SLEEP || RUNTIME */ #ifdef CONFIG_PM_SLEEP static int hcd_pci_suspend(struct device *dev) { return suspend_common(dev, device_may_wakeup(dev)); } static int hcd_pci_suspend_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct usb_hcd *hcd = pci_get_drvdata(pci_dev); int retval; retval = check_root_hub_suspended(dev); if (retval) return retval; pci_save_state(pci_dev); /* If the root hub is dead rather than suspended, disallow remote * wakeup. usb_hc_died() should ensure that both hosts are marked as * dying, so we only need to check the primary roothub. */ if (HCD_DEAD(hcd)) device_set_wakeup_enable(dev, 0); dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev)); /* Possibly enable remote wakeup, * choose the appropriate low-power state, and go to that state. 
*/ retval = pci_prepare_to_sleep(pci_dev); if (retval == -EIO) { /* Low-power not supported */ dev_dbg(dev, "--> PCI D0 legacy\n"); retval = 0; } else if (retval == 0) { dev_dbg(dev, "--> PCI %s\n", pci_power_name(pci_dev->current_state)); } else { suspend_report_result(pci_prepare_to_sleep, retval); return retval; } powermac_set_asic(pci_dev, 0); return retval; } static int hcd_pci_resume_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); powermac_set_asic(pci_dev, 1); /* Go back to D0 and disable remote wakeup */ pci_back_from_sleep(pci_dev); return 0; } static int hcd_pci_resume(struct device *dev) { return resume_common(dev, PM_EVENT_RESUME); } static int hcd_pci_restore(struct device *dev) { return resume_common(dev, PM_EVENT_RESTORE); } #else #define hcd_pci_suspend NULL #define hcd_pci_suspend_noirq NULL #define hcd_pci_resume_noirq NULL #define hcd_pci_resume NULL #define hcd_pci_restore NULL #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_RUNTIME static int hcd_pci_runtime_suspend(struct device *dev) { int retval; retval = suspend_common(dev, true); if (retval == 0) powermac_set_asic(to_pci_dev(dev), 0); dev_dbg(dev, "hcd_pci_runtime_suspend: %d\n", retval); return retval; } static int hcd_pci_runtime_resume(struct device *dev) { int retval; powermac_set_asic(to_pci_dev(dev), 1); retval = resume_common(dev, PM_EVENT_AUTO_RESUME); dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval); return retval; } #else #define hcd_pci_runtime_suspend NULL #define hcd_pci_runtime_resume NULL #endif /* CONFIG_PM_RUNTIME */ const struct dev_pm_ops usb_hcd_pci_pm_ops = { .suspend = hcd_pci_suspend, .suspend_noirq = hcd_pci_suspend_noirq, .resume_noirq = hcd_pci_resume_noirq, .resume = hcd_pci_resume, .freeze = check_root_hub_suspended, .freeze_noirq = check_root_hub_suspended, .thaw_noirq = NULL, .thaw = NULL, .poweroff = hcd_pci_suspend, .poweroff_noirq = hcd_pci_suspend_noirq, .restore_noirq = hcd_pci_resume_noirq, .restore = hcd_pci_restore, 
.runtime_suspend = hcd_pci_runtime_suspend, .runtime_resume = hcd_pci_runtime_resume, }; EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops); #endif /* CONFIG_PM */
gpl-2.0
RichardWithnell/mptcp
drivers/rtc/rtc-m41t93.c
1830
5599
/*
 *
 * Driver for ST M41T93 SPI RTC
 *
 * (c) 2010 Nikolaus Voss, Weinmann Medical GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bcd.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/spi/spi.h>

/* Register map of the M41T93 */
#define M41T93_REG_SSEC			0
#define M41T93_REG_ST_SEC		1
#define M41T93_REG_MIN			2
#define M41T93_REG_CENT_HOUR		3
#define M41T93_REG_WDAY			4
#define M41T93_REG_DAY			5
#define M41T93_REG_MON			6
#define M41T93_REG_YEAR			7

#define M41T93_REG_ALM_HOUR_HT		0xc
#define M41T93_REG_FLAGS		0xf

/* Status/control bits */
#define M41T93_FLAG_ST			(1 << 7)	/* stop oscillator */
#define M41T93_FLAG_OF			(1 << 2)	/* oscillator fail */
#define M41T93_FLAG_BL			(1 << 4)	/* battery low */
#define M41T93_FLAG_HT			(1 << 6)	/* halt update */

/*
 * Write one register.  The chip requires the MSB of the address byte
 * to be set for a write access.
 */
static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
{
	u8 buf[2];

	/* MSB must be '1' to write */
	buf[0] = addr | 0x80;
	buf[1] = data;

	return spi_write(spi, buf, sizeof(buf));
}

/*
 * Set the chip's time/date registers from @tm.  Only years >= 2000 are
 * representable (the century is encoded in two bits of CENT_HOUR).  If
 * the oscillator-fail flag is set, the flag is cleared first and, if it
 * sticks, the oscillator is kickstarted by toggling the ST bit.
 */
static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
{
	struct spi_device *spi = to_spi_device(dev);
	int tmp;
	u8 buf[9] = {0x80};        /* write cmd + 8 data bytes */
	u8 * const data = &buf[1]; /* ptr to first data byte */

	dev_dbg(dev, "%s secs=%d, mins=%d, "
		"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
		"write", tm->tm_sec, tm->tm_min,
		tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);

	if (tm->tm_year < 100) {
		dev_warn(&spi->dev, "unsupported date (before 2000-01-01).\n");
		return -EINVAL;
	}

	tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
	if (tmp < 0)
		return tmp;

	if (tmp & M41T93_FLAG_OF) {
		dev_warn(&spi->dev, "OF bit is set, resetting.\n");
		m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);

		tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
		if (tmp < 0) {
			return tmp;
		} else if (tmp & M41T93_FLAG_OF) {
			/* OF cannot be immediately reset: oscillator has to be
			 * restarted.
			 *
			 * NOTE(review): buf[] has not been filled with the
			 * new time yet, so reset_osc starts from the
			 * zero-initialized seconds byte (i.e. the kickstart
			 * writes seconds = 0 with ST set, then clears ST) —
			 * presumably intentional; confirm against datasheet.
			 */
			u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;

			dev_warn(&spi->dev,
				 "OF bit is still set, kickstarting clock.\n");
			m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
			reset_osc &= ~M41T93_FLAG_ST;
			m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
		}
	}

	data[M41T93_REG_SSEC]		= 0;
	data[M41T93_REG_ST_SEC]		= bin2bcd(tm->tm_sec);
	data[M41T93_REG_MIN]		= bin2bcd(tm->tm_min);
	data[M41T93_REG_CENT_HOUR]	= bin2bcd(tm->tm_hour) |
						((tm->tm_year/100-1) << 6);
	data[M41T93_REG_DAY]		= bin2bcd(tm->tm_mday);
	data[M41T93_REG_WDAY]		= bin2bcd(tm->tm_wday + 1);
	data[M41T93_REG_MON]		= bin2bcd(tm->tm_mon + 1);
	data[M41T93_REG_YEAR]		= bin2bcd(tm->tm_year % 100);

	return spi_write(spi, buf, sizeof(buf));
}

/*
 * Read the current time/date into @tm.  Clears the HT (halt) bit if it
 * is set, and reports -EINVAL when the oscillator-fail flag indicates
 * the stored time is invalid.
 */
static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
{
	struct spi_device *spi = to_spi_device(dev);
	const u8 start_addr = 0;
	u8 buf[8];
	int century_after_1900;
	int tmp;
	int ret = 0;

	/* Check status of clock. Two states must be considered:
	   1. halt bit (HT) is set: the clock is running but update of readout
	      registers has been disabled due to power failure. This is normal
	      case after poweron. Time is valid after resetting HT bit.
	   2. oscillator fail bit (OF) is set: time is invalid.
	*/
	tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
	if (tmp < 0)
		return tmp;

	if (tmp & M41T93_FLAG_HT) {
		dev_dbg(&spi->dev, "HT bit is set, reenable clock update.\n");
		m41t93_set_reg(spi, M41T93_REG_ALM_HOUR_HT,
			       tmp & ~M41T93_FLAG_HT);
	}

	tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
	if (tmp < 0)
		return tmp;

	if (tmp & M41T93_FLAG_OF) {
		ret = -EINVAL;
		dev_warn(&spi->dev, "OF bit is set, write time to restart.\n");
	}

	if (tmp & M41T93_FLAG_BL)
		dev_warn(&spi->dev, "BL bit is set, replace battery.\n");

	/* read actual time/date */
	tmp = spi_write_then_read(spi, &start_addr, 1, buf, sizeof(buf));
	if (tmp < 0)
		return tmp;

	tm->tm_sec	= bcd2bin(buf[M41T93_REG_ST_SEC]);
	tm->tm_min	= bcd2bin(buf[M41T93_REG_MIN]);
	tm->tm_hour	= bcd2bin(buf[M41T93_REG_CENT_HOUR] & 0x3f);
	tm->tm_mday	= bcd2bin(buf[M41T93_REG_DAY]);
	tm->tm_mon	= bcd2bin(buf[M41T93_REG_MON]) - 1;
	tm->tm_wday	= bcd2bin(buf[M41T93_REG_WDAY] & 0x0f) - 1;

	century_after_1900 = (buf[M41T93_REG_CENT_HOUR] >> 6) + 1;
	tm->tm_year = bcd2bin(buf[M41T93_REG_YEAR]) + century_after_1900 * 100;

	dev_dbg(dev, "%s secs=%d, mins=%d, "
		"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
		"read", tm->tm_sec, tm->tm_min,
		tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);

	return ret < 0 ? ret : rtc_valid_tm(tm);
}


static const struct rtc_class_ops m41t93_rtc_ops = {
	.read_time	= m41t93_get_time,
	.set_time	= m41t93_set_time,
};

static struct spi_driver m41t93_driver;

/*
 * Probe: configure the SPI link, sanity-check the chip by reading the
 * WDAY register (upper bits must be zero), then register the RTC.
 *
 * Fix over original: the return value of spi_setup() was ignored;
 * it can fail, so propagate the error instead of probing a
 * misconfigured bus.
 */
static int m41t93_probe(struct spi_device *spi)
{
	struct rtc_device *rtc;
	int res;

	spi->bits_per_word = 8;
	res = spi_setup(spi);
	if (res < 0) {
		dev_err(&spi->dev, "SPI setup failed: %d\n", res);
		return res;
	}

	res = spi_w8r8(spi, M41T93_REG_WDAY);
	if (res < 0 || (res & 0xf8) != 0) {
		dev_err(&spi->dev, "not found 0x%x.\n", res);
		return -ENODEV;
	}

	rtc = devm_rtc_device_register(&spi->dev, m41t93_driver.driver.name,
					&m41t93_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	spi_set_drvdata(spi, rtc);

	return 0;
}

static struct spi_driver m41t93_driver = {
	.driver = {
		.name	= "rtc-m41t93",
		.owner	= THIS_MODULE,
	},
	.probe	= m41t93_probe,
};

module_spi_driver(m41t93_driver);

MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
MODULE_DESCRIPTION("Driver for ST M41T93 SPI RTC");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:rtc-m41t93");
gpl-2.0
uberlaggydarwin/lge_d295
net/rxrpc/af_rxrpc.c
1830
20967
/* AF_RXRPC implementation * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/net.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/key-type.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" MODULE_DESCRIPTION("RxRPC network protocol"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_RXRPC); unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "RxRPC debugging mask"); static int sysctl_rxrpc_max_qlen __read_mostly = 10; static struct proto rxrpc_proto; static const struct proto_ops rxrpc_rpc_ops; /* local epoch for detecting local-end reset */ __be32 rxrpc_epoch; /* current debugging ID */ atomic_t rxrpc_debug_id; /* count of skbs currently in use */ atomic_t rxrpc_n_skbs; struct workqueue_struct *rxrpc_workqueue; static void rxrpc_sock_destructor(struct sock *); /* * see if an RxRPC socket is currently writable */ static inline int rxrpc_writable(struct sock *sk) { return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf; } /* * wait for write bufferage to become available */ static void rxrpc_write_space(struct sock *sk) { _enter("%p", sk); rcu_read_lock(); if (rxrpc_writable(sk)) { struct socket_wq *wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible(&wq->wait); sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } /* * validate an RxRPC address */ static int rxrpc_validate_address(struct rxrpc_sock *rx, struct sockaddr_rxrpc *srx, 
int len) { if (len < sizeof(struct sockaddr_rxrpc)) return -EINVAL; if (srx->srx_family != AF_RXRPC) return -EAFNOSUPPORT; if (srx->transport_type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; len -= offsetof(struct sockaddr_rxrpc, transport); if (srx->transport_len < sizeof(sa_family_t) || srx->transport_len > len) return -EINVAL; if (srx->transport.family != rx->proto) return -EAFNOSUPPORT; switch (srx->transport.family) { case AF_INET: _debug("INET: %x @ %pI4", ntohs(srx->transport.sin.sin_port), &srx->transport.sin.sin_addr); if (srx->transport_len > 8) memset((void *)&srx->transport + 8, 0, srx->transport_len - 8); break; case AF_INET6: default: return -EAFNOSUPPORT; } return 0; } /* * bind a local address to an RxRPC socket */ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr; struct sock *sk = sock->sk; struct rxrpc_local *local; struct rxrpc_sock *rx = rxrpc_sk(sk), *prx; __be16 service_id; int ret; _enter("%p,%p,%d", rx, saddr, len); ret = rxrpc_validate_address(rx, srx, len); if (ret < 0) goto error; lock_sock(&rx->sk); if (rx->sk.sk_state != RXRPC_UNCONNECTED) { ret = -EINVAL; goto error_unlock; } memcpy(&rx->srx, srx, sizeof(rx->srx)); /* find a local transport endpoint if we don't have one already */ local = rxrpc_lookup_local(&rx->srx); if (IS_ERR(local)) { ret = PTR_ERR(local); goto error_unlock; } rx->local = local; if (srx->srx_service) { service_id = htons(srx->srx_service); write_lock_bh(&local->services_lock); list_for_each_entry(prx, &local->services, listen_link) { if (prx->service_id == service_id) goto service_in_use; } rx->service_id = service_id; list_add_tail(&rx->listen_link, &local->services); write_unlock_bh(&local->services_lock); rx->sk.sk_state = RXRPC_SERVER_BOUND; } else { rx->sk.sk_state = RXRPC_CLIENT_BOUND; } release_sock(&rx->sk); _leave(" = 0"); return 0; service_in_use: ret = -EADDRINUSE; write_unlock_bh(&local->services_lock); error_unlock: 
release_sock(&rx->sk); error: _leave(" = %d", ret); return ret; } /* * set the number of pending calls permitted on a listening socket */ static int rxrpc_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct rxrpc_sock *rx = rxrpc_sk(sk); int ret; _enter("%p,%d", rx, backlog); lock_sock(&rx->sk); switch (rx->sk.sk_state) { case RXRPC_UNCONNECTED: ret = -EADDRNOTAVAIL; break; case RXRPC_CLIENT_BOUND: case RXRPC_CLIENT_CONNECTED: default: ret = -EBUSY; break; case RXRPC_SERVER_BOUND: ASSERT(rx->local != NULL); sk->sk_max_ack_backlog = backlog; rx->sk.sk_state = RXRPC_SERVER_LISTENING; ret = 0; break; } release_sock(&rx->sk); _leave(" = %d", ret); return ret; } /* * find a transport by address */ static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock, struct sockaddr *addr, int addr_len, int flags, gfp_t gfp) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; struct rxrpc_transport *trans; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); struct rxrpc_peer *peer; _enter("%p,%p,%d,%d", rx, addr, addr_len, flags); ASSERT(rx->local != NULL); ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED); if (rx->srx.transport_type != srx->transport_type) return ERR_PTR(-ESOCKTNOSUPPORT); if (rx->srx.transport.family != srx->transport.family) return ERR_PTR(-EAFNOSUPPORT); /* find a remote transport endpoint from the local one */ peer = rxrpc_get_peer(srx, gfp); if (IS_ERR(peer)) return ERR_CAST(peer); /* find a transport */ trans = rxrpc_get_transport(rx->local, peer, gfp); rxrpc_put_peer(peer); _leave(" = %p", trans); return trans; } /** * rxrpc_kernel_begin_call - Allow a kernel service to begin a call * @sock: The socket on which to make the call * @srx: The address of the peer to contact (defaults to socket setting) * @key: The security context to use (defaults to socket setting) * @user_call_ID: The ID to use * * Allow a kernel service to begin a call on the nominated socket. 
This just * sets up all the internal tracking structures and allocates connection and * call IDs as appropriate. The call to be used is returned. * * The default socket destination address and security may be overridden by * supplying @srx and @key. */ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, struct sockaddr_rxrpc *srx, struct key *key, unsigned long user_call_ID, gfp_t gfp) { struct rxrpc_conn_bundle *bundle; struct rxrpc_transport *trans; struct rxrpc_call *call; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); __be16 service_id; _enter(",,%x,%lx", key_serial(key), user_call_ID); lock_sock(&rx->sk); if (srx) { trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx, sizeof(*srx), 0, gfp); if (IS_ERR(trans)) { call = ERR_CAST(trans); trans = NULL; goto out_notrans; } } else { trans = rx->trans; if (!trans) { call = ERR_PTR(-ENOTCONN); goto out_notrans; } atomic_inc(&trans->usage); } service_id = rx->service_id; if (srx) service_id = htons(srx->srx_service); if (!key) key = rx->key; if (key && !key->payload.data) key = NULL; /* a no-security key */ bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp); if (IS_ERR(bundle)) { call = ERR_CAST(bundle); goto out; } call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true, gfp); rxrpc_put_bundle(trans, bundle); out: rxrpc_put_transport(trans); out_notrans: release_sock(&rx->sk); _leave(" = %p", call); return call; } EXPORT_SYMBOL(rxrpc_kernel_begin_call); /** * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using * @call: The call to end * * Allow a kernel service to end a call it was using. The call must be * complete before this is called (the call should be aborted if necessary). 
*/ void rxrpc_kernel_end_call(struct rxrpc_call *call) { _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); rxrpc_remove_user_ID(call->socket, call); rxrpc_put_call(call); } EXPORT_SYMBOL(rxrpc_kernel_end_call); /** * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages * @sock: The socket to intercept received messages on * @interceptor: The function to pass the messages to * * Allow a kernel service to intercept messages heading for the Rx queue on an * RxRPC socket. They get passed to the specified function instead. * @interceptor should free the socket buffers it is given. @interceptor is * called with the socket receive queue spinlock held and softirqs disabled - * this ensures that the messages will be delivered in the right order. */ void rxrpc_kernel_intercept_rx_messages(struct socket *sock, rxrpc_interceptor_t interceptor) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); _enter(""); rx->interceptor = interceptor; } EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages); /* * connect an RxRPC socket * - this just targets it at a specific destination; no actual connection * negotiation takes place */ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr, int addr_len, int flags) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; struct sock *sk = sock->sk; struct rxrpc_transport *trans; struct rxrpc_local *local; struct rxrpc_sock *rx = rxrpc_sk(sk); int ret; _enter("%p,%p,%d,%d", rx, addr, addr_len, flags); ret = rxrpc_validate_address(rx, srx, addr_len); if (ret < 0) { _leave(" = %d [bad addr]", ret); return ret; } lock_sock(&rx->sk); switch (rx->sk.sk_state) { case RXRPC_UNCONNECTED: /* find a local transport endpoint if we don't have one already */ ASSERTCMP(rx->local, ==, NULL); rx->srx.srx_family = AF_RXRPC; rx->srx.srx_service = 0; rx->srx.transport_type = srx->transport_type; rx->srx.transport_len = sizeof(sa_family_t); rx->srx.transport.family = srx->transport.family; local = 
rxrpc_lookup_local(&rx->srx); if (IS_ERR(local)) { release_sock(&rx->sk); return PTR_ERR(local); } rx->local = local; rx->sk.sk_state = RXRPC_CLIENT_BOUND; case RXRPC_CLIENT_BOUND: break; case RXRPC_CLIENT_CONNECTED: release_sock(&rx->sk); return -EISCONN; default: release_sock(&rx->sk); return -EBUSY; /* server sockets can't connect as well */ } trans = rxrpc_name_to_transport(sock, addr, addr_len, flags, GFP_KERNEL); if (IS_ERR(trans)) { release_sock(&rx->sk); _leave(" = %ld", PTR_ERR(trans)); return PTR_ERR(trans); } rx->trans = trans; rx->service_id = htons(srx->srx_service); rx->sk.sk_state = RXRPC_CLIENT_CONNECTED; release_sock(&rx->sk); return 0; } /* * send a message through an RxRPC socket * - in a client this does a number of things: * - finds/sets up a connection for the security specified (if any) * - initiates a call (ID in control data) * - ends the request phase of a call (if MSG_MORE is not set) * - sends a call data packet * - may send an abort (abort code in control data) */ static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len) { struct rxrpc_transport *trans; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); int ret; _enter(",{%d},,%zu", rx->sk.sk_state, len); if (m->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (m->msg_name) { ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen); if (ret < 0) { _leave(" = %d [bad addr]", ret); return ret; } } trans = NULL; lock_sock(&rx->sk); if (m->msg_name) { ret = -EISCONN; trans = rxrpc_name_to_transport(sock, m->msg_name, m->msg_namelen, 0, GFP_KERNEL); if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; goto out; } } else { trans = rx->trans; if (trans) atomic_inc(&trans->usage); } switch (rx->sk.sk_state) { case RXRPC_SERVER_LISTENING: if (!m->msg_name) { ret = rxrpc_server_sendmsg(iocb, rx, m, len); break; } case RXRPC_SERVER_BOUND: case RXRPC_CLIENT_BOUND: if (!m->msg_name) { ret = -ENOTCONN; break; } case RXRPC_CLIENT_CONNECTED: ret = 
rxrpc_client_sendmsg(iocb, rx, trans, m, len); break; default: ret = -ENOTCONN; break; } out: release_sock(&rx->sk); if (trans) rxrpc_put_transport(trans); _leave(" = %d", ret); return ret; } /* * set RxRPC socket options */ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); unsigned min_sec_level; int ret; _enter(",%d,%d,,%d", level, optname, optlen); lock_sock(&rx->sk); ret = -EOPNOTSUPP; if (level == SOL_RXRPC) { switch (optname) { case RXRPC_EXCLUSIVE_CONNECTION: ret = -EINVAL; if (optlen != 0) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags); goto success; case RXRPC_SECURITY_KEY: ret = -EINVAL; if (rx->key) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; ret = rxrpc_request_key(rx, optval, optlen); goto error; case RXRPC_SECURITY_KEYRING: ret = -EINVAL; if (rx->key) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; ret = rxrpc_server_keyring(rx, optval, optlen); goto error; case RXRPC_MIN_SECURITY_LEVEL: ret = -EINVAL; if (optlen != sizeof(unsigned)) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; ret = get_user(min_sec_level, (unsigned __user *) optval); if (ret < 0) goto error; ret = -EINVAL; if (min_sec_level > RXRPC_SECURITY_MAX) goto error; rx->min_sec_level = min_sec_level; goto success; default: break; } } success: ret = 0; error: release_sock(&rx->sk); return ret; } /* * permit an RxRPC socket to be polled */ static unsigned int rxrpc_poll(struct file *file, struct socket *sock, poll_table *wait) { unsigned int mask; struct sock *sk = sock->sk; sock_poll_wait(file, sk_sleep(sk), wait); mask = 0; /* the socket is readable if there are any messages waiting on the Rx * queue */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* the socket is 
writable if there is space to add new data to the * socket; there is no guarantee that any particular call in progress * on the socket may have space in the Tx ACK window */ if (rxrpc_writable(sk)) mask |= POLLOUT | POLLWRNORM; return mask; } /* * create an RxRPC socket */ static int rxrpc_create(struct net *net, struct socket *sock, int protocol, int kern) { struct rxrpc_sock *rx; struct sock *sk; _enter("%p,%d", sock, protocol); if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; /* we support transport protocol UDP only */ if (protocol != PF_INET) return -EPROTONOSUPPORT; if (sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; sock->ops = &rxrpc_rpc_ops; sock->state = SS_UNCONNECTED; sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_state = RXRPC_UNCONNECTED; sk->sk_write_space = rxrpc_write_space; sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen; sk->sk_destruct = rxrpc_sock_destructor; rx = rxrpc_sk(sk); rx->proto = protocol; rx->calls = RB_ROOT; INIT_LIST_HEAD(&rx->listen_link); INIT_LIST_HEAD(&rx->secureq); INIT_LIST_HEAD(&rx->acceptq); rwlock_init(&rx->call_lock); memset(&rx->srx, 0, sizeof(rx->srx)); _leave(" = 0 [%p]", rx); return 0; } /* * RxRPC socket destructor */ static void rxrpc_sock_destructor(struct sock *sk) { _enter("%p", sk); rxrpc_purge_queue(&sk->sk_receive_queue); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(!sk_unhashed(sk)); WARN_ON(sk->sk_socket); if (!sock_flag(sk, SOCK_DEAD)) { WARN(1, "Attempt to release alive rxrpc socket: %p\n", sk); return; } } /* * release an RxRPC socket */ static int rxrpc_release_sock(struct sock *sk) { struct rxrpc_sock *rx = rxrpc_sk(sk); _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt)); /* declare the socket closed for business */ sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; spin_lock_bh(&sk->sk_receive_queue.lock); sk->sk_state = RXRPC_CLOSE; spin_unlock_bh(&sk->sk_receive_queue.lock); ASSERTCMP(rx->listen_link.next, !=, 
LIST_POISON1); if (!list_empty(&rx->listen_link)) { write_lock_bh(&rx->local->services_lock); list_del(&rx->listen_link); write_unlock_bh(&rx->local->services_lock); } /* try to flush out this socket */ rxrpc_release_calls_on_socket(rx); flush_workqueue(rxrpc_workqueue); rxrpc_purge_queue(&sk->sk_receive_queue); if (rx->conn) { rxrpc_put_connection(rx->conn); rx->conn = NULL; } if (rx->bundle) { rxrpc_put_bundle(rx->trans, rx->bundle); rx->bundle = NULL; } if (rx->trans) { rxrpc_put_transport(rx->trans); rx->trans = NULL; } if (rx->local) { rxrpc_put_local(rx->local); rx->local = NULL; } key_put(rx->key); rx->key = NULL; key_put(rx->securities); rx->securities = NULL; sock_put(sk); _leave(" = 0"); return 0; } /* * release an RxRPC BSD socket on close() or equivalent */ static int rxrpc_release(struct socket *sock) { struct sock *sk = sock->sk; _enter("%p{%p}", sock, sk); if (!sk) return 0; sock->sk = NULL; return rxrpc_release_sock(sk); } /* * RxRPC network protocol */ static const struct proto_ops rxrpc_rpc_ops = { .family = PF_UNIX, .owner = THIS_MODULE, .release = rxrpc_release, .bind = rxrpc_bind, .connect = rxrpc_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = rxrpc_poll, .ioctl = sock_no_ioctl, .listen = rxrpc_listen, .shutdown = sock_no_shutdown, .setsockopt = rxrpc_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = rxrpc_sendmsg, .recvmsg = rxrpc_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct proto rxrpc_proto = { .name = "RXRPC", .owner = THIS_MODULE, .obj_size = sizeof(struct rxrpc_sock), .max_header = sizeof(struct rxrpc_header), }; static const struct net_proto_family rxrpc_family_ops = { .family = PF_RXRPC, .create = rxrpc_create, .owner = THIS_MODULE, }; /* * initialise and register the RxRPC protocol */ static int __init af_rxrpc_init(void) { struct sk_buff *dummy_skb; int ret = -1; BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb)); 
rxrpc_epoch = htonl(get_seconds()); ret = -ENOMEM; rxrpc_call_jar = kmem_cache_create( "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, SLAB_HWCACHE_ALIGN, NULL); if (!rxrpc_call_jar) { printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n"); goto error_call_jar; } rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1); if (!rxrpc_workqueue) { printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); goto error_work_queue; } ret = proto_register(&rxrpc_proto, 1); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register protocol\n"); goto error_proto; } ret = sock_register(&rxrpc_family_ops); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register socket family\n"); goto error_sock; } ret = register_key_type(&key_type_rxrpc); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register client key type\n"); goto error_key_type; } ret = register_key_type(&key_type_rxrpc_s); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register server key type\n"); goto error_key_type_s; } #ifdef CONFIG_PROC_FS proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops); proc_net_fops_create(&init_net, "rxrpc_conns", 0, &rxrpc_connection_seq_fops); #endif return 0; error_key_type_s: unregister_key_type(&key_type_rxrpc); error_key_type: sock_unregister(PF_RXRPC); error_sock: proto_unregister(&rxrpc_proto); error_proto: destroy_workqueue(rxrpc_workqueue); error_work_queue: kmem_cache_destroy(rxrpc_call_jar); error_call_jar: return ret; } /* * unregister the RxRPC protocol */ static void __exit af_rxrpc_exit(void) { _enter(""); unregister_key_type(&key_type_rxrpc_s); unregister_key_type(&key_type_rxrpc); sock_unregister(PF_RXRPC); proto_unregister(&rxrpc_proto); rxrpc_destroy_all_calls(); rxrpc_destroy_all_connections(); rxrpc_destroy_all_transports(); rxrpc_destroy_all_peers(); rxrpc_destroy_all_locals(); ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); _debug("flush scheduled work"); flush_workqueue(rxrpc_workqueue); proc_net_remove(&init_net, "rxrpc_conns"); 
proc_net_remove(&init_net, "rxrpc_calls"); destroy_workqueue(rxrpc_workqueue); kmem_cache_destroy(rxrpc_call_jar); _leave(""); } module_init(af_rxrpc_init); module_exit(af_rxrpc_exit);
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_asus_Z00A
drivers/media/i2c/soc_camera/mt9t031.c
2086
22923
/* * Driver for MT9T031 CMOS Image Sensor from Micron * * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering <lg@denx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/device.h> #include <linux/i2c.h> #include <linux/log2.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/v4l2-mediabus.h> #include <linux/videodev2.h> #include <linux/module.h> #include <media/soc_camera.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-subdev.h> #include <media/v4l2-ctrls.h> /* * ATTENTION: this driver still cannot be used outside of the soc-camera * framework because of its PM implementation, using the video_device node. * If hardware becomes available for testing, alternative PM approaches shall * be considered and tested. */ /* * mt9t031 i2c address 0x5d * The platform has to define struct i2c_board_info objects and link to them * from struct soc_camera_host_desc */ /* mt9t031 selected register addresses */ #define MT9T031_CHIP_VERSION 0x00 #define MT9T031_ROW_START 0x01 #define MT9T031_COLUMN_START 0x02 #define MT9T031_WINDOW_HEIGHT 0x03 #define MT9T031_WINDOW_WIDTH 0x04 #define MT9T031_HORIZONTAL_BLANKING 0x05 #define MT9T031_VERTICAL_BLANKING 0x06 #define MT9T031_OUTPUT_CONTROL 0x07 #define MT9T031_SHUTTER_WIDTH_UPPER 0x08 #define MT9T031_SHUTTER_WIDTH 0x09 #define MT9T031_PIXEL_CLOCK_CONTROL 0x0a #define MT9T031_FRAME_RESTART 0x0b #define MT9T031_SHUTTER_DELAY 0x0c #define MT9T031_RESET 0x0d #define MT9T031_READ_MODE_1 0x1e #define MT9T031_READ_MODE_2 0x20 #define MT9T031_READ_MODE_3 0x21 #define MT9T031_ROW_ADDRESS_MODE 0x22 #define MT9T031_COLUMN_ADDRESS_MODE 0x23 #define MT9T031_GLOBAL_GAIN 0x35 #define MT9T031_CHIP_ENABLE 0xF8 #define MT9T031_MAX_HEIGHT 1536 #define MT9T031_MAX_WIDTH 2048 #define MT9T031_MIN_HEIGHT 2 #define MT9T031_MIN_WIDTH 18 #define 
MT9T031_HORIZONTAL_BLANK 142 #define MT9T031_VERTICAL_BLANK 25 #define MT9T031_COLUMN_SKIP 32 #define MT9T031_ROW_SKIP 20 struct mt9t031 { struct v4l2_subdev subdev; struct v4l2_ctrl_handler hdl; struct { /* exposure/auto-exposure cluster */ struct v4l2_ctrl *autoexposure; struct v4l2_ctrl *exposure; }; struct v4l2_rect rect; /* Sensor window */ int model; /* V4L2_IDENT_MT9T031* codes from v4l2-chip-ident.h */ u16 xskip; u16 yskip; unsigned int total_h; unsigned short y_skip_top; /* Lines to skip at the top */ }; static struct mt9t031 *to_mt9t031(const struct i2c_client *client) { return container_of(i2c_get_clientdata(client), struct mt9t031, subdev); } static int reg_read(struct i2c_client *client, const u8 reg) { return i2c_smbus_read_word_swapped(client, reg); } static int reg_write(struct i2c_client *client, const u8 reg, const u16 data) { return i2c_smbus_write_word_swapped(client, reg, data); } static int reg_set(struct i2c_client *client, const u8 reg, const u16 data) { int ret; ret = reg_read(client, reg); if (ret < 0) return ret; return reg_write(client, reg, ret | data); } static int reg_clear(struct i2c_client *client, const u8 reg, const u16 data) { int ret; ret = reg_read(client, reg); if (ret < 0) return ret; return reg_write(client, reg, ret & ~data); } static int set_shutter(struct i2c_client *client, const u32 data) { int ret; ret = reg_write(client, MT9T031_SHUTTER_WIDTH_UPPER, data >> 16); if (ret >= 0) ret = reg_write(client, MT9T031_SHUTTER_WIDTH, data & 0xffff); return ret; } static int get_shutter(struct i2c_client *client, u32 *data) { int ret; ret = reg_read(client, MT9T031_SHUTTER_WIDTH_UPPER); *data = ret << 16; if (ret >= 0) ret = reg_read(client, MT9T031_SHUTTER_WIDTH); *data |= ret & 0xffff; return ret < 0 ? 
ret : 0; } static int mt9t031_idle(struct i2c_client *client) { int ret; /* Disable chip output, synchronous option update */ ret = reg_write(client, MT9T031_RESET, 1); if (ret >= 0) ret = reg_write(client, MT9T031_RESET, 0); if (ret >= 0) ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 2); return ret >= 0 ? 0 : -EIO; } static int mt9t031_s_stream(struct v4l2_subdev *sd, int enable) { struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; if (enable) /* Switch to master "normal" mode */ ret = reg_set(client, MT9T031_OUTPUT_CONTROL, 2); else /* Stop sensor readout */ ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 2); if (ret < 0) return -EIO; return 0; } /* target must be _even_ */ static u16 mt9t031_skip(s32 *source, s32 target, s32 max) { unsigned int skip; if (*source < target + target / 2) { *source = target; return 1; } skip = min(max, *source + target / 2) / target; if (skip > 8) skip = 8; *source = target * skip; return skip; } /* rect is the sensor rectangle, the caller guarantees parameter validity */ static int mt9t031_set_params(struct i2c_client *client, struct v4l2_rect *rect, u16 xskip, u16 yskip) { struct mt9t031 *mt9t031 = to_mt9t031(client); int ret; u16 xbin, ybin; const u16 hblank = MT9T031_HORIZONTAL_BLANK, vblank = MT9T031_VERTICAL_BLANK; xbin = min(xskip, (u16)3); ybin = min(yskip, (u16)3); /* * Could just do roundup(rect->left, [xy]bin * 2); but this is cheaper. * There is always a valid suitably aligned value. The worst case is * xbin = 3, width = 2048. Then we will start at 36, the last read out * pixel will be 2083, which is < 2085 - first black pixel. * * MT9T031 datasheet imposes window left border alignment, depending on * the selected xskip. Failing to conform to this requirement produces * dark horizontal stripes in the image. However, even obeying to this * requirement doesn't eliminate the stripes in all configurations. They * appear "locally reproducibly," but can differ between tests under * different lighting conditions. 
*/ switch (xbin) { case 1: rect->left &= ~1; break; case 2: rect->left &= ~3; break; case 3: rect->left = rect->left > roundup(MT9T031_COLUMN_SKIP, 6) ? (rect->left / 6) * 6 : roundup(MT9T031_COLUMN_SKIP, 6); } rect->top &= ~1; dev_dbg(&client->dev, "skip %u:%u, rect %ux%u@%u:%u\n", xskip, yskip, rect->width, rect->height, rect->left, rect->top); /* Disable register update, reconfigure atomically */ ret = reg_set(client, MT9T031_OUTPUT_CONTROL, 1); if (ret < 0) return ret; /* Blanking and start values - default... */ ret = reg_write(client, MT9T031_HORIZONTAL_BLANKING, hblank); if (ret >= 0) ret = reg_write(client, MT9T031_VERTICAL_BLANKING, vblank); if (yskip != mt9t031->yskip || xskip != mt9t031->xskip) { /* Binning, skipping */ if (ret >= 0) ret = reg_write(client, MT9T031_COLUMN_ADDRESS_MODE, ((xbin - 1) << 4) | (xskip - 1)); if (ret >= 0) ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE, ((ybin - 1) << 4) | (yskip - 1)); } dev_dbg(&client->dev, "new physical left %u, top %u\n", rect->left, rect->top); /* * The caller provides a supported format, as guaranteed by * .try_mbus_fmt(), soc_camera_s_crop() and soc_camera_cropcap() */ if (ret >= 0) ret = reg_write(client, MT9T031_COLUMN_START, rect->left); if (ret >= 0) ret = reg_write(client, MT9T031_ROW_START, rect->top); if (ret >= 0) ret = reg_write(client, MT9T031_WINDOW_WIDTH, rect->width - 1); if (ret >= 0) ret = reg_write(client, MT9T031_WINDOW_HEIGHT, rect->height + mt9t031->y_skip_top - 1); if (ret >= 0 && v4l2_ctrl_g_ctrl(mt9t031->autoexposure) == V4L2_EXPOSURE_AUTO) { mt9t031->total_h = rect->height + mt9t031->y_skip_top + vblank; ret = set_shutter(client, mt9t031->total_h); } /* Re-enable register update, commit all changes */ if (ret >= 0) ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 1); if (ret >= 0) { mt9t031->rect = *rect; mt9t031->xskip = xskip; mt9t031->yskip = yskip; } return ret < 0 ? 
ret : 0; } static int mt9t031_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a) { struct v4l2_rect rect = a->c; struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9t031 *mt9t031 = to_mt9t031(client); rect.width = ALIGN(rect.width, 2); rect.height = ALIGN(rect.height, 2); soc_camera_limit_side(&rect.left, &rect.width, MT9T031_COLUMN_SKIP, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH); soc_camera_limit_side(&rect.top, &rect.height, MT9T031_ROW_SKIP, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT); return mt9t031_set_params(client, &rect, mt9t031->xskip, mt9t031->yskip); } static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9t031 *mt9t031 = to_mt9t031(client); a->c = mt9t031->rect; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; return 0; } static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) { a->bounds.left = MT9T031_COLUMN_SKIP; a->bounds.top = MT9T031_ROW_SKIP; a->bounds.width = MT9T031_MAX_WIDTH; a->bounds.height = MT9T031_MAX_HEIGHT; a->defrect = a->bounds; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->pixelaspect.numerator = 1; a->pixelaspect.denominator = 1; return 0; } static int mt9t031_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9t031 *mt9t031 = to_mt9t031(client); mf->width = mt9t031->rect.width / mt9t031->xskip; mf->height = mt9t031->rect.height / mt9t031->yskip; mf->code = V4L2_MBUS_FMT_SBGGR10_1X10; mf->colorspace = V4L2_COLORSPACE_SRGB; mf->field = V4L2_FIELD_NONE; return 0; } static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9t031 *mt9t031 = to_mt9t031(client); u16 xskip, yskip; struct v4l2_rect rect = mt9t031->rect; /* * try_fmt has put width and height within limits. 
* S_FMT: use binning and skipping for scaling */ xskip = mt9t031_skip(&rect.width, mf->width, MT9T031_MAX_WIDTH); yskip = mt9t031_skip(&rect.height, mf->height, MT9T031_MAX_HEIGHT); mf->code = V4L2_MBUS_FMT_SBGGR10_1X10; mf->colorspace = V4L2_COLORSPACE_SRGB; /* mt9t031_set_params() doesn't change width and height */ return mt9t031_set_params(client, &rect, xskip, yskip); } /* * If a user window larger than sensor window is requested, we'll increase the * sensor window. */ static int mt9t031_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { v4l_bound_align_image( &mf->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1, &mf->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0); mf->code = V4L2_MBUS_FMT_SBGGR10_1X10; mf->colorspace = V4L2_COLORSPACE_SRGB; return 0; } static int mt9t031_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *id) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9t031 *mt9t031 = to_mt9t031(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; if (id->match.addr != client->addr) return -ENODEV; id->ident = mt9t031->model; id->revision = 0; return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int mt9t031_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; if (reg->match.addr != client->addr) return -ENODEV; reg->val = reg_read(client, reg->reg); if (reg->val > 0xffff) return -EIO; return 0; } static int mt9t031_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; if (reg->match.addr != client->addr) return -ENODEV; if (reg_write(client, reg->reg, reg->val) < 0) return -EIO; return 0; } #endif static int mt9t031_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct mt9t031 *mt9t031 = 
container_of(ctrl->handler, struct mt9t031, hdl); const u32 shutter_max = MT9T031_MAX_HEIGHT + MT9T031_VERTICAL_BLANK; s32 min, max; switch (ctrl->id) { case V4L2_CID_EXPOSURE_AUTO: min = mt9t031->exposure->minimum; max = mt9t031->exposure->maximum; mt9t031->exposure->val = (shutter_max / 2 + (mt9t031->total_h - 1) * (max - min)) / shutter_max + min; break; } return 0; } static int mt9t031_s_ctrl(struct v4l2_ctrl *ctrl) { struct mt9t031 *mt9t031 = container_of(ctrl->handler, struct mt9t031, hdl); struct v4l2_subdev *sd = &mt9t031->subdev; struct i2c_client *client = v4l2_get_subdevdata(sd); struct v4l2_ctrl *exp = mt9t031->exposure; int data; switch (ctrl->id) { case V4L2_CID_VFLIP: if (ctrl->val) data = reg_set(client, MT9T031_READ_MODE_2, 0x8000); else data = reg_clear(client, MT9T031_READ_MODE_2, 0x8000); if (data < 0) return -EIO; return 0; case V4L2_CID_HFLIP: if (ctrl->val) data = reg_set(client, MT9T031_READ_MODE_2, 0x4000); else data = reg_clear(client, MT9T031_READ_MODE_2, 0x4000); if (data < 0) return -EIO; return 0; case V4L2_CID_GAIN: /* See Datasheet Table 7, Gain settings. 
*/ if (ctrl->val <= ctrl->default_value) { /* Pack it into 0..1 step 0.125, register values 0..8 */ unsigned long range = ctrl->default_value - ctrl->minimum; data = ((ctrl->val - ctrl->minimum) * 8 + range / 2) / range; dev_dbg(&client->dev, "Setting gain %d\n", data); data = reg_write(client, MT9T031_GLOBAL_GAIN, data); if (data < 0) return -EIO; } else { /* Pack it into 1.125..128 variable step, register values 9..0x7860 */ /* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */ unsigned long range = ctrl->maximum - ctrl->default_value - 1; /* calculated gain: map 65..127 to 9..1024 step 0.125 */ unsigned long gain = ((ctrl->val - ctrl->default_value - 1) * 1015 + range / 2) / range + 9; if (gain <= 32) /* calculated gain 9..32 -> 9..32 */ data = gain; else if (gain <= 64) /* calculated gain 33..64 -> 0x51..0x60 */ data = ((gain - 32) * 16 + 16) / 32 + 80; else /* calculated gain 65..1024 -> (1..120) << 8 + 0x60 */ data = (((gain - 64 + 7) * 32) & 0xff00) | 0x60; dev_dbg(&client->dev, "Set gain from 0x%x to 0x%x\n", reg_read(client, MT9T031_GLOBAL_GAIN), data); data = reg_write(client, MT9T031_GLOBAL_GAIN, data); if (data < 0) return -EIO; } return 0; case V4L2_CID_EXPOSURE_AUTO: if (ctrl->val == V4L2_EXPOSURE_MANUAL) { unsigned int range = exp->maximum - exp->minimum; unsigned int shutter = ((exp->val - exp->minimum) * 1048 + range / 2) / range + 1; u32 old; get_shutter(client, &old); dev_dbg(&client->dev, "Set shutter from %u to %u\n", old, shutter); if (set_shutter(client, shutter) < 0) return -EIO; } else { const u16 vblank = MT9T031_VERTICAL_BLANK; mt9t031->total_h = mt9t031->rect.height + mt9t031->y_skip_top + vblank; if (set_shutter(client, mt9t031->total_h) < 0) return -EIO; } return 0; default: return -EINVAL; } return 0; } /* * Power Management: * This function does nothing for now but must be present for pm to work */ static int mt9t031_runtime_suspend(struct device *dev) { return 0; } /* * Power Management: * COLUMN_ADDRESS_MODE and 
ROW_ADDRESS_MODE are not rewritten if unchanged * they are however changed at reset if the platform hook is present * thus we rewrite them with the values stored by the driver */ static int mt9t031_runtime_resume(struct device *dev) { struct video_device *vdev = to_video_device(dev); struct v4l2_subdev *sd = soc_camera_vdev_to_subdev(vdev); struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9t031 *mt9t031 = to_mt9t031(client); int ret; u16 xbin, ybin; xbin = min(mt9t031->xskip, (u16)3); ybin = min(mt9t031->yskip, (u16)3); ret = reg_write(client, MT9T031_COLUMN_ADDRESS_MODE, ((xbin - 1) << 4) | (mt9t031->xskip - 1)); if (ret < 0) return ret; ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE, ((ybin - 1) << 4) | (mt9t031->yskip - 1)); if (ret < 0) return ret; return 0; } static struct dev_pm_ops mt9t031_dev_pm_ops = { .runtime_suspend = mt9t031_runtime_suspend, .runtime_resume = mt9t031_runtime_resume, }; static struct device_type mt9t031_dev_type = { .name = "MT9T031", .pm = &mt9t031_dev_pm_ops, }; static int mt9t031_s_power(struct v4l2_subdev *sd, int on) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); struct video_device *vdev = soc_camera_i2c_to_vdev(client); int ret; if (on) { ret = soc_camera_power_on(&client->dev, ssdd); if (ret < 0) return ret; vdev->dev.type = &mt9t031_dev_type; } else { vdev->dev.type = NULL; soc_camera_power_off(&client->dev, ssdd); } return 0; } /* * Interface active, can use i2c. 
If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ static int mt9t031_video_probe(struct i2c_client *client) { struct mt9t031 *mt9t031 = to_mt9t031(client); s32 data; int ret; ret = mt9t031_s_power(&mt9t031->subdev, 1); if (ret < 0) return ret; ret = mt9t031_idle(client); if (ret < 0) { dev_err(&client->dev, "Failed to initialise the camera\n"); goto done; } /* Read out the chip version register */ data = reg_read(client, MT9T031_CHIP_VERSION); switch (data) { case 0x1621: mt9t031->model = V4L2_IDENT_MT9T031; break; default: dev_err(&client->dev, "No MT9T031 chip detected, register read %x\n", data); ret = -ENODEV; goto done; } dev_info(&client->dev, "Detected a MT9T031 chip ID %x\n", data); ret = v4l2_ctrl_handler_setup(&mt9t031->hdl); done: mt9t031_s_power(&mt9t031->subdev, 0); return ret; } static int mt9t031_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9t031 *mt9t031 = to_mt9t031(client); *lines = mt9t031->y_skip_top; return 0; } static const struct v4l2_ctrl_ops mt9t031_ctrl_ops = { .g_volatile_ctrl = mt9t031_g_volatile_ctrl, .s_ctrl = mt9t031_s_ctrl, }; static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = { .g_chip_ident = mt9t031_g_chip_ident, .s_power = mt9t031_s_power, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = mt9t031_g_register, .s_register = mt9t031_s_register, #endif }; static int mt9t031_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (index) return -EINVAL; *code = V4L2_MBUS_FMT_SBGGR10_1X10; return 0; } static int mt9t031_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH | 
V4L2_MBUS_DATA_ACTIVE_HIGH; cfg->type = V4L2_MBUS_PARALLEL; cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); return 0; } static int mt9t031_s_mbus_config(struct v4l2_subdev *sd, const struct v4l2_mbus_config *cfg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); if (soc_camera_apply_board_flags(ssdd, cfg) & V4L2_MBUS_PCLK_SAMPLE_FALLING) return reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000); else return reg_set(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000); } static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = { .s_stream = mt9t031_s_stream, .s_mbus_fmt = mt9t031_s_fmt, .g_mbus_fmt = mt9t031_g_fmt, .try_mbus_fmt = mt9t031_try_fmt, .s_crop = mt9t031_s_crop, .g_crop = mt9t031_g_crop, .cropcap = mt9t031_cropcap, .enum_mbus_fmt = mt9t031_enum_fmt, .g_mbus_config = mt9t031_g_mbus_config, .s_mbus_config = mt9t031_s_mbus_config, }; static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = { .g_skip_top_lines = mt9t031_g_skip_top_lines, }; static struct v4l2_subdev_ops mt9t031_subdev_ops = { .core = &mt9t031_subdev_core_ops, .video = &mt9t031_subdev_video_ops, .sensor = &mt9t031_subdev_sensor_ops, }; static int mt9t031_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct mt9t031 *mt9t031; struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); int ret; if (!ssdd) { dev_err(&client->dev, "MT9T031 driver needs platform data\n"); return -EINVAL; } if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_warn(&adapter->dev, "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n"); return -EIO; } mt9t031 = devm_kzalloc(&client->dev, sizeof(struct mt9t031), GFP_KERNEL); if (!mt9t031) return -ENOMEM; v4l2_i2c_subdev_init(&mt9t031->subdev, client, &mt9t031_subdev_ops); v4l2_ctrl_handler_init(&mt9t031->hdl, 5); v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops, 
V4L2_CID_VFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops, V4L2_CID_GAIN, 0, 127, 1, 64); /* * Simulated autoexposure. If enabled, we calculate shutter width * ourselves in the driver based on vertical blanking and frame width */ mt9t031->autoexposure = v4l2_ctrl_new_std_menu(&mt9t031->hdl, &mt9t031_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0, V4L2_EXPOSURE_AUTO); mt9t031->exposure = v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops, V4L2_CID_EXPOSURE, 1, 255, 1, 255); mt9t031->subdev.ctrl_handler = &mt9t031->hdl; if (mt9t031->hdl.error) return mt9t031->hdl.error; v4l2_ctrl_auto_cluster(2, &mt9t031->autoexposure, V4L2_EXPOSURE_MANUAL, true); mt9t031->y_skip_top = 0; mt9t031->rect.left = MT9T031_COLUMN_SKIP; mt9t031->rect.top = MT9T031_ROW_SKIP; mt9t031->rect.width = MT9T031_MAX_WIDTH; mt9t031->rect.height = MT9T031_MAX_HEIGHT; mt9t031->xskip = 1; mt9t031->yskip = 1; ret = mt9t031_video_probe(client); if (ret) v4l2_ctrl_handler_free(&mt9t031->hdl); return ret; } static int mt9t031_remove(struct i2c_client *client) { struct mt9t031 *mt9t031 = to_mt9t031(client); v4l2_device_unregister_subdev(&mt9t031->subdev); v4l2_ctrl_handler_free(&mt9t031->hdl); return 0; } static const struct i2c_device_id mt9t031_id[] = { { "mt9t031", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mt9t031_id); static struct i2c_driver mt9t031_i2c_driver = { .driver = { .name = "mt9t031", }, .probe = mt9t031_probe, .remove = mt9t031_remove, .id_table = mt9t031_id, }; module_i2c_driver(mt9t031_i2c_driver); MODULE_DESCRIPTION("Micron MT9T031 Camera driver"); MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>"); MODULE_LICENSE("GPL v2");
gpl-2.0
ArtisteHsu/jetson-tk1-r21.3-kernel
sound/soc/spear/spdif_in.c
2086
7375
/*
 * ALSA SoC SPDIF In Audio Layer for spear processors
 *
 * Copyright (C) 2012 ST Microelectronics
 * Vipin Kumar <vipin.kumar@st.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/spear_dma.h>
#include <sound/spear_spdif.h>
#include "spdif_in_regs.h"

/* Stream parameters captured in hw_params and applied at trigger time. */
struct spdif_in_params {
	u32 format;
};

struct spdif_in_dev {
	struct clk *clk;
	struct spear_dma_data dma_params;
	struct spdif_in_params saved_params;
	void *io_base;
	struct device *dev;
	void (*reset_perip)(void);	/* optional platform reset hook */
	int irq;
};

/*
 * Program the default capture configuration: enable parity, channel
 * status, user data, validity and block detection, select 16-bit mode
 * with a FIFO threshold of 16, and unmask all interrupts.
 */
static void spdif_in_configure(struct spdif_in_dev *host)
{
	u32 ctrl = SPDIF_IN_PRTYEN | SPDIF_IN_STATEN | SPDIF_IN_USREN |
		SPDIF_IN_VALEN | SPDIF_IN_BLKEN;
	ctrl |= SPDIF_MODE_16BIT | SPDIF_FIFO_THRES_16;

	writel(ctrl, host->io_base + SPDIF_IN_CTRL);
	writel(0xF, host->io_base + SPDIF_IN_IRQ_MASK);
}

static int spdif_in_startup(struct snd_pcm_substream *substream,
		struct snd_soc_dai *cpu_dai)
{
	struct spdif_in_dev *host = snd_soc_dai_get_drvdata(cpu_dai);

	/* This DAI is capture-only. */
	if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
		return -EINVAL;

	snd_soc_dai_set_dma_data(cpu_dai, substream,
			(void *)&host->dma_params);

	return 0;
}

static void spdif_in_shutdown(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai);

	if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
		return;

	/* Mask all interrupts before dropping the DMA data. */
	writel(0x0, host->io_base + SPDIF_IN_IRQ_MASK);
	snd_soc_dai_set_dma_data(dai, substream, NULL);
}

/* Select 16-bit sample extraction or raw IEC958 subframe capture. */
static void spdif_in_format(struct spdif_in_dev *host, u32 format)
{
	u32 ctrl = readl(host->io_base + SPDIF_IN_CTRL);

	switch (format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		ctrl |= SPDIF_XTRACT_16BIT;
		break;

	case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
		ctrl &= ~SPDIF_XTRACT_16BIT;
		break;
	}

	writel(ctrl, host->io_base + SPDIF_IN_CTRL);
}

static int spdif_in_hw_params(struct snd_pcm_substream *substream,
		struct snd_pcm_hw_params *params,
		struct snd_soc_dai *dai)
{
	struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai);
	u32 format;

	if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
		return -EINVAL;

	format = params_format(params);
	/* Applied in the START trigger, after the clock is enabled. */
	host->saved_params.format = format;

	return 0;
}

static int spdif_in_trigger(struct snd_pcm_substream *substream, int cmd,
		struct snd_soc_dai *dai)
{
	struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai);
	u32 ctrl;
	int ret = 0;

	if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
		return -EINVAL;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		clk_enable(host->clk);
		spdif_in_configure(host);
		spdif_in_format(host, host->saved_params.format);

		ctrl = readl(host->io_base + SPDIF_IN_CTRL);
		ctrl |= SPDIF_IN_SAMPLE | SPDIF_IN_ENB;
		writel(ctrl, host->io_base + SPDIF_IN_CTRL);
		writel(0xF, host->io_base + SPDIF_IN_IRQ_MASK);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ctrl = readl(host->io_base + SPDIF_IN_CTRL);
		ctrl &= ~(SPDIF_IN_SAMPLE | SPDIF_IN_ENB);
		writel(ctrl, host->io_base + SPDIF_IN_CTRL);
		writel(0x0, host->io_base + SPDIF_IN_IRQ_MASK);

		/* Let the platform fully reset the peripheral if it can. */
		if (host->reset_perip)
			host->reset_perip();
		clk_disable(host->clk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct snd_soc_dai_ops spdif_in_dai_ops = {
	.startup	= spdif_in_startup,
	.shutdown	= spdif_in_shutdown,
	.trigger	= spdif_in_trigger,
	.hw_params	= spdif_in_hw_params,
};

struct snd_soc_dai_driver spdif_in_dai = {
	.capture = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
				SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | \
				SNDRV_PCM_RATE_192000),
		.formats = SNDRV_PCM_FMTBIT_S16_LE | \
				SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE,
	},
	.ops = &spdif_in_dai_ops,
};

static const struct snd_soc_component_driver spdif_in_component = {
	.name		= "spdif-in",
};

/*
 * Interrupt handler: every condition flagged by the controller is an
 * error; report it and acknowledge by clearing the IRQ status register.
 */
static irqreturn_t spdif_in_irq(int irq, void *arg)
{
	struct spdif_in_dev *host = (struct spdif_in_dev *)arg;

	u32 irq_status = readl(host->io_base + SPDIF_IN_IRQ);

	if (!irq_status)
		return IRQ_NONE;

	if (irq_status & SPDIF_IRQ_FIFOWRITE)
		dev_err(host->dev, "spdif in: fifo write error");
	if (irq_status & SPDIF_IRQ_EMPTYFIFOREAD)
		dev_err(host->dev, "spdif in: empty fifo read error");
	if (irq_status & SPDIF_IRQ_FIFOFULL)
		dev_err(host->dev, "spdif in: fifo full error");
	if (irq_status & SPDIF_IRQ_OUTOFRANGE)
		dev_err(host->dev, "spdif in: out of range error");

	writel(0, host->io_base + SPDIF_IN_IRQ);

	return IRQ_HANDLED;
}

static int spdif_in_probe(struct platform_device *pdev)
{
	struct spdif_in_dev *host;
	struct spear_spdif_platform_data *pdata;
	struct resource *res, *res_fifo;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	res_fifo = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res_fifo)
		return -EINVAL;

	/*
	 * Validate platform data before acquiring the clock: the original
	 * code fetched it after clk_get() and returned -EINVAL without a
	 * matching clk_put(), leaking a clock reference.
	 */
	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				resource_size(res), pdev->name)) {
		dev_warn(&pdev->dev, "Failed to get memory resource\n");
		return -ENOENT;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		dev_warn(&pdev->dev, "kzalloc fail\n");
		return -ENOMEM;
	}

	host->io_base = devm_ioremap(&pdev->dev, res->start,
				resource_size(res));
	if (!host->io_base) {
		dev_warn(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0)
		return -EINVAL;

	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk))
		return PTR_ERR(host->clk);

	host->dma_params.data = pdata->dma_params;
	host->dma_params.addr = res_fifo->start;
	host->dma_params.max_burst = 16;
	host->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_params.filter = pdata->filter;
	host->reset_perip = pdata->reset_perip;

	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	ret = devm_request_irq(&pdev->dev, host->irq, spdif_in_irq, 0,
			"spdif-in", host);
	if (ret) {
		clk_put(host->clk);
		dev_warn(&pdev->dev, "request_irq failed\n");
		return ret;
	}

	ret = snd_soc_register_component(&pdev->dev, &spdif_in_component,
					 &spdif_in_dai, 1);
	if (ret != 0) {
		clk_put(host->clk);
		return ret;
	}

	return 0;
}

static int spdif_in_remove(struct platform_device *pdev)
{
	struct spdif_in_dev *host = dev_get_drvdata(&pdev->dev);

	snd_soc_unregister_component(&pdev->dev);
	dev_set_drvdata(&pdev->dev, NULL);

	clk_put(host->clk);

	return 0;
}

static struct platform_driver spdif_in_driver = {
	.probe		= spdif_in_probe,
	.remove		= spdif_in_remove,
	.driver		= {
		.name	= "spdif-in",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(spdif_in_driver);

MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>");
MODULE_DESCRIPTION("SPEAr SPDIF IN SoC Interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:spdif_in");
gpl-2.0
kernelhackx/kernel_hammerhead
arch/x86/kernel/ptrace.c
2342
38676
/* By Ross Biro 1/23/92 */ /* * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/tracehook.h> #include <linux/user.h> #include <linux/elf.h> #include <linux/security.h> #include <linux/audit.h> #include <linux/seccomp.h> #include <linux/signal.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/i387.h> #include <asm/fpu-internal.h> #include <asm/debugreg.h> #include <asm/ldt.h> #include <asm/desc.h> #include <asm/prctl.h> #include <asm/proto.h> #include <asm/hw_breakpoint.h> #include <asm/traps.h> #include "tls.h" #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> enum x86_regset { REGSET_GENERAL, REGSET_FP, REGSET_XFP, REGSET_IOPERM64 = REGSET_XFP, REGSET_XSTATE, REGSET_TLS, REGSET_IOPERM32, }; struct pt_regs_offset { const char *name; int offset; }; #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} #define REG_OFFSET_END {.name = NULL, .offset = 0} static const struct pt_regs_offset regoffset_table[] = { #ifdef CONFIG_X86_64 REG_OFFSET_NAME(r15), REG_OFFSET_NAME(r14), REG_OFFSET_NAME(r13), REG_OFFSET_NAME(r12), REG_OFFSET_NAME(r11), REG_OFFSET_NAME(r10), REG_OFFSET_NAME(r9), REG_OFFSET_NAME(r8), #endif REG_OFFSET_NAME(bx), REG_OFFSET_NAME(cx), REG_OFFSET_NAME(dx), REG_OFFSET_NAME(si), REG_OFFSET_NAME(di), REG_OFFSET_NAME(bp), REG_OFFSET_NAME(ax), #ifdef CONFIG_X86_32 REG_OFFSET_NAME(ds), REG_OFFSET_NAME(es), REG_OFFSET_NAME(fs), REG_OFFSET_NAME(gs), #endif REG_OFFSET_NAME(orig_ax), REG_OFFSET_NAME(ip), REG_OFFSET_NAME(cs), REG_OFFSET_NAME(flags), REG_OFFSET_NAME(sp), REG_OFFSET_NAME(ss), REG_OFFSET_END, }; /** * regs_query_register_offset() - query register offset from its name * 
@name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL; */ int regs_query_register_offset(const char *name) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL; } /** * regs_query_register_name() - query register name from its offset * @offset: the offset of a register in struct pt_regs. * * regs_query_register_name() returns the name of a register from its * offset in struct pt_regs. If the @offset is invalid, this returns NULL; */ const char *regs_query_register_name(unsigned int offset) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (roff->offset == offset) return roff->name; return NULL; } static const int arg_offs_table[] = { #ifdef CONFIG_X86_32 [0] = offsetof(struct pt_regs, ax), [1] = offsetof(struct pt_regs, dx), [2] = offsetof(struct pt_regs, cx) #else /* CONFIG_X86_64 */ [0] = offsetof(struct pt_regs, di), [1] = offsetof(struct pt_regs, si), [2] = offsetof(struct pt_regs, dx), [3] = offsetof(struct pt_regs, cx), [4] = offsetof(struct pt_regs, r8), [5] = offsetof(struct pt_regs, r9) #endif }; /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* * Determines which flags the user has access to [1 = access, 0 = no access]. */ #define FLAG_MASK_32 ((unsigned long) \ (X86_EFLAGS_CF | X86_EFLAGS_PF | \ X86_EFLAGS_AF | X86_EFLAGS_ZF | \ X86_EFLAGS_SF | X86_EFLAGS_TF | \ X86_EFLAGS_DF | X86_EFLAGS_OF | \ X86_EFLAGS_RF | X86_EFLAGS_AC)) /* * Determines whether a value may be installed in a segment register. 
*/ static inline bool invalid_selector(u16 value) { return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL); } #ifdef CONFIG_X86_32 #define FLAG_MASK FLAG_MASK_32 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); return &regs->bx + (regno >> 2); } static u16 get_segment_reg(struct task_struct *task, unsigned long offset) { /* * Returning the value truncates it to 16 bits. */ unsigned int retval; if (offset != offsetof(struct user_regs_struct, gs)) retval = *pt_regs_access(task_pt_regs(task), offset); else { if (task == current) retval = get_user_gs(task_pt_regs(task)); else retval = task_user_gs(task); } return retval; } static int set_segment_reg(struct task_struct *task, unsigned long offset, u16 value) { /* * The value argument was already truncated to 16 bits. */ if (invalid_selector(value)) return -EIO; /* * For %cs and %ss we cannot permit a null selector. * We can permit a bogus selector as long as it has USER_RPL. * Null selectors are fine for other segment registers, but * we will never get back to user mode with invalid %cs or %ss * and will take the trap in iret instead. Much code relies * on user_mode() to distinguish a user trap frame (which can * safely use invalid selectors) from a kernel trap frame. 
*/ switch (offset) { case offsetof(struct user_regs_struct, cs): case offsetof(struct user_regs_struct, ss): if (unlikely(value == 0)) return -EIO; default: *pt_regs_access(task_pt_regs(task), offset) = value; break; case offsetof(struct user_regs_struct, gs): if (task == current) set_user_gs(task_pt_regs(task), value); else task_user_gs(task) = value; } return 0; } #else /* CONFIG_X86_64 */ #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT) static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset) { BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0); return &regs->r15 + (offset / sizeof(regs->r15)); } static u16 get_segment_reg(struct task_struct *task, unsigned long offset) { /* * Returning the value truncates it to 16 bits. */ unsigned int seg; switch (offset) { case offsetof(struct user_regs_struct, fs): if (task == current) { /* Older gas can't assemble movq %?s,%r?? */ asm("movl %%fs,%0" : "=r" (seg)); return seg; } return task->thread.fsindex; case offsetof(struct user_regs_struct, gs): if (task == current) { asm("movl %%gs,%0" : "=r" (seg)); return seg; } return task->thread.gsindex; case offsetof(struct user_regs_struct, ds): if (task == current) { asm("movl %%ds,%0" : "=r" (seg)); return seg; } return task->thread.ds; case offsetof(struct user_regs_struct, es): if (task == current) { asm("movl %%es,%0" : "=r" (seg)); return seg; } return task->thread.es; case offsetof(struct user_regs_struct, cs): case offsetof(struct user_regs_struct, ss): break; } return *pt_regs_access(task_pt_regs(task), offset); } static int set_segment_reg(struct task_struct *task, unsigned long offset, u16 value) { /* * The value argument was already truncated to 16 bits. */ if (invalid_selector(value)) return -EIO; switch (offset) { case offsetof(struct user_regs_struct,fs): /* * If this is setting fs as for normal 64-bit use but * setting fs_base has implicitly changed it, leave it. 
*/ if ((value == FS_TLS_SEL && task->thread.fsindex == 0 && task->thread.fs != 0) || (value == 0 && task->thread.fsindex == FS_TLS_SEL && task->thread.fs == 0)) break; task->thread.fsindex = value; if (task == current) loadsegment(fs, task->thread.fsindex); break; case offsetof(struct user_regs_struct,gs): /* * If this is setting gs as for normal 64-bit use but * setting gs_base has implicitly changed it, leave it. */ if ((value == GS_TLS_SEL && task->thread.gsindex == 0 && task->thread.gs != 0) || (value == 0 && task->thread.gsindex == GS_TLS_SEL && task->thread.gs == 0)) break; task->thread.gsindex = value; if (task == current) load_gs_index(task->thread.gsindex); break; case offsetof(struct user_regs_struct,ds): task->thread.ds = value; if (task == current) loadsegment(ds, task->thread.ds); break; case offsetof(struct user_regs_struct,es): task->thread.es = value; if (task == current) loadsegment(es, task->thread.es); break; /* * Can't actually change these in 64-bit mode. */ case offsetof(struct user_regs_struct,cs): if (unlikely(value == 0)) return -EIO; #ifdef CONFIG_IA32_EMULATION if (test_tsk_thread_flag(task, TIF_IA32)) task_pt_regs(task)->cs = value; #endif break; case offsetof(struct user_regs_struct,ss): if (unlikely(value == 0)) return -EIO; #ifdef CONFIG_IA32_EMULATION if (test_tsk_thread_flag(task, TIF_IA32)) task_pt_regs(task)->ss = value; #endif break; } return 0; } #endif /* CONFIG_X86_32 */ static unsigned long get_flags(struct task_struct *task) { unsigned long retval = task_pt_regs(task)->flags; /* * If the debugger set TF, hide it from the readout. */ if (test_tsk_thread_flag(task, TIF_FORCED_TF)) retval &= ~X86_EFLAGS_TF; return retval; } static int set_flags(struct task_struct *task, unsigned long value) { struct pt_regs *regs = task_pt_regs(task); /* * If the user value contains TF, mark that * it was not "us" (the debugger) that set it. * If not, make sure it stays set if we had. 
*/ if (value & X86_EFLAGS_TF) clear_tsk_thread_flag(task, TIF_FORCED_TF); else if (test_tsk_thread_flag(task, TIF_FORCED_TF)) value |= X86_EFLAGS_TF; regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK); return 0; } static int putreg(struct task_struct *child, unsigned long offset, unsigned long value) { switch (offset) { case offsetof(struct user_regs_struct, cs): case offsetof(struct user_regs_struct, ds): case offsetof(struct user_regs_struct, es): case offsetof(struct user_regs_struct, fs): case offsetof(struct user_regs_struct, gs): case offsetof(struct user_regs_struct, ss): return set_segment_reg(child, offset, value); case offsetof(struct user_regs_struct, flags): return set_flags(child, value); #ifdef CONFIG_X86_64 case offsetof(struct user_regs_struct,fs_base): if (value >= TASK_SIZE_OF(child)) return -EIO; /* * When changing the segment base, use do_arch_prctl * to set either thread.fs or thread.fsindex and the * corresponding GDT slot. */ if (child->thread.fs != value) return do_arch_prctl(child, ARCH_SET_FS, value); return 0; case offsetof(struct user_regs_struct,gs_base): /* * Exactly the same here as the %fs handling above. */ if (value >= TASK_SIZE_OF(child)) return -EIO; if (child->thread.gs != value) return do_arch_prctl(child, ARCH_SET_GS, value); return 0; #endif } *pt_regs_access(task_pt_regs(child), offset) = value; return 0; } static unsigned long getreg(struct task_struct *task, unsigned long offset) { switch (offset) { case offsetof(struct user_regs_struct, cs): case offsetof(struct user_regs_struct, ds): case offsetof(struct user_regs_struct, es): case offsetof(struct user_regs_struct, fs): case offsetof(struct user_regs_struct, gs): case offsetof(struct user_regs_struct, ss): return get_segment_reg(task, offset); case offsetof(struct user_regs_struct, flags): return get_flags(task); #ifdef CONFIG_X86_64 case offsetof(struct user_regs_struct, fs_base): { /* * do_arch_prctl may have used a GDT slot instead of * the MSR. 
To userland, it appears the same either * way, except the %fs segment selector might not be 0. */ unsigned int seg = task->thread.fsindex; if (task->thread.fs != 0) return task->thread.fs; if (task == current) asm("movl %%fs,%0" : "=r" (seg)); if (seg != FS_TLS_SEL) return 0; return get_desc_base(&task->thread.tls_array[FS_TLS]); } case offsetof(struct user_regs_struct, gs_base): { /* * Exactly the same here as the %fs handling above. */ unsigned int seg = task->thread.gsindex; if (task->thread.gs != 0) return task->thread.gs; if (task == current) asm("movl %%gs,%0" : "=r" (seg)); if (seg != GS_TLS_SEL) return 0; return get_desc_base(&task->thread.tls_array[GS_TLS]); } #endif } return *pt_regs_access(task_pt_regs(task), offset); } static int genregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { if (kbuf) { unsigned long *k = kbuf; while (count >= sizeof(*k)) { *k++ = getreg(target, pos); count -= sizeof(*k); pos += sizeof(*k); } } else { unsigned long __user *u = ubuf; while (count >= sizeof(*u)) { if (__put_user(getreg(target, pos), u++)) return -EFAULT; count -= sizeof(*u); pos += sizeof(*u); } } return 0; } static int genregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret = 0; if (kbuf) { const unsigned long *k = kbuf; while (count >= sizeof(*k) && !ret) { ret = putreg(target, pos, *k++); count -= sizeof(*k); pos += sizeof(*k); } } else { const unsigned long __user *u = ubuf; while (count >= sizeof(*u) && !ret) { unsigned long word; ret = __get_user(word, u++); if (ret) break; ret = putreg(target, pos, word); count -= sizeof(*u); pos += sizeof(*u); } } return ret; } static void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { int i; struct thread_struct *thread = &(current->thread); /* * Store in the virtual DR6 
register the fact that the breakpoint * was hit so the thread's debugger will see it. */ for (i = 0; i < HBP_NUM; i++) { if (thread->ptrace_bps[i] == bp) break; } thread->debugreg6 |= (DR_TRAP0 << i); } /* * Walk through every ptrace breakpoints for this thread and * build the dr7 value on top of their attributes. * */ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) { int i; int dr7 = 0; struct arch_hw_breakpoint *info; for (i = 0; i < HBP_NUM; i++) { if (bp[i] && !bp[i]->attr.disabled) { info = counter_arch_bp(bp[i]); dr7 |= encode_dr7(i, info->len, info->type); } } return dr7; } static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, struct task_struct *tsk, int disabled) { int err; int gen_len, gen_type; struct perf_event_attr attr; /* * We should have at least an inactive breakpoint at this * slot. It means the user is writing dr7 without having * written the address register first */ if (!bp) return -EINVAL; err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); if (err) return err; attr = bp->attr; attr.bp_len = gen_len; attr.bp_type = gen_type; attr.disabled = disabled; return modify_user_hw_breakpoint(bp, &attr); } /* * Handle ptrace writes to debug register 7. */ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) { struct thread_struct *thread = &(tsk->thread); unsigned long old_dr7; int i, orig_ret = 0, rc = 0; int enabled, second_pass = 0; unsigned len, type; struct perf_event *bp; if (ptrace_get_breakpoints(tsk) < 0) return -ESRCH; data &= ~DR_CONTROL_RESERVED; old_dr7 = ptrace_get_dr7(thread->ptrace_bps); restore: /* * Loop through all the hardware breakpoints, making the * appropriate changes to each. */ for (i = 0; i < HBP_NUM; i++) { enabled = decode_dr7(data, i, &len, &type); bp = thread->ptrace_bps[i]; if (!enabled) { if (bp) { /* * Don't unregister the breakpoints right-away, * unless all register_user_hw_breakpoint() * requests have succeeded. 
This prevents * any window of opportunity for debug * register grabbing by other users. */ if (!second_pass) continue; rc = ptrace_modify_breakpoint(bp, len, type, tsk, 1); if (rc) break; } continue; } rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0); if (rc) break; } /* * Make a second pass to free the remaining unused breakpoints * or to restore the original breakpoints if an error occurred. */ if (!second_pass) { second_pass = 1; if (rc < 0) { orig_ret = rc; data = old_dr7; } goto restore; } ptrace_put_breakpoints(tsk); return ((orig_ret < 0) ? orig_ret : rc); } /* * Handle PTRACE_PEEKUSR calls for the debug register area. */ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) { struct thread_struct *thread = &(tsk->thread); unsigned long val = 0; if (n < HBP_NUM) { struct perf_event *bp; if (ptrace_get_breakpoints(tsk) < 0) return -ESRCH; bp = thread->ptrace_bps[n]; if (!bp) val = 0; else val = bp->hw.info.address; ptrace_put_breakpoints(tsk); } else if (n == 6) { val = thread->debugreg6; } else if (n == 7) { val = thread->ptrace_dr7; } return val; } static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, unsigned long addr) { struct perf_event *bp; struct thread_struct *t = &tsk->thread; struct perf_event_attr attr; int err = 0; if (ptrace_get_breakpoints(tsk) < 0) return -ESRCH; if (!t->ptrace_bps[nr]) { ptrace_breakpoint_init(&attr); /* * Put stub len and type to register (reserve) an inactive but * correct bp */ attr.bp_addr = addr; attr.bp_len = HW_BREAKPOINT_LEN_1; attr.bp_type = HW_BREAKPOINT_W; attr.disabled = 1; bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, tsk); /* * CHECKME: the previous code returned -EIO if the addr wasn't * a valid task virtual addr. The new one will return -EINVAL in * this case. * -EINVAL may be what we want for in-kernel breakpoints users, * but -EIO looks better for ptrace, since we refuse a register * writing for the user. And anyway this is the previous * behaviour. 
*/ if (IS_ERR(bp)) { err = PTR_ERR(bp); goto put; } t->ptrace_bps[nr] = bp; } else { bp = t->ptrace_bps[nr]; attr = bp->attr; attr.bp_addr = addr; err = modify_user_hw_breakpoint(bp, &attr); } put: ptrace_put_breakpoints(tsk); return err; } /* * Handle PTRACE_POKEUSR calls for the debug register area. */ static int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val) { struct thread_struct *thread = &(tsk->thread); int rc = 0; /* There are no DR4 or DR5 registers */ if (n == 4 || n == 5) return -EIO; if (n == 6) { thread->debugreg6 = val; goto ret_path; } if (n < HBP_NUM) { rc = ptrace_set_breakpoint_addr(tsk, n, val); if (rc) return rc; } /* All that's left is DR7 */ if (n == 7) { rc = ptrace_write_dr7(tsk, val); if (!rc) thread->ptrace_dr7 = val; } ret_path: return rc; } /* * These access the current or another (stopped) task's io permission * bitmap for debugging or core dump. */ static int ioperm_active(struct task_struct *target, const struct user_regset *regset) { return target->thread.io_bitmap_max / regset->size; } static int ioperm_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { if (!target->thread.io_bitmap_ptr) return -ENXIO; return user_regset_copyout(&pos, &count, &kbuf, &ubuf, target->thread.io_bitmap_ptr, 0, IO_BITMAP_BYTES); } /* * Called by kernel/ptrace.c when detaching.. * * Make sure the single step bit is not set. */ void ptrace_disable(struct task_struct *child) { user_disable_single_step(child); #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); #endif } #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION static const struct user_regset_view user_x86_32_view; /* Initialized below. 
*/ #endif long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; unsigned long __user *datap = (unsigned long __user *)data; switch (request) { /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { unsigned long tmp; ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user)) break; tmp = 0; /* Default return condition */ if (addr < sizeof(struct user_regs_struct)) tmp = getreg(child, addr); else if (addr >= offsetof(struct user, u_debugreg[0]) && addr <= offsetof(struct user, u_debugreg[7])) { addr -= offsetof(struct user, u_debugreg[0]); tmp = ptrace_get_debugreg(child, addr / sizeof(data)); } ret = put_user(tmp, datap); break; } case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user)) break; if (addr < sizeof(struct user_regs_struct)) ret = putreg(child, addr, data); else if (addr >= offsetof(struct user, u_debugreg[0]) && addr <= offsetof(struct user, u_debugreg[7])) { addr -= offsetof(struct user, u_debugreg[0]); ret = ptrace_set_debugreg(child, addr / sizeof(data), data); } break; case PTRACE_GETREGS: /* Get all gp regs from the child. */ return copy_regset_to_user(child, task_user_regset_view(current), REGSET_GENERAL, 0, sizeof(struct user_regs_struct), datap); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_GENERAL, 0, sizeof(struct user_regs_struct), datap); case PTRACE_GETFPREGS: /* Get the child FPU state. */ return copy_regset_to_user(child, task_user_regset_view(current), REGSET_FP, 0, sizeof(struct user_i387_struct), datap); case PTRACE_SETFPREGS: /* Set the child FPU state. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_FP, 0, sizeof(struct user_i387_struct), datap); #ifdef CONFIG_X86_32 case PTRACE_GETFPXREGS: /* Get the child extended FPU state. 
*/ return copy_regset_to_user(child, &user_x86_32_view, REGSET_XFP, 0, sizeof(struct user_fxsr_struct), datap) ? -EIO : 0; case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ return copy_regset_from_user(child, &user_x86_32_view, REGSET_XFP, 0, sizeof(struct user_fxsr_struct), datap) ? -EIO : 0; #endif #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION case PTRACE_GET_THREAD_AREA: if ((int) addr < 0) return -EIO; ret = do_get_thread_area(child, addr, (struct user_desc __user *)data); break; case PTRACE_SET_THREAD_AREA: if ((int) addr < 0) return -EIO; ret = do_set_thread_area(child, addr, (struct user_desc __user *)data, 0); break; #endif #ifdef CONFIG_X86_64 /* normal 64bit interface to access TLS data. Works just like arch_prctl, except that the arguments are reversed. */ case PTRACE_ARCH_PRCTL: ret = do_arch_prctl(child, data, addr); break; #endif default: ret = ptrace_request(child, request, addr, data); break; } return ret; } #ifdef CONFIG_IA32_EMULATION #include <linux/compat.h> #include <linux/syscalls.h> #include <asm/ia32.h> #include <asm/user32.h> #define R32(l,q) \ case offsetof(struct user32, regs.l): \ regs->q = value; break #define SEG32(rs) \ case offsetof(struct user32, regs.rs): \ return set_segment_reg(child, \ offsetof(struct user_regs_struct, rs), \ value); \ break static int putreg32(struct task_struct *child, unsigned regno, u32 value) { struct pt_regs *regs = task_pt_regs(child); switch (regno) { SEG32(cs); SEG32(ds); SEG32(es); SEG32(fs); SEG32(gs); SEG32(ss); R32(ebx, bx); R32(ecx, cx); R32(edx, dx); R32(edi, di); R32(esi, si); R32(ebp, bp); R32(eax, ax); R32(eip, ip); R32(esp, sp); case offsetof(struct user32, regs.orig_eax): /* * A 32-bit debugger setting orig_eax means to restore * the state of the task restarting a 32-bit syscall. * Make sure we interpret the -ERESTART* codes correctly * in case the task is not actually still sitting at the * exit from a 32-bit syscall with TS_COMPAT still set. 
*/ regs->orig_ax = value; if (syscall_get_nr(child, regs) >= 0) task_thread_info(child)->status |= TS_COMPAT; break; case offsetof(struct user32, regs.eflags): return set_flags(child, value); case offsetof(struct user32, u_debugreg[0]) ... offsetof(struct user32, u_debugreg[7]): regno -= offsetof(struct user32, u_debugreg[0]); return ptrace_set_debugreg(child, regno / 4, value); default: if (regno > sizeof(struct user32) || (regno & 3)) return -EIO; /* * Other dummy fields in the virtual user structure * are ignored */ break; } return 0; } #undef R32 #undef SEG32 #define R32(l,q) \ case offsetof(struct user32, regs.l): \ *val = regs->q; break #define SEG32(rs) \ case offsetof(struct user32, regs.rs): \ *val = get_segment_reg(child, \ offsetof(struct user_regs_struct, rs)); \ break static int getreg32(struct task_struct *child, unsigned regno, u32 *val) { struct pt_regs *regs = task_pt_regs(child); switch (regno) { SEG32(ds); SEG32(es); SEG32(fs); SEG32(gs); R32(cs, cs); R32(ss, ss); R32(ebx, bx); R32(ecx, cx); R32(edx, dx); R32(edi, di); R32(esi, si); R32(ebp, bp); R32(eax, ax); R32(orig_eax, orig_ax); R32(eip, ip); R32(esp, sp); case offsetof(struct user32, regs.eflags): *val = get_flags(child); break; case offsetof(struct user32, u_debugreg[0]) ... 
offsetof(struct user32, u_debugreg[7]): regno -= offsetof(struct user32, u_debugreg[0]); *val = ptrace_get_debugreg(child, regno / 4); break; default: if (regno > sizeof(struct user32) || (regno & 3)) return -EIO; /* * Other dummy fields in the virtual user structure * are ignored */ *val = 0; break; } return 0; } #undef R32 #undef SEG32 static int genregs32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { if (kbuf) { compat_ulong_t *k = kbuf; while (count >= sizeof(*k)) { getreg32(target, pos, k++); count -= sizeof(*k); pos += sizeof(*k); } } else { compat_ulong_t __user *u = ubuf; while (count >= sizeof(*u)) { compat_ulong_t word; getreg32(target, pos, &word); if (__put_user(word, u++)) return -EFAULT; count -= sizeof(*u); pos += sizeof(*u); } } return 0; } static int genregs32_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret = 0; if (kbuf) { const compat_ulong_t *k = kbuf; while (count >= sizeof(*k) && !ret) { ret = putreg32(target, pos, *k++); count -= sizeof(*k); pos += sizeof(*k); } } else { const compat_ulong_t __user *u = ubuf; while (count >= sizeof(*u) && !ret) { compat_ulong_t word; ret = __get_user(word, u++); if (ret) break; ret = putreg32(target, pos, word); count -= sizeof(*u); pos += sizeof(*u); } } return ret; } #ifdef CONFIG_X86_X32_ABI static long x32_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { unsigned long addr = caddr; unsigned long data = cdata; void __user *datap = compat_ptr(data); int ret; switch (request) { /* Read 32bits at location addr in the USER area. Only allow to return the lower 32bits of segment and debug registers. 
*/ case PTRACE_PEEKUSR: { u32 tmp; ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) || addr < offsetof(struct user_regs_struct, cs)) break; tmp = 0; /* Default return condition */ if (addr < sizeof(struct user_regs_struct)) tmp = getreg(child, addr); else if (addr >= offsetof(struct user, u_debugreg[0]) && addr <= offsetof(struct user, u_debugreg[7])) { addr -= offsetof(struct user, u_debugreg[0]); tmp = ptrace_get_debugreg(child, addr / sizeof(data)); } ret = put_user(tmp, (__u32 __user *)datap); break; } /* Write the word at location addr in the USER area. Only allow to update segment and debug registers with the upper 32bits zero-extended. */ case PTRACE_POKEUSR: ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) || addr < offsetof(struct user_regs_struct, cs)) break; if (addr < sizeof(struct user_regs_struct)) ret = putreg(child, addr, data); else if (addr >= offsetof(struct user, u_debugreg[0]) && addr <= offsetof(struct user, u_debugreg[7])) { addr -= offsetof(struct user, u_debugreg[0]); ret = ptrace_set_debugreg(child, addr / sizeof(data), data); } break; case PTRACE_GETREGS: /* Get all gp regs from the child. */ return copy_regset_to_user(child, task_user_regset_view(current), REGSET_GENERAL, 0, sizeof(struct user_regs_struct), datap); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_GENERAL, 0, sizeof(struct user_regs_struct), datap); case PTRACE_GETFPREGS: /* Get the child FPU state. */ return copy_regset_to_user(child, task_user_regset_view(current), REGSET_FP, 0, sizeof(struct user_i387_struct), datap); case PTRACE_SETFPREGS: /* Set the child FPU state. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_FP, 0, sizeof(struct user_i387_struct), datap); /* normal 64bit interface to access TLS data. Works just like arch_prctl, except that the arguments are reversed. 
*/ case PTRACE_ARCH_PRCTL: return do_arch_prctl(child, data, addr); default: return compat_ptrace_request(child, request, addr, data); } return ret; } #endif long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { unsigned long addr = caddr; unsigned long data = cdata; void __user *datap = compat_ptr(data); int ret; __u32 val; #ifdef CONFIG_X86_X32_ABI if (!is_ia32_task()) return x32_arch_ptrace(child, request, caddr, cdata); #endif switch (request) { case PTRACE_PEEKUSR: ret = getreg32(child, addr, &val); if (ret == 0) ret = put_user(val, (__u32 __user *)datap); break; case PTRACE_POKEUSR: ret = putreg32(child, addr, data); break; case PTRACE_GETREGS: /* Get all gp regs from the child. */ return copy_regset_to_user(child, &user_x86_32_view, REGSET_GENERAL, 0, sizeof(struct user_regs_struct32), datap); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, &user_x86_32_view, REGSET_GENERAL, 0, sizeof(struct user_regs_struct32), datap); case PTRACE_GETFPREGS: /* Get the child FPU state. */ return copy_regset_to_user(child, &user_x86_32_view, REGSET_FP, 0, sizeof(struct user_i387_ia32_struct), datap); case PTRACE_SETFPREGS: /* Set the child FPU state. */ return copy_regset_from_user( child, &user_x86_32_view, REGSET_FP, 0, sizeof(struct user_i387_ia32_struct), datap); case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ return copy_regset_to_user(child, &user_x86_32_view, REGSET_XFP, 0, sizeof(struct user32_fxsr_struct), datap); case PTRACE_SETFPXREGS: /* Set the child extended FPU state. 
*/ return copy_regset_from_user(child, &user_x86_32_view, REGSET_XFP, 0, sizeof(struct user32_fxsr_struct), datap); case PTRACE_GET_THREAD_AREA: case PTRACE_SET_THREAD_AREA: return arch_ptrace(child, request, addr, data); default: return compat_ptrace_request(child, request, addr, data); } return ret; } #endif /* CONFIG_IA32_EMULATION */ #ifdef CONFIG_X86_64 static struct user_regset x86_64_regsets[] __read_mostly = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = sizeof(struct user_regs_struct) / sizeof(long), .size = sizeof(long), .align = sizeof(long), .get = genregs_get, .set = genregs_set }, [REGSET_FP] = { .core_note_type = NT_PRFPREG, .n = sizeof(struct user_i387_struct) / sizeof(long), .size = sizeof(long), .align = sizeof(long), .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set }, [REGSET_XSTATE] = { .core_note_type = NT_X86_XSTATE, .size = sizeof(u64), .align = sizeof(u64), .active = xstateregs_active, .get = xstateregs_get, .set = xstateregs_set }, [REGSET_IOPERM64] = { .core_note_type = NT_386_IOPERM, .n = IO_BITMAP_LONGS, .size = sizeof(long), .align = sizeof(long), .active = ioperm_active, .get = ioperm_get }, }; static const struct user_regset_view user_x86_64_view = { .name = "x86_64", .e_machine = EM_X86_64, .regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets) }; #else /* CONFIG_X86_32 */ #define user_regs_struct32 user_regs_struct #define genregs32_get genregs_get #define genregs32_set genregs_set #define user_i387_ia32_struct user_i387_struct #define user32_fxsr_struct user_fxsr_struct #endif /* CONFIG_X86_64 */ #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION static struct user_regset x86_32_regsets[] __read_mostly = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = sizeof(struct user_regs_struct32) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .get = genregs32_get, .set = genregs32_set }, [REGSET_FP] = { .core_note_type = NT_PRFPREG, .n = sizeof(struct user_i387_ia32_struct) / 
sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .active = fpregs_active, .get = fpregs_get, .set = fpregs_set }, [REGSET_XFP] = { .core_note_type = NT_PRXFPREG, .n = sizeof(struct user32_fxsr_struct) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set }, [REGSET_XSTATE] = { .core_note_type = NT_X86_XSTATE, .size = sizeof(u64), .align = sizeof(u64), .active = xstateregs_active, .get = xstateregs_get, .set = xstateregs_set }, [REGSET_TLS] = { .core_note_type = NT_386_TLS, .n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN, .size = sizeof(struct user_desc), .align = sizeof(struct user_desc), .active = regset_tls_active, .get = regset_tls_get, .set = regset_tls_set }, [REGSET_IOPERM32] = { .core_note_type = NT_386_IOPERM, .n = IO_BITMAP_BYTES / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .active = ioperm_active, .get = ioperm_get }, }; static const struct user_regset_view user_x86_32_view = { .name = "i386", .e_machine = EM_386, .regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets) }; #endif /* * This represents bytes 464..511 in the memory layout exported through * the REGSET_XSTATE interface. 
*/ u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; void update_regset_xstate_info(unsigned int size, u64 xstate_mask) { #ifdef CONFIG_X86_64 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64); #endif #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64); #endif xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask; } const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_IA32_EMULATION if (test_tsk_thread_flag(task, TIF_IA32)) #endif #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION return &user_x86_32_view; #endif #ifdef CONFIG_X86_64 return &user_x86_64_view; #endif } static void fill_sigtrap_info(struct task_struct *tsk, struct pt_regs *regs, int error_code, int si_code, struct siginfo *info) { tsk->thread.trap_nr = X86_TRAP_DB; tsk->thread.error_code = error_code; memset(info, 0, sizeof(*info)); info->si_signo = SIGTRAP; info->si_code = si_code; info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; } void user_single_step_siginfo(struct task_struct *tsk, struct pt_regs *regs, struct siginfo *info) { fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info); } void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code, int si_code) { struct siginfo info; fill_sigtrap_info(tsk, regs, error_code, si_code, &info); /* Send us the fake SIGTRAP */ force_sig_info(SIGTRAP, &info, tsk); } #ifdef CONFIG_X86_32 # define IS_IA32 1 #elif defined CONFIG_IA32_EMULATION # define IS_IA32 is_compat_task() #else # define IS_IA32 0 #endif /* * We must return the syscall number to actually look up in the table. * This can be -1L to skip running any syscall at all. */ long syscall_trace_enter(struct pt_regs *regs) { long ret = 0; /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. 
* If user-mode had set TF itself, then it's still clear from * do_debug() and we need to set it again to restore the user * state. If we entered on the slow path, TF was already set. */ if (test_thread_flag(TIF_SINGLESTEP)) regs->flags |= X86_EFLAGS_TF; /* do the secure computing check first */ secure_computing(regs->orig_ax); if (unlikely(test_thread_flag(TIF_SYSCALL_EMU))) ret = -1L; if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) && tracehook_report_syscall_entry(regs)) ret = -1L; if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->orig_ax); if (IS_IA32) audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax, regs->bx, regs->cx, regs->dx, regs->si); #ifdef CONFIG_X86_64 else audit_syscall_entry(AUDIT_ARCH_X86_64, regs->orig_ax, regs->di, regs->si, regs->dx, regs->r10); #endif return ret ?: regs->orig_ax; } void syscall_trace_leave(struct pt_regs *regs) { bool step; audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_exit(regs, regs->ax); /* * If TIF_SYSCALL_EMU is set, we only get here because of * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP). * We already reported this syscall instruction in * syscall_trace_enter(). */ step = unlikely(test_thread_flag(TIF_SINGLESTEP)) && !test_thread_flag(TIF_SYSCALL_EMU); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); }
gpl-2.0
mazen912/rk30_r-box_kernel
drivers/staging/iio/resolver/ad2s120x.c
2342
7432
/* * ad2s120x.c simple support for the ADI Resolver to Digital Converters: AD2S1200/1205 * * Copyright (c) 2010-2010 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/delay.h> #include <linux/gpio.h> #include "../iio.h" #include "../sysfs.h" #define DRV_NAME "ad2s120x" /* input pin sample and rdvel is controlled by driver */ #define AD2S120X_PN 2 /* input clock on serial interface */ #define AD2S120X_HZ 8192000 /* clock period in nano second */ #define AD2S120X_TSCLK (1000000000/AD2S120X_HZ) struct ad2s120x_state { struct mutex lock; struct iio_dev *idev; struct spi_device *sdev; unsigned short sample; unsigned short rdvel; u8 rx[2]; u8 tx[2]; }; static ssize_t ad2s120x_show_pos_vel(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_message msg; struct spi_transfer xfer; int ret = 0; ssize_t len = 0; u16 pos; s16 vel; u8 status; struct iio_dev *idev = dev_get_drvdata(dev); struct ad2s120x_state *st = idev->dev_data; xfer.len = 1; xfer.tx_buf = st->tx; xfer.rx_buf = st->rx; mutex_lock(&st->lock); gpio_set_value(st->sample, 0); /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */ udelay(1); gpio_set_value(st->sample, 1); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len = sprintf(buf, "%d %c%c%c%c ", pos, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? 
'1' : '0'); /* delay 18 ns */ /* ndelay(18); */ gpio_set_value(st->rdvel, 0); /* ndelay(5);*/ spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; vel = (st->rx[0] & 0x80) ? 0xf000 : 0; vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len += sprintf(buf + len, "%d %c%c%c%c\n", vel, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); error_ret: gpio_set_value(st->rdvel, 1); /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */ udelay(1); mutex_unlock(&st->lock); return ret ? ret : len; } static ssize_t ad2s120x_show_pos(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_message msg; struct spi_transfer xfer; int ret = 0; ssize_t len = 0; u16 pos; u8 status; struct iio_dev *idev = dev_get_drvdata(dev); struct ad2s120x_state *st = idev->dev_data; xfer.len = 1; xfer.tx_buf = st->tx; xfer.rx_buf = st->rx; mutex_lock(&st->lock); gpio_set_value(st->sample, 0); /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */ udelay(1); gpio_set_value(st->sample, 1); gpio_set_value(st->rdvel, 1); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len = sprintf(buf, "%d %c%c%c%c ", pos, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); error_ret: /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */ udelay(1); mutex_unlock(&st->lock); return ret ? 
ret : len; } static ssize_t ad2s120x_show_vel(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_message msg; struct spi_transfer xfer; int ret = 0; ssize_t len = 0; s16 vel; u8 status; struct iio_dev *idev = dev_get_drvdata(dev); struct ad2s120x_state *st = idev->dev_data; xfer.len = 1; xfer.tx_buf = st->tx; xfer.rx_buf = st->rx; mutex_lock(&st->lock); gpio_set_value(st->sample, 0); /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */ udelay(1); gpio_set_value(st->sample, 1); gpio_set_value(st->rdvel, 0); /* ndelay(5);*/ spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; vel = (st->rx[0] & 0x80) ? 0xf000 : 0; vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len += sprintf(buf + len, "%d %c%c%c%c\n", vel, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); error_ret: gpio_set_value(st->rdvel, 1); /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */ udelay(1); mutex_unlock(&st->lock); return ret ? 
ret : len; } static IIO_CONST_ATTR(description, "12-Bit R/D Converter with Reference Oscillator"); static IIO_DEVICE_ATTR(pos_vel, S_IRUGO, ad2s120x_show_pos_vel, NULL, 0); static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s120x_show_pos, NULL, 0); static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s120x_show_vel, NULL, 0); static struct attribute *ad2s120x_attributes[] = { &iio_const_attr_description.dev_attr.attr, &iio_dev_attr_pos_vel.dev_attr.attr, &iio_dev_attr_pos.dev_attr.attr, &iio_dev_attr_vel.dev_attr.attr, NULL, }; static const struct attribute_group ad2s120x_attribute_group = { .attrs = ad2s120x_attributes, }; static const struct iio_info ad2s120x_info = { .attrs = &ad2s120x_attribute_group, .driver_module = THIS_MODULE, }; static int __devinit ad2s120x_probe(struct spi_device *spi) { struct ad2s120x_state *st; int pn, ret = 0; unsigned short *pins = spi->dev.platform_data; for (pn = 0; pn < AD2S120X_PN; pn++) { if (gpio_request(pins[pn], DRV_NAME)) { pr_err("%s: request gpio pin %d failed\n", DRV_NAME, pins[pn]); goto error_ret; } gpio_direction_output(pins[pn], 1); } st = kzalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) { ret = -ENOMEM; goto error_ret; } spi_set_drvdata(spi, st); mutex_init(&st->lock); st->sdev = spi; st->sample = pins[0]; st->rdvel = pins[1]; st->idev = iio_allocate_device(0); if (st->idev == NULL) { ret = -ENOMEM; goto error_free_st; } st->idev->dev.parent = &spi->dev; st->idev->info = &ad2s120x_info; st->idev->dev_data = (void *)(st); st->idev->modes = INDIO_DIRECT_MODE; ret = iio_device_register(st->idev); if (ret) goto error_free_dev; spi->max_speed_hz = AD2S120X_HZ; spi->mode = SPI_MODE_3; spi_setup(spi); return 0; error_free_dev: iio_free_device(st->idev); error_free_st: kfree(st); error_ret: for (--pn; pn >= 0; pn--) gpio_free(pins[pn]); return ret; } static int __devexit ad2s120x_remove(struct spi_device *spi) { struct ad2s120x_state *st = spi_get_drvdata(spi); iio_device_unregister(st->idev); kfree(st); return 0; } static struct spi_driver 
ad2s120x_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ad2s120x_probe, .remove = __devexit_p(ad2s120x_remove), }; static __init int ad2s120x_spi_init(void) { return spi_register_driver(&ad2s120x_driver); } module_init(ad2s120x_spi_init); static __exit void ad2s120x_spi_exit(void) { spi_unregister_driver(&ad2s120x_driver); } module_exit(ad2s120x_spi_exit); MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>"); MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
TheSSJ/android_kernel_asus_moorefield
arch/arm/mach-integrator/pci.c
2342
2985
/* * linux/arch/arm/mach-integrator/pci-integrator.c * * Copyright (C) 1999 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * PCI functions for Integrator */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/init.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> #include <mach/irqs.h> /* * A small note about bridges and interrupts. The DECchip 21050 (and * later) adheres to the PCI-PCI bridge specification. This says that * the interrupts on the other side of a bridge are swizzled in the * following manner: * * Dev Interrupt Interrupt * Pin on Pin on * Device Connector * * 4 A A * B B * C C * D D * * 5 A B * B C * C D * D A * * 6 A C * B D * C A * D B * * 7 A D * B A * C B * D C * * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A. * Thus, each swizzle is ((pin-1) + (device#-4)) % 4 */ /* * This routine handles multiple bridges. */ static u8 __init integrator_swizzle(struct pci_dev *dev, u8 *pinp) { if (*pinp == 0) *pinp = 1; return pci_common_swizzle(dev, pinp); } static int irq_tab[4] __initdata = { IRQ_AP_PCIINT0, IRQ_AP_PCIINT1, IRQ_AP_PCIINT2, IRQ_AP_PCIINT3 }; /* * map the specified device/slot/pin to an IRQ. This works out such * that slot 9 pin 1 is INT0, pin 2 is INT1, and slot 10 pin 1 is INT1. 
*/ static int __init integrator_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int intnr = ((slot - 9) + (pin - 1)) & 3; return irq_tab[intnr]; } extern void pci_v3_init(void *); static struct hw_pci integrator_pci __initdata = { .swizzle = integrator_swizzle, .map_irq = integrator_map_irq, .setup = pci_v3_setup, .nr_controllers = 1, .ops = &pci_v3_ops, .preinit = pci_v3_preinit, .postinit = pci_v3_postinit, }; static int __init integrator_pci_init(void) { if (machine_is_integrator()) pci_common_init(&integrator_pci); return 0; } subsys_initcall(integrator_pci_init);
gpl-2.0
yanghy/evo_ics
arch/x86/kernel/e820.c
2854
28563
/* * Handle the memory map. * The functions here do the job until bootmem takes over. * * Getting sanitize_e820_map() in sync with i386 version by applying change: * - Provisions for empty E820 memory regions (reported by certain BIOSes). * Alex Achenbach <xela@slit.de>, December 2002. * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/crash_dump.h> #include <linux/bootmem.h> #include <linux/pfn.h> #include <linux/suspend.h> #include <linux/acpi.h> #include <linux/firmware-map.h> #include <linux/memblock.h> #include <asm/e820.h> #include <asm/proto.h> #include <asm/setup.h> /* * The e820 map is the map that gets modified e.g. with command line parameters * and that is also registered with modifications in the kernel resource tree * with the iomem_resource as parent. * * The e820_saved is directly saved after the BIOS-provided memory map is * copied. It doesn't get modified afterwards. It's registered for the * /sys/firmware/memmap interface. * * That memory map is not modified and is used as base for kexec. The kexec'd * kernel should get the same memory map as the firmware provides. Then the * user can e.g. boot the original kernel with mem=1G while still booting the * next kernel with full memory. */ struct e820map e820; struct e820map e820_saved; /* For PCI or other memory-mapped resources */ unsigned long pci_mem_start = 0xaeedbabe; #ifdef CONFIG_PCI EXPORT_SYMBOL(pci_mem_start); #endif /* * This function checks if any part of the range <start,end> is mapped * with type. */ int e820_any_mapped(u64 start, u64 end, unsigned type) { int i; for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; if (type && ei->type != type) continue; if (ei->addr >= end || ei->addr + ei->size <= start) continue; return 1; } return 0; } EXPORT_SYMBOL_GPL(e820_any_mapped); /* * This function checks if the entire range <start,end> is mapped with type. 
* * Note: this function only works correct if the e820 table is sorted and * not-overlapping, which is the case */ int __init e820_all_mapped(u64 start, u64 end, unsigned type) { int i; for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; if (type && ei->type != type) continue; /* is the region (part) in overlap with the current region ?*/ if (ei->addr >= end || ei->addr + ei->size <= start) continue; /* if the region is at the beginning of <start,end> we move * start to the end of the region since it's ok until there */ if (ei->addr <= start) start = ei->addr + ei->size; /* * if start is now at or beyond end, we're done, full * coverage */ if (start >= end) return 1; } return 0; } /* * Add a memory region to the kernel e820 map. */ static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size, int type) { int x = e820x->nr_map; if (x >= ARRAY_SIZE(e820x->map)) { printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); return; } e820x->map[x].addr = start; e820x->map[x].size = size; e820x->map[x].type = type; e820x->nr_map++; } void __init e820_add_region(u64 start, u64 size, int type) { __e820_add_region(&e820, start, size, type); } static void __init e820_print_type(u32 type) { switch (type) { case E820_RAM: case E820_RESERVED_KERN: printk(KERN_CONT "(usable)"); break; case E820_RESERVED: printk(KERN_CONT "(reserved)"); break; case E820_ACPI: printk(KERN_CONT "(ACPI data)"); break; case E820_NVS: printk(KERN_CONT "(ACPI NVS)"); break; case E820_UNUSABLE: printk(KERN_CONT "(unusable)"); break; default: printk(KERN_CONT "type %u", type); break; } } void __init e820_print_map(char *who) { int i; for (i = 0; i < e820.nr_map; i++) { printk(KERN_INFO " %s: %016Lx - %016Lx ", who, (unsigned long long) e820.map[i].addr, (unsigned long long) (e820.map[i].addr + e820.map[i].size)); e820_print_type(e820.map[i].type); printk(KERN_CONT "\n"); } } /* * Sanitize the BIOS e820 map. * * Some e820 responses include overlapping entries. 
The following * replaces the original e820 map with a new one, removing overlaps, * and resolving conflicting memory types in favor of highest * numbered type. * * The input parameter biosmap points to an array of 'struct * e820entry' which on entry has elements in the range [0, *pnr_map) * valid, and which has space for up to max_nr_map entries. * On return, the resulting sanitized e820 map entries will be in * overwritten in the same location, starting at biosmap. * * The integer pointed to by pnr_map must be valid on entry (the * current number of valid entries located at biosmap) and will * be updated on return, with the new number of valid entries * (something no more than max_nr_map.) * * The return value from sanitize_e820_map() is zero if it * successfully 'sanitized' the map entries passed in, and is -1 * if it did nothing, which can happen if either of (1) it was * only passed one map entry, or (2) any of the input map entries * were invalid (start + size < start, meaning that the size was * so big the described memory range wrapped around through zero.) * * Visually we're performing the following * (1,2,3,4 = memory types)... 
* * Sample memory map (w/overlaps): * ____22__________________ * ______________________4_ * ____1111________________ * _44_____________________ * 11111111________________ * ____________________33__ * ___________44___________ * __________33333_________ * ______________22________ * ___________________2222_ * _________111111111______ * _____________________11_ * _________________4______ * * Sanitized equivalent (no overlap): * 1_______________________ * _44_____________________ * ___1____________________ * ____22__________________ * ______11________________ * _________1______________ * __________3_____________ * ___________44___________ * _____________33_________ * _______________2________ * ________________1_______ * _________________4______ * ___________________2____ * ____________________33__ * ______________________4_ */ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map) { struct change_member { struct e820entry *pbios; /* pointer to original bios entry */ unsigned long long addr; /* address for this change point */ }; static struct change_member change_point_list[2*E820_X_MAX] __initdata; static struct change_member *change_point[2*E820_X_MAX] __initdata; static struct e820entry *overlap_list[E820_X_MAX] __initdata; static struct e820entry new_bios[E820_X_MAX] __initdata; struct change_member *change_tmp; unsigned long current_type, last_type; unsigned long long last_addr; int chgidx, still_changing; int overlap_entries; int new_bios_entry; int old_nr, new_nr, chg_nr; int i; /* if there's only one memory region, don't bother */ if (*pnr_map < 2) return -1; old_nr = *pnr_map; BUG_ON(old_nr > max_nr_map); /* bail out if we find any unreasonable addresses in bios map */ for (i = 0; i < old_nr; i++) if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) return -1; /* create pointers for initial change-point information (for sorting) */ for (i = 0; i < 2 * old_nr; i++) change_point[i] = &change_point_list[i]; /* record all known 
change-points (starting and ending addresses), omitting those that are for empty memory regions */ chgidx = 0; for (i = 0; i < old_nr; i++) { if (biosmap[i].size != 0) { change_point[chgidx]->addr = biosmap[i].addr; change_point[chgidx++]->pbios = &biosmap[i]; change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size; change_point[chgidx++]->pbios = &biosmap[i]; } } chg_nr = chgidx; /* sort change-point list by memory addresses (low -> high) */ still_changing = 1; while (still_changing) { still_changing = 0; for (i = 1; i < chg_nr; i++) { unsigned long long curaddr, lastaddr; unsigned long long curpbaddr, lastpbaddr; curaddr = change_point[i]->addr; lastaddr = change_point[i - 1]->addr; curpbaddr = change_point[i]->pbios->addr; lastpbaddr = change_point[i - 1]->pbios->addr; /* * swap entries, when: * * curaddr > lastaddr or * curaddr == lastaddr and curaddr == curpbaddr and * lastaddr != lastpbaddr */ if (curaddr < lastaddr || (curaddr == lastaddr && curaddr == curpbaddr && lastaddr != lastpbaddr)) { change_tmp = change_point[i]; change_point[i] = change_point[i-1]; change_point[i-1] = change_tmp; still_changing = 1; } } } /* create a new bios memory map, removing overlaps */ overlap_entries = 0; /* number of entries in the overlap table */ new_bios_entry = 0; /* index for creating new bios map entries */ last_type = 0; /* start with undefined memory type */ last_addr = 0; /* start with 0 as last starting address */ /* loop through change-points, determining affect on the new bios map */ for (chgidx = 0; chgidx < chg_nr; chgidx++) { /* keep track of all overlapping bios entries */ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) { /* * add map entry to overlap list (> 1 entry * implies an overlap) */ overlap_list[overlap_entries++] = change_point[chgidx]->pbios; } else { /* * remove entry from list (order independent, * so swap with last) */ for (i = 0; i < overlap_entries; i++) { if (overlap_list[i] == change_point[chgidx]->pbios) 
overlap_list[i] = overlap_list[overlap_entries-1]; } overlap_entries--; } /* * if there are overlapping entries, decide which * "type" to use (larger value takes precedence -- * 1=usable, 2,3,4,4+=unusable) */ current_type = 0; for (i = 0; i < overlap_entries; i++) if (overlap_list[i]->type > current_type) current_type = overlap_list[i]->type; /* * continue building up new bios map based on this * information */ if (current_type != last_type) { if (last_type != 0) { new_bios[new_bios_entry].size = change_point[chgidx]->addr - last_addr; /* * move forward only if the new size * was non-zero */ if (new_bios[new_bios_entry].size != 0) /* * no more space left for new * bios entries ? */ if (++new_bios_entry >= max_nr_map) break; } if (current_type != 0) { new_bios[new_bios_entry].addr = change_point[chgidx]->addr; new_bios[new_bios_entry].type = current_type; last_addr = change_point[chgidx]->addr; } last_type = current_type; } } /* retain count for new bios entries */ new_nr = new_bios_entry; /* copy new bios mapping into original location */ memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry)); *pnr_map = new_nr; return 0; } static int __init __append_e820_map(struct e820entry *biosmap, int nr_map) { while (nr_map) { u64 start = biosmap->addr; u64 size = biosmap->size; u64 end = start + size; u32 type = biosmap->type; /* Overflow in 64 bits? Ignore the memory map. */ if (start > end) return -1; e820_add_region(start, size, type); biosmap++; nr_map--; } return 0; } /* * Copy the BIOS e820 map into a safe place. * * Sanity-check it while we're at it.. * * If we're lucky and live on a modern system, the setup code * will have given us a memory map that we can use to properly * set up memory. If we aren't, we'll fake a memory map. */ static int __init append_e820_map(struct e820entry *biosmap, int nr_map) { /* Only one memory region (or negative)? 
Ignore it */ if (nr_map < 2) return -1; return __append_e820_map(biosmap, nr_map); } static u64 __init __e820_update_range(struct e820map *e820x, u64 start, u64 size, unsigned old_type, unsigned new_type) { u64 end; unsigned int i; u64 real_updated_size = 0; BUG_ON(old_type == new_type); if (size > (ULLONG_MAX - start)) size = ULLONG_MAX - start; end = start + size; printk(KERN_DEBUG "e820 update range: %016Lx - %016Lx ", (unsigned long long) start, (unsigned long long) end); e820_print_type(old_type); printk(KERN_CONT " ==> "); e820_print_type(new_type); printk(KERN_CONT "\n"); for (i = 0; i < e820x->nr_map; i++) { struct e820entry *ei = &e820x->map[i]; u64 final_start, final_end; u64 ei_end; if (ei->type != old_type) continue; ei_end = ei->addr + ei->size; /* totally covered by new range? */ if (ei->addr >= start && ei_end <= end) { ei->type = new_type; real_updated_size += ei->size; continue; } /* new range is totally covered? */ if (ei->addr < start && ei_end > end) { __e820_add_region(e820x, start, size, new_type); __e820_add_region(e820x, end, ei_end - end, ei->type); ei->size = start - ei->addr; real_updated_size += size; continue; } /* partially covered */ final_start = max(start, ei->addr); final_end = min(end, ei_end); if (final_start >= final_end) continue; __e820_add_region(e820x, final_start, final_end - final_start, new_type); real_updated_size += final_end - final_start; /* * left range could be head or tail, so need to update * size at first. 
*/ ei->size -= final_end - final_start; if (ei->addr < final_start) continue; ei->addr = final_end; } return real_updated_size; } u64 __init e820_update_range(u64 start, u64 size, unsigned old_type, unsigned new_type) { return __e820_update_range(&e820, start, size, old_type, new_type); } static u64 __init e820_update_range_saved(u64 start, u64 size, unsigned old_type, unsigned new_type) { return __e820_update_range(&e820_saved, start, size, old_type, new_type); } /* make e820 not cover the range */ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type, int checktype) { int i; u64 end; u64 real_removed_size = 0; if (size > (ULLONG_MAX - start)) size = ULLONG_MAX - start; end = start + size; printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ", (unsigned long long) start, (unsigned long long) end); if (checktype) e820_print_type(old_type); printk(KERN_CONT "\n"); for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; u64 final_start, final_end; u64 ei_end; if (checktype && ei->type != old_type) continue; ei_end = ei->addr + ei->size; /* totally covered? */ if (ei->addr >= start && ei_end <= end) { real_removed_size += ei->size; memset(ei, 0, sizeof(struct e820entry)); continue; } /* new range is totally covered? */ if (ei->addr < start && ei_end > end) { e820_add_region(end, ei_end - end, ei->type); ei->size = start - ei->addr; real_removed_size += size; continue; } /* partially covered */ final_start = max(start, ei->addr); final_end = min(end, ei_end); if (final_start >= final_end) continue; real_removed_size += final_end - final_start; /* * left range could be head or tail, so need to update * size at first. 
*/ ei->size -= final_end - final_start; if (ei->addr < final_start) continue; ei->addr = final_end; } return real_removed_size; } void __init update_e820(void) { u32 nr_map; nr_map = e820.nr_map; if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map)) return; e820.nr_map = nr_map; printk(KERN_INFO "modified physical RAM map:\n"); e820_print_map("modified"); } static void __init update_e820_saved(void) { u32 nr_map; nr_map = e820_saved.nr_map; if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map)) return; e820_saved.nr_map = nr_map; } #define MAX_GAP_END 0x100000000ull /* * Search for a gap in the e820 memory space from start_addr to end_addr. */ __init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize, unsigned long start_addr, unsigned long long end_addr) { unsigned long long last; int i = e820.nr_map; int found = 0; last = (end_addr && end_addr < MAX_GAP_END) ? end_addr : MAX_GAP_END; while (--i >= 0) { unsigned long long start = e820.map[i].addr; unsigned long long end = start + e820.map[i].size; if (end < start_addr) continue; /* * Since "last" is at most 4GB, we know we'll * fit in 32 bits if this condition is true */ if (last > end) { unsigned long gap = last - end; if (gap >= *gapsize) { *gapsize = gap; *gapstart = end; found = 1; } } if (start < last) last = start; } return found; } /* * Search for the biggest gap in the low 32 bits of the e820 * memory space. We pass this space to PCI to assign MMIO resources * for hotplug or unconfigured devices in. * Hopefully the BIOS let enough space left. 
*/ __init void e820_setup_gap(void) { unsigned long gapstart, gapsize; int found; gapstart = 0x10000000; gapsize = 0x400000; found = e820_search_gap(&gapstart, &gapsize, 0, MAX_GAP_END); #ifdef CONFIG_X86_64 if (!found) { gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024; printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n" "PCI: Unassigned devices with 32bit resource registers may break!\n"); } #endif /* * e820_reserve_resources_late protect stolen RAM already */ pci_mem_start = gapstart; printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n", pci_mem_start, gapstart, gapsize); } /** * Because of the size limitation of struct boot_params, only first * 128 E820 memory entries are passed to kernel via * boot_params.e820_map, others are passed via SETUP_E820_EXT node of * linked list of struct setup_data, which is parsed here. */ void __init parse_e820_ext(struct setup_data *sdata) { int entries; struct e820entry *extmap; entries = sdata->len / sizeof(struct e820entry); extmap = (struct e820entry *)(sdata->data); __append_e820_map(extmap, entries); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); printk(KERN_INFO "extended physical RAM map:\n"); e820_print_map("extended"); } #if defined(CONFIG_X86_64) || \ (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION)) /** * Find the ranges of physical addresses that do not correspond to * e820 RAM areas and mark the corresponding pages as nosave for * hibernation (32 bit) or software suspend and suspend to RAM (64 bit). * * This function requires the e820 map to be sorted and without any * overlapping entries and assumes the first e820 area to be RAM. 
*/ void __init e820_mark_nosave_regions(unsigned long limit_pfn) { int i; unsigned long pfn; pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size); for (i = 1; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; if (pfn < PFN_UP(ei->addr)) register_nosave_region(pfn, PFN_UP(ei->addr)); pfn = PFN_DOWN(ei->addr + ei->size); if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN) register_nosave_region(PFN_UP(ei->addr), pfn); if (pfn >= limit_pfn) break; } } #endif #ifdef CONFIG_HIBERNATION /** * Mark ACPI NVS memory region, so that we can save/restore it during * hibernation and the subsequent resume. */ static int __init e820_mark_nvs_memory(void) { int i; for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; if (ei->type == E820_NVS) suspend_nvs_register(ei->addr, ei->size); } return 0; } core_initcall(e820_mark_nvs_memory); #endif /* * pre allocated 4k and reserved it in memblock and e820_saved */ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) { u64 size = 0; u64 addr; u64 start; for (start = startt; ; start += size) { start = memblock_x86_find_in_range_size(start, &size, align); if (start == MEMBLOCK_ERROR) return 0; if (size >= sizet) break; } #ifdef CONFIG_X86_32 if (start >= MAXMEM) return 0; if (start + size > MAXMEM) size = MAXMEM - start; #endif addr = round_down(start + size - sizet, align); if (addr < start) return 0; memblock_x86_reserve_range(addr, addr + sizet, "new next"); e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); update_e820_saved(); return addr; } #ifdef CONFIG_X86_32 # ifdef CONFIG_X86_PAE # define MAX_ARCH_PFN (1ULL<<(36-PAGE_SHIFT)) # else # define MAX_ARCH_PFN (1ULL<<(32-PAGE_SHIFT)) # endif #else /* CONFIG_X86_32 */ # define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT #endif /* * Find the highest page frame number we have available */ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type) { int i; 
unsigned long last_pfn = 0; unsigned long max_arch_pfn = MAX_ARCH_PFN; for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; unsigned long start_pfn; unsigned long end_pfn; if (ei->type != type) continue; start_pfn = ei->addr >> PAGE_SHIFT; end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT; if (start_pfn >= limit_pfn) continue; if (end_pfn > limit_pfn) { last_pfn = limit_pfn; break; } if (end_pfn > last_pfn) last_pfn = end_pfn; } if (last_pfn > max_arch_pfn) last_pfn = max_arch_pfn; printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n", last_pfn, max_arch_pfn); return last_pfn; } unsigned long __init e820_end_of_ram_pfn(void) { return e820_end_pfn(MAX_ARCH_PFN, E820_RAM); } unsigned long __init e820_end_of_low_ram_pfn(void) { return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM); } static void early_panic(char *msg) { early_printk(msg); panic(msg); } static int userdef __initdata; /* "mem=nopentium" disables the 4MB page tables. */ static int __init parse_memopt(char *p) { u64 mem_size; if (!p) return -EINVAL; if (!strcmp(p, "nopentium")) { #ifdef CONFIG_X86_32 setup_clear_cpu_cap(X86_FEATURE_PSE); return 0; #else printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); return -EINVAL; #endif } userdef = 1; mem_size = memparse(p, &p); /* don't remove all of memory when handling "mem={invalid}" param */ if (mem_size == 0) return -EINVAL; e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); return 0; } early_param("mem", parse_memopt); static int __init parse_memmap_opt(char *p) { char *oldp; u64 start_at, mem_size; if (!p) return -EINVAL; if (!strncmp(p, "exactmap", 8)) { #ifdef CONFIG_CRASH_DUMP /* * If we are doing a crash dump, we still need to know * the real mem size before original memory map is * reset. 
*/ saved_max_pfn = e820_end_of_ram_pfn(); #endif e820.nr_map = 0; userdef = 1; return 0; } oldp = p; mem_size = memparse(p, &p); if (p == oldp) return -EINVAL; userdef = 1; if (*p == '@') { start_at = memparse(p+1, &p); e820_add_region(start_at, mem_size, E820_RAM); } else if (*p == '#') { start_at = memparse(p+1, &p); e820_add_region(start_at, mem_size, E820_ACPI); } else if (*p == '$') { start_at = memparse(p+1, &p); e820_add_region(start_at, mem_size, E820_RESERVED); } else e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); return *p == '\0' ? 0 : -EINVAL; } early_param("memmap", parse_memmap_opt); void __init finish_e820_parsing(void) { if (userdef) { u32 nr = e820.nr_map; if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0) early_panic("Invalid user supplied memory map"); e820.nr_map = nr; printk(KERN_INFO "user-defined physical RAM map:\n"); e820_print_map("user"); } } static inline const char *e820_type_to_string(int e820_type) { switch (e820_type) { case E820_RESERVED_KERN: case E820_RAM: return "System RAM"; case E820_ACPI: return "ACPI Tables"; case E820_NVS: return "ACPI Non-volatile Storage"; case E820_UNUSABLE: return "Unusable memory"; default: return "reserved"; } } /* * Mark e820 reserved areas as busy for the resource manager. 
*/ static struct resource __initdata *e820_res; void __init e820_reserve_resources(void) { int i; struct resource *res; u64 end; res = alloc_bootmem(sizeof(struct resource) * e820.nr_map); e820_res = res; for (i = 0; i < e820.nr_map; i++) { end = e820.map[i].addr + e820.map[i].size - 1; if (end != (resource_size_t)end) { res++; continue; } res->name = e820_type_to_string(e820.map[i].type); res->start = e820.map[i].addr; res->end = end; res->flags = IORESOURCE_MEM; /* * don't register the region that could be conflicted with * pci device BAR resource and insert them later in * pcibios_resource_survey() */ if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) { res->flags |= IORESOURCE_BUSY; insert_resource(&iomem_resource, res); } res++; } for (i = 0; i < e820_saved.nr_map; i++) { struct e820entry *entry = &e820_saved.map[i]; firmware_map_add_early(entry->addr, entry->addr + entry->size - 1, e820_type_to_string(entry->type)); } } /* How much should we pad RAM ending depending on where it is? 
*/ static unsigned long ram_alignment(resource_size_t pos) { unsigned long mb = pos >> 20; /* To 64kB in the first megabyte */ if (!mb) return 64*1024; /* To 1MB in the first 16MB */ if (mb < 16) return 1024*1024; /* To 64MB for anything above that */ return 64*1024*1024; } #define MAX_RESOURCE_SIZE ((resource_size_t)-1) void __init e820_reserve_resources_late(void) { int i; struct resource *res; res = e820_res; for (i = 0; i < e820.nr_map; i++) { if (!res->parent && res->end) insert_resource_expand_to_fit(&iomem_resource, res); res++; } /* * Try to bump up RAM regions to reasonable boundaries to * avoid stolen RAM: */ for (i = 0; i < e820.nr_map; i++) { struct e820entry *entry = &e820.map[i]; u64 start, end; if (entry->type != E820_RAM) continue; start = entry->addr + entry->size; end = round_up(start, ram_alignment(start)) - 1; if (end > MAX_RESOURCE_SIZE) end = MAX_RESOURCE_SIZE; if (start >= end) continue; printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ", start, end); reserve_region_with_split(&iomem_resource, start, end, "RAM buffer"); } } char *__init default_machine_specific_memory_setup(void) { char *who = "BIOS-e820"; u32 new_nr; /* * Try to copy the BIOS-supplied E820-map. * * Otherwise fake a memory map; one section from 0k->640k, * the next section from 1mb->appropriate_mem_k */ new_nr = boot_params.e820_entries; sanitize_e820_map(boot_params.e820_map, ARRAY_SIZE(boot_params.e820_map), &new_nr); boot_params.e820_entries = new_nr; if (append_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0) { u64 mem_size; /* compare results from other methods and take the greater */ if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) { mem_size = boot_params.screen_info.ext_mem_k; who = "BIOS-88"; } else { mem_size = boot_params.alt_mem_k; who = "BIOS-e801"; } e820.nr_map = 0; e820_add_region(0, LOWMEMSIZE(), E820_RAM); e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM); } /* In case someone cares... 
*/ return who; } void __init setup_memory_map(void) { char *who; who = x86_init.resources.memory_setup(); memcpy(&e820_saved, &e820, sizeof(struct e820map)); printk(KERN_INFO "BIOS-provided physical RAM map:\n"); e820_print_map(who); } void __init memblock_x86_fill(void) { int i; u64 end; /* * EFI may have more than 128 entries * We are safe to enable resizing, beause memblock_x86_fill() * is rather later for x86 */ memblock_can_resize = 1; for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; end = ei->addr + ei->size; if (end != (resource_size_t)end) continue; if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN) continue; memblock_add(ei->addr, ei->size); } memblock_analyze(); memblock_dump_all(); } void __init memblock_find_dma_reserve(void) { #ifdef CONFIG_X86_64 u64 free_size_pfn; u64 mem_size_pfn; /* * need to find out used area below MAX_DMA_PFN * need to use memblock to get free size in [0, MAX_DMA_PFN] * at first, and assume boot_mem will not take below MAX_DMA_PFN */ mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; set_dma_reserve(mem_size_pfn - free_size_pfn); #endif }
gpl-2.0
elettronicagf/kernel-am335x
fs/dlm/dir.c
2854
7597
/****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions ** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "lowcomms.h" #include "rcom.h" #include "config.h" #include "memory.h" #include "recover.h" #include "util.h" #include "lock.h" #include "dir.h" /* * We use the upper 16 bits of the hash value to select the directory node. * Low bits are used for distribution of rsb's among hash buckets on each node. * * To give the exact range wanted (0 to num_nodes-1), we apply a modulus of * num_nodes to the hash value. This value in the desired range is used as an * offset into the sorted list of nodeid's to give the particular nodeid. 
*/ int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash) { uint32_t node; if (ls->ls_num_nodes == 1) return dlm_our_nodeid(); else { node = (hash >> 16) % ls->ls_total_weight; return ls->ls_node_array[node]; } } int dlm_dir_nodeid(struct dlm_rsb *r) { return r->res_dir_nodeid; } void dlm_recover_dir_nodeid(struct dlm_ls *ls) { struct dlm_rsb *r; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash); } up_read(&ls->ls_root_sem); } int dlm_recover_directory(struct dlm_ls *ls) { struct dlm_member *memb; char *b, *last_name = NULL; int error = -ENOMEM, last_len, nodeid, result; uint16_t namelen; unsigned int count = 0, count_match = 0, count_bad = 0, count_add = 0; log_debug(ls, "dlm_recover_directory"); if (dlm_no_directory(ls)) goto out_status; last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_NOFS); if (!last_name) goto out; list_for_each_entry(memb, &ls->ls_nodes, list) { if (memb->nodeid == dlm_our_nodeid()) continue; memset(last_name, 0, DLM_RESNAME_MAXLEN); last_len = 0; for (;;) { int left; error = dlm_recovery_stopped(ls); if (error) goto out_free; error = dlm_rcom_names(ls, memb->nodeid, last_name, last_len); if (error) goto out_free; cond_resched(); /* * pick namelen/name pairs out of received buffer */ b = ls->ls_recover_buf->rc_buf; left = ls->ls_recover_buf->rc_header.h_length; left -= sizeof(struct dlm_rcom); for (;;) { __be16 v; error = -EINVAL; if (left < sizeof(__be16)) goto out_free; memcpy(&v, b, sizeof(__be16)); namelen = be16_to_cpu(v); b += sizeof(__be16); left -= sizeof(__be16); /* namelen of 0xFFFFF marks end of names for this node; namelen of 0 marks end of the buffer */ if (namelen == 0xFFFF) goto done; if (!namelen) break; if (namelen > left) goto out_free; if (namelen > DLM_RESNAME_MAXLEN) goto out_free; error = dlm_master_lookup(ls, memb->nodeid, b, namelen, DLM_LU_RECOVER_DIR, &nodeid, &result); if (error) { log_error(ls, "recover_dir lookup %d", error); goto 
out_free; } /* The name was found in rsbtbl, but the * master nodeid is different from * memb->nodeid which says it is the master. * This should not happen. */ if (result == DLM_LU_MATCH && nodeid != memb->nodeid) { count_bad++; log_error(ls, "recover_dir lookup %d " "nodeid %d memb %d bad %u", result, nodeid, memb->nodeid, count_bad); print_hex_dump_bytes("dlm_recover_dir ", DUMP_PREFIX_NONE, b, namelen); } /* The name was found in rsbtbl, and the * master nodeid matches memb->nodeid. */ if (result == DLM_LU_MATCH && nodeid == memb->nodeid) { count_match++; } /* The name was not found in rsbtbl and was * added with memb->nodeid as the master. */ if (result == DLM_LU_ADD) { count_add++; } last_len = namelen; memcpy(last_name, b, namelen); b += namelen; left -= namelen; count++; } } done: ; } out_status: error = 0; dlm_set_recover_status(ls, DLM_RS_DIR); log_debug(ls, "dlm_recover_directory %u in %u new", count, count_add); out_free: kfree(last_name); out: return error; } static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len) { struct dlm_rsb *r; uint32_t hash, bucket; int rv; hash = jhash(name, len, 0); bucket = hash & (ls->ls_rsbtbl_size - 1); spin_lock(&ls->ls_rsbtbl[bucket].lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); if (rv) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, name, len, &r); spin_unlock(&ls->ls_rsbtbl[bucket].lock); if (!rv) return r; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { if (len == r->res_length && !memcmp(name, r->res_name, len)) { up_read(&ls->ls_root_sem); log_debug(ls, "find_rsb_root revert to root_list %s", r->res_name); return r; } } up_read(&ls->ls_root_sem); return NULL; } /* Find the rsb where we left off (or start again), then send rsb names for rsb's we're master of and whose directory node matches the requesting node. 
inbuf is the rsb name last sent, inlen is the name's length */ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, char *outbuf, int outlen, int nodeid) { struct list_head *list; struct dlm_rsb *r; int offset = 0, dir_nodeid; __be16 be_namelen; down_read(&ls->ls_root_sem); if (inlen > 1) { r = find_rsb_root(ls, inbuf, inlen); if (!r) { inbuf[inlen - 1] = '\0'; log_error(ls, "copy_master_names from %d start %d %s", nodeid, inlen, inbuf); goto out; } list = r->res_root_list.next; } else { list = ls->ls_root_list.next; } for (offset = 0; list != &ls->ls_root_list; list = list->next) { r = list_entry(list, struct dlm_rsb, res_root_list); if (r->res_nodeid) continue; dir_nodeid = dlm_dir_nodeid(r); if (dir_nodeid != nodeid) continue; /* * The block ends when we can't fit the following in the * remaining buffer space: * namelen (uint16_t) + * name (r->res_length) + * end-of-block record 0x0000 (uint16_t) */ if (offset + sizeof(uint16_t)*2 + r->res_length > outlen) { /* Write end-of-block record */ be_namelen = cpu_to_be16(0); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); ls->ls_recover_dir_sent_msg++; goto out; } be_namelen = cpu_to_be16(r->res_length); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); memcpy(outbuf + offset, r->res_name, r->res_length); offset += r->res_length; ls->ls_recover_dir_sent_res++; } /* * If we've reached the end of the list (and there's room) write a * terminating record. */ if ((list == &ls->ls_root_list) && (offset + sizeof(uint16_t) <= outlen)) { be_namelen = cpu_to_be16(0xFFFF); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); ls->ls_recover_dir_sent_msg++; } out: up_read(&ls->ls_root_sem); }
gpl-2.0
iconiaN/android_kernel_acer_a700
drivers/media/dvb/firewire/firedtv-dvb.c
3110
5841
/*
 * FireDTV driver (formerly known as FireSAT)
 *
 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include <dmxdev.h>
#include <dvb_demux.h>
#include <dvbdev.h>
#include <dvb_frontend.h>

#include "firedtv.h"

/*
 * Claim the lowest free entry of the 16-slot channel table by setting its
 * bit in fdtv->channel_active.  Returns the slot index, or 16 when every
 * slot is busy.  Uses the non-atomic __test_and_set_bit(); callers
 * serialize through fdtv->demux_mutex.
 */
static int alloc_channel(struct firedtv *fdtv)
{
	int i;

	for (i = 0; i < 16; i++)
		if (!__test_and_set_bit(i, &fdtv->channel_active))
			break;
	return i;
}

/*
 * Gather the PIDs of all currently active channel slots into pid[] and
 * store the count in *pidc.  pid[] must have room for 16 entries.
 */
static void collect_channels(struct firedtv *fdtv, int *pidc, u16 pid[])
{
	int i, n;

	for (i = 0, n = 0; i < 16; i++)
		if (test_bit(i, &fdtv->channel_active))
			pid[n++] = fdtv->channel_pid[i];
	*pidc = n;
}

/* Release channel slot i.  Non-atomic; caller holds fdtv->demux_mutex. */
static inline void dealloc_channel(struct firedtv *fdtv, int i)
{
	__clear_bit(i, &fdtv->channel_active);
}

/*
 * dvb_demux start_feed callback: allocate a channel slot for the feed and
 * program the tuner with the updated PID list (or request the full TS).
 * Returns 0 on success or a negative errno.
 */
int fdtv_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct firedtv *fdtv = dvbdmxfeed->demux->priv;
	int pidc, c, ret;
	u16 pids[16];

	switch (dvbdmxfeed->type) {
	case DMX_TYPE_TS:
	case DMX_TYPE_SEC:
		break;
	default:
		dev_err(fdtv->device, "can't start dmx feed: invalid type %u\n",
			dvbdmxfeed->type);
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&fdtv->demux_mutex))
		return -EINTR;

	if (dvbdmxfeed->type == DMX_TYPE_TS) {
		switch (dvbdmxfeed->pes_type) {
		case DMX_TS_PES_VIDEO:
		case DMX_TS_PES_AUDIO:
		case DMX_TS_PES_TELETEXT:
		case DMX_TS_PES_PCR:
		case DMX_TS_PES_OTHER:
			c = alloc_channel(fdtv);
			break;
		default:
			dev_err(fdtv->device,
				"can't start dmx feed: invalid pes type %u\n",
				dvbdmxfeed->pes_type);
			ret = -EINVAL;
			goto out;
		}
	} else {
		c = alloc_channel(fdtv);
	}

	/* alloc_channel() returns 16 when every slot is taken */
	if (c > 15) {
		dev_err(fdtv->device, "can't start dmx feed: busy\n");
		ret = -EBUSY;
		goto out;
	}

	/* remember the slot index in the feed's opaque priv pointer */
	dvbdmxfeed->priv = (typeof(dvbdmxfeed->priv))(unsigned long)c;
	fdtv->channel_pid[c] = dvbdmxfeed->pid;
	collect_channels(fdtv, &pidc, pids);

	if (dvbdmxfeed->pid == 8192) {
		/* PID 8192 (0x2000) selects the whole transport stream */
		ret = avc_tuner_get_ts(fdtv);
		if (ret) {
			dealloc_channel(fdtv, c);
			dev_err(fdtv->device, "can't get TS\n");
			goto out;
		}
	} else {
		ret = avc_tuner_set_pids(fdtv, pidc, pids);
		if (ret) {
			dealloc_channel(fdtv, c);
			dev_err(fdtv->device, "can't set PIDs\n");
			goto out;
		}
	}
out:
	mutex_unlock(&fdtv->demux_mutex);

	return ret;
}

/*
 * dvb_demux stop_feed callback: tear down decoder bookkeeping where
 * needed, release the feed's channel slot and reprogram the tuner with
 * the remaining PIDs.
 */
int fdtv_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct firedtv *fdtv = demux->priv;
	int pidc, c, ret;
	u16 pids[16];

	if (dvbdmxfeed->type == DMX_TYPE_TS &&
	    !((dvbdmxfeed->ts_type & TS_PACKET) &&
	      (demux->dmx.frontend->source != DMX_MEMORY_FE))) {

		if (dvbdmxfeed->ts_type & TS_DECODER) {
			if (dvbdmxfeed->pes_type >= DMX_TS_PES_OTHER ||
			    !demux->pesfilter[dvbdmxfeed->pes_type])
				return -EINVAL;

			/* NOTE(review): 0x8000 appears to mark the PES PID
			 * slot inactive, mirroring dvb-core convention --
			 * confirm against dvb_demux. */
			demux->pids[dvbdmxfeed->pes_type] |= 0x8000;
			demux->pesfilter[dvbdmxfeed->pes_type] = NULL;
		}

		if (!(dvbdmxfeed->ts_type & TS_DECODER &&
		      dvbdmxfeed->pes_type < DMX_TS_PES_OTHER))
			return 0;
	}

	if (mutex_lock_interruptible(&fdtv->demux_mutex))
		return -EINTR;

	/* the slot index stored by fdtv_start_feed() */
	c = (unsigned long)dvbdmxfeed->priv;
	dealloc_channel(fdtv, c);
	collect_channels(fdtv, &pidc, pids);

	ret = avc_tuner_set_pids(fdtv, pidc, pids);

	mutex_unlock(&fdtv->demux_mutex);

	return ret;
}

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/*
 * Register the complete DVB device stack (adapter, demux, dmxdev, net,
 * frontend, CA) for one FireDTV unit.  On any failure everything set up
 * so far is unwound in reverse order via the goto ladder below.
 * Returns 0 or a negative errno.
 */
int fdtv_dvb_register(struct firedtv *fdtv, const char *name)
{
	int err;

	err = dvb_register_adapter(&fdtv->adapter, name,
				   THIS_MODULE, fdtv->device, adapter_nr);
	if (err < 0)
		goto fail_log;

	/*DMX_TS_FILTERING | DMX_SECTION_FILTERING*/
	fdtv->demux.dmx.capabilities = 0;

	fdtv->demux.priv	= fdtv;
	fdtv->demux.filternum	= 16;
	fdtv->demux.feednum	= 16;
	fdtv->demux.start_feed	= fdtv_start_feed;
	fdtv->demux.stop_feed	= fdtv_stop_feed;
	fdtv->demux.write_to_decoder = NULL;

	err = dvb_dmx_init(&fdtv->demux);
	if (err)
		goto fail_unreg_adapter;

	fdtv->dmxdev.filternum    = 16;
	fdtv->dmxdev.demux        = &fdtv->demux.dmx;
	fdtv->dmxdev.capabilities = 0;

	err = dvb_dmxdev_init(&fdtv->dmxdev, &fdtv->adapter);
	if (err)
		goto fail_dmx_release;

	fdtv->frontend.source = DMX_FRONTEND_0;

	err = fdtv->demux.dmx.add_frontend(&fdtv->demux.dmx, &fdtv->frontend);
	if (err)
		goto fail_dmxdev_release;

	err = fdtv->demux.dmx.connect_frontend(&fdtv->demux.dmx,
					       &fdtv->frontend);
	if (err)
		goto fail_rem_frontend;

	dvb_net_init(&fdtv->adapter, &fdtv->dvbnet, &fdtv->demux.dmx);

	fdtv_frontend_init(fdtv, name);
	err = dvb_register_frontend(&fdtv->adapter, &fdtv->fe);
	if (err)
		goto fail_net_release;

	/* CA registration failure is non-fatal: log and continue */
	err = fdtv_ca_register(fdtv);
	if (err)
		dev_info(fdtv->device,
			 "Conditional Access Module not enabled\n");
	return 0;

fail_net_release:
	dvb_net_release(&fdtv->dvbnet);
	fdtv->demux.dmx.close(&fdtv->demux.dmx);
fail_rem_frontend:
	fdtv->demux.dmx.remove_frontend(&fdtv->demux.dmx, &fdtv->frontend);
fail_dmxdev_release:
	dvb_dmxdev_release(&fdtv->dmxdev);
fail_dmx_release:
	dvb_dmx_release(&fdtv->demux);
fail_unreg_adapter:
	dvb_unregister_adapter(&fdtv->adapter);
fail_log:
	dev_err(fdtv->device, "DVB initialization failed\n");
	return err;
}

/* Unregister and release the DVB stack in reverse registration order. */
void fdtv_dvb_unregister(struct firedtv *fdtv)
{
	fdtv_ca_release(fdtv);
	dvb_unregister_frontend(&fdtv->fe);
	dvb_net_release(&fdtv->dvbnet);
	fdtv->demux.dmx.close(&fdtv->demux.dmx);
	fdtv->demux.dmx.remove_frontend(&fdtv->demux.dmx, &fdtv->frontend);
	dvb_dmxdev_release(&fdtv->dmxdev);
	dvb_dmx_release(&fdtv->demux);
	dvb_unregister_adapter(&fdtv->adapter);
}
gpl-2.0
Evil-Green/Lonas_KL-GT-I9300
mm/page_isolation.c
3878
3644
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include "internal.h"

/*
 * Return the first page in [pfn, pfn + nr_pages) whose page frame number
 * is valid, or NULL when no pfn in the range is valid (i.e. the whole
 * range is a memory hole).
 */
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make page-allocation-type of range of pages
 * to be MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 *
 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	/* both ends must be pageblock aligned */
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page)) {
			/* this block cannot be isolated: roll back */
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	/*
	 * Un-isolate every block isolated so far: [start_pfn, undo_pfn).
	 * NOTE(review): this uses pfn_to_page() directly, unlike the forward
	 * pass which went through __first_valid_page() -- assumes the first
	 * pfn of each already-isolated block is valid; confirm.
	 */
	for (pfn = start_pfn; pfn < undo_pfn; pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn));

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;

	/* both ends must be pageblock aligned */
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		/* skip holes and blocks that were never isolated */
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page);
	}
	return 0;
}

/*
 * Test all pages in the range is free(means isolated) or not.
 * all pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before call this.
 *
 * Returns 1 if all pages in the range is isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/* a free buddy chunk covers 2^order pages at once */
			pfn += 1 << page_order(page);
		else if (page_count(page) == 0 &&
				page_private(page) == MIGRATE_ISOLATE)
			/* a freed page whose private field carries the
			 * isolate migratetype also counts as isolated */
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}

/*
 * Check that every pageblock in [start_pfn, end_pfn) is MIGRATE_ISOLATE
 * and that every page in the range is actually free.
 * Returns 0 when fully isolated, -EBUSY otherwise.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
	 * are not aligned to pageblock_nr_pages.
	 * Then we just check the pagetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	/* pfn < end_pfn: some block is not ISOLATE; !page: range is a hole */
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or Marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}
gpl-2.0
ShinySide/G900T_Permissive_FOJ4
drivers/mtd/nand/ams-delta.c
4902
7450
/* * drivers/mtd/nand/ams-delta.c * * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li> * * Derived from drivers/mtd/toto.c * Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl> * Partially stolen from drivers/mtd/nand/plat_nand.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Overview: * This is a device driver for the NAND flash device found on the * Amstrad E3 (Delta). */ #include <linux/slab.h> #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <mach/hardware.h> #include <asm/sizes.h> #include <linux/gpio.h> #include <plat/board-ams-delta.h> /* * MTD structure for E3 (Delta) */ static struct mtd_info *ams_delta_mtd = NULL; /* * Define partitions for flash devices */ static struct mtd_partition partition_info[] = { { .name = "Kernel", .offset = 0, .size = 3 * SZ_1M + SZ_512K }, { .name = "u-boot", .offset = 3 * SZ_1M + SZ_512K, .size = SZ_256K }, { .name = "u-boot params", .offset = 3 * SZ_1M + SZ_512K + SZ_256K, .size = SZ_256K }, { .name = "Amstrad LDR", .offset = 4 * SZ_1M, .size = SZ_256K }, { .name = "File system", .offset = 4 * SZ_1M + 1 * SZ_256K, .size = 27 * SZ_1M }, { .name = "PBL reserved", .offset = 32 * SZ_1M - 3 * SZ_256K, .size = 3 * SZ_256K }, }; static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte) { struct nand_chip *this = mtd->priv; void __iomem *io_base = this->priv; writew(0, io_base + OMAP_MPUIO_IO_CNTL); writew(byte, this->IO_ADDR_W); gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 0); ndelay(40); gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 1); } static u_char ams_delta_read_byte(struct mtd_info *mtd) { u_char res; struct nand_chip *this = mtd->priv; void __iomem *io_base = this->priv; 
gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 0); ndelay(40); writew(~0, io_base + OMAP_MPUIO_IO_CNTL); res = readw(this->IO_ADDR_R); gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 1); return res; } static void ams_delta_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { int i; for (i=0; i<len; i++) ams_delta_write_byte(mtd, buf[i]); } static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len) { int i; for (i=0; i<len; i++) buf[i] = ams_delta_read_byte(mtd); } static int ams_delta_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) { int i; for (i=0; i<len; i++) if (buf[i] != ams_delta_read_byte(mtd)) return -EFAULT; return 0; } /* * Command control function * * ctrl: * NAND_NCE: bit 0 -> bit 2 * NAND_CLE: bit 1 -> bit 7 * NAND_ALE: bit 2 -> bit 6 */ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { if (ctrl & NAND_CTRL_CHANGE) { gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NCE, (ctrl & NAND_NCE) == 0); gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_CLE, (ctrl & NAND_CLE) != 0); gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_ALE, (ctrl & NAND_ALE) != 0); } if (cmd != NAND_CMD_NONE) ams_delta_write_byte(mtd, cmd); } static int ams_delta_nand_ready(struct mtd_info *mtd) { return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB); } static const struct gpio _mandatory_gpio[] = { { .gpio = AMS_DELTA_GPIO_PIN_NAND_NCE, .flags = GPIOF_OUT_INIT_HIGH, .label = "nand_nce", }, { .gpio = AMS_DELTA_GPIO_PIN_NAND_NRE, .flags = GPIOF_OUT_INIT_HIGH, .label = "nand_nre", }, { .gpio = AMS_DELTA_GPIO_PIN_NAND_NWP, .flags = GPIOF_OUT_INIT_HIGH, .label = "nand_nwp", }, { .gpio = AMS_DELTA_GPIO_PIN_NAND_NWE, .flags = GPIOF_OUT_INIT_HIGH, .label = "nand_nwe", }, { .gpio = AMS_DELTA_GPIO_PIN_NAND_ALE, .flags = GPIOF_OUT_INIT_LOW, .label = "nand_ale", }, { .gpio = AMS_DELTA_GPIO_PIN_NAND_CLE, .flags = GPIOF_OUT_INIT_LOW, .label = "nand_cle", }, }; /* * Main initialization routine */ static int __devinit ams_delta_init(struct platform_device *pdev) { 
struct nand_chip *this; struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); void __iomem *io_base; int err = 0; if (!res) return -ENXIO; /* Allocate memory for MTD device structure and private data */ ams_delta_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); if (!ams_delta_mtd) { printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n"); err = -ENOMEM; goto out; } ams_delta_mtd->owner = THIS_MODULE; /* Get pointer to private data */ this = (struct nand_chip *) (&ams_delta_mtd[1]); /* Initialize structures */ memset(ams_delta_mtd, 0, sizeof(struct mtd_info)); memset(this, 0, sizeof(struct nand_chip)); /* Link the private data with the MTD structure */ ams_delta_mtd->priv = this; /* * Don't try to request the memory region from here, * it should have been already requested from the * gpio-omap driver and requesting it again would fail. */ io_base = ioremap(res->start, resource_size(res)); if (io_base == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); err = -EIO; goto out_free; } this->priv = io_base; /* Set address of NAND IO lines */ this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH; this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT; this->read_byte = ams_delta_read_byte; this->write_buf = ams_delta_write_buf; this->read_buf = ams_delta_read_buf; this->verify_buf = ams_delta_verify_buf; this->cmd_ctrl = ams_delta_hwcontrol; if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) { this->dev_ready = ams_delta_nand_ready; } else { this->dev_ready = NULL; printk(KERN_NOTICE "Couldn't request gpio for Delta NAND ready.\n"); } /* 25 us command delay time */ this->chip_delay = 30; this->ecc.mode = NAND_ECC_SOFT; platform_set_drvdata(pdev, io_base); /* Set chip enabled, but */ err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio)); if (err) goto out_gpio; /* Scan to find existence of the device */ if (nand_scan(ams_delta_mtd, 1)) { err = -ENXIO; goto out_mtd; } /* Register the 
partitions */ mtd_device_register(ams_delta_mtd, partition_info, ARRAY_SIZE(partition_info)); goto out; out_mtd: gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio)); out_gpio: platform_set_drvdata(pdev, NULL); gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB); iounmap(io_base); out_free: kfree(ams_delta_mtd); out: return err; } /* * Clean up routine */ static int __devexit ams_delta_cleanup(struct platform_device *pdev) { void __iomem *io_base = platform_get_drvdata(pdev); /* Release resources, unregister device */ nand_release(ams_delta_mtd); gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio)); gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB); iounmap(io_base); /* Free the MTD device structure */ kfree(ams_delta_mtd); return 0; } static struct platform_driver ams_delta_nand_driver = { .probe = ams_delta_init, .remove = __devexit_p(ams_delta_cleanup), .driver = { .name = "ams-delta-nand", .owner = THIS_MODULE, }, }; module_platform_driver(ams_delta_nand_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>"); MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
gpl-2.0
KronicKernel/falcon
fs/compat_binfmt_elf.c
9254
3540
/*
 * 32-bit compatibility support for ELF format executables and core dumps.
 *
 * Copyright (C) 2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 *
 * Red Hat Author: Roland McGrath.
 *
 * This file is used in a 64-bit kernel that wants to support 32-bit ELF.
 * asm/elf.h is responsible for defining the compat_* and COMPAT_* macros
 * used below, with definitions appropriate for 32-bit ABI compatibility.
 *
 * We use macros to rename the ABI types and machine-dependent
 * functions used in binfmt_elf.c to compat versions.
 */

#include <linux/elfcore-compat.h>
#include <linux/time.h>

/*
 * Rename the basic ELF layout types to refer to the 32-bit class of files.
 */
#undef	ELF_CLASS
#define ELF_CLASS	ELFCLASS32

#undef	elfhdr
#undef	elf_phdr
#undef	elf_shdr
#undef	elf_note
#undef	elf_addr_t
#define elfhdr		elf32_hdr
#define elf_phdr	elf32_phdr
#define elf_shdr	elf32_shdr
#define elf_note	elf32_note
#define elf_addr_t	Elf32_Addr

/*
 * The machine-dependent core note format types are defined in elfcore-compat.h,
 * which requires asm/elf.h to define compat_elf_gregset_t et al.
 */
#define elf_prstatus	compat_elf_prstatus
#define elf_prpsinfo	compat_elf_prpsinfo

/*
 * Compat version of cputime_to_compat_timeval, perhaps this
 * should be an inline in <linux/compat.h>.
 */
/* Convert a cputime value into a 32-bit compat_timeval (sec/usec). */
static void cputime_to_compat_timeval(const cputime_t cputime,
				      struct compat_timeval *value)
{
	struct timeval tv;
	cputime_to_timeval(cputime, &tv);
	/* narrow the native timeval fields into the compat layout */
	value->tv_sec = tv.tv_sec;
	value->tv_usec = tv.tv_usec;
}

#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval

/*
 * To use this file, asm/elf.h must define compat_elf_check_arch.
 * The other following macros can be defined if the compat versions
 * differ from the native ones, or omitted when they match.
 */

/* The arch check is mandatory; everything below is optional override. */
#undef	ELF_ARCH
#undef	elf_check_arch
#define	elf_check_arch	compat_elf_check_arch

#ifdef	COMPAT_ELF_PLATFORM
#undef	ELF_PLATFORM
#define	ELF_PLATFORM		COMPAT_ELF_PLATFORM
#endif

#ifdef	COMPAT_ELF_HWCAP
#undef	ELF_HWCAP
#define	ELF_HWCAP		COMPAT_ELF_HWCAP
#endif

#ifdef	COMPAT_ARCH_DLINFO
#undef	ARCH_DLINFO
#define	ARCH_DLINFO		COMPAT_ARCH_DLINFO
#endif

#ifdef	COMPAT_ELF_ET_DYN_BASE
#undef	ELF_ET_DYN_BASE
#define	ELF_ET_DYN_BASE		COMPAT_ELF_ET_DYN_BASE
#endif

#ifdef COMPAT_ELF_EXEC_PAGESIZE
#undef	ELF_EXEC_PAGESIZE
#define	ELF_EXEC_PAGESIZE	COMPAT_ELF_EXEC_PAGESIZE
#endif

#ifdef	COMPAT_ELF_PLAT_INIT
#undef	ELF_PLAT_INIT
#define	ELF_PLAT_INIT		COMPAT_ELF_PLAT_INIT
#endif

#ifdef	COMPAT_SET_PERSONALITY
#undef	SET_PERSONALITY
#define	SET_PERSONALITY		COMPAT_SET_PERSONALITY
#endif

#ifdef	compat_start_thread
#undef	start_thread
#define	start_thread		compat_start_thread
#endif

#ifdef	compat_arch_setup_additional_pages
#undef	ARCH_HAS_SETUP_ADDITIONAL_PAGES
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
#undef	arch_setup_additional_pages
#define	arch_setup_additional_pages compat_arch_setup_additional_pages
#endif

/*
 * Rename a few of the symbols that binfmt_elf.c will define.
 * These are all local so the names don't really matter, but it
 * might make some debugging less confusing not to duplicate them.
 */
#define elf_format		compat_elf_format
#define init_elf_binfmt		init_compat_elf_binfmt
#define exit_elf_binfmt		exit_compat_elf_binfmt

/*
 * We share all the actual code with the native (64-bit) version.
 */
#include "binfmt_elf.c"
gpl-2.0
NoelMacwan/SXDNickiKernels
arch/powerpc/platforms/8xx/m8xx_setup.c
9510
6685
/* * Copyright (C) 1995 Linus Torvalds * Adapted from 'alpha' version by Gary Thomas * Modified by Cort Dougan (cort@cs.nmt.edu) * Modified for MBX using prep/chrp/pmac functions by Dan (dmalek@jlc.net) * Further modified for generic 8xx by Dan. */ /* * bootup setup stuff.. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/time.h> #include <linux/rtc.h> #include <linux/fsl_devices.h> #include <asm/io.h> #include <asm/mpc8xx.h> #include <asm/8xx_immap.h> #include <asm/prom.h> #include <asm/fs_pd.h> #include <mm/mmu_decl.h> #include <sysdev/mpc8xx_pic.h> #include "mpc8xx.h" struct mpc8xx_pcmcia_ops m8xx_pcmcia_ops; extern int cpm_pic_init(void); extern int cpm_get_irq(void); /* A place holder for time base interrupts, if they are ever enabled. */ static irqreturn_t timebase_interrupt(int irq, void *dev) { printk ("timebase_interrupt()\n"); return IRQ_HANDLED; } static struct irqaction tbint_irqaction = { .handler = timebase_interrupt, .name = "tbint", }; /* per-board overridable init_internal_rtc() function. */ void __init __attribute__ ((weak)) init_internal_rtc(void) { sit8xx_t __iomem *sys_tmr = immr_map(im_sit); /* Disable the RTC one second and alarm interrupts. */ clrbits16(&sys_tmr->sit_rtcsc, (RTCSC_SIE | RTCSC_ALE)); /* Enable the RTC */ setbits16(&sys_tmr->sit_rtcsc, (RTCSC_RTF | RTCSC_RTE)); immr_unmap(sys_tmr); } static int __init get_freq(char *name, unsigned long *val) { struct device_node *cpu; const unsigned int *fp; int found = 0; /* The cpu node should have timebase and clock frequency properties */ cpu = of_find_node_by_type(NULL, "cpu"); if (cpu) { fp = of_get_property(cpu, name, NULL); if (fp) { found = 1; *val = *fp; } of_node_put(cpu); } return found; } /* The decrementer counts at the system (internal) clock frequency divided by * sixteen, or external oscillator divided by four. We force the processor * to use system clock divided by sixteen. 
*/ void __init mpc8xx_calibrate_decr(void) { struct device_node *cpu; cark8xx_t __iomem *clk_r1; car8xx_t __iomem *clk_r2; sitk8xx_t __iomem *sys_tmr1; sit8xx_t __iomem *sys_tmr2; int irq, virq; clk_r1 = immr_map(im_clkrstk); /* Unlock the SCCR. */ out_be32(&clk_r1->cark_sccrk, ~KAPWR_KEY); out_be32(&clk_r1->cark_sccrk, KAPWR_KEY); immr_unmap(clk_r1); /* Force all 8xx processors to use divide by 16 processor clock. */ clk_r2 = immr_map(im_clkrst); setbits32(&clk_r2->car_sccr, 0x02000000); immr_unmap(clk_r2); /* Processor frequency is MHz. */ ppc_proc_freq = 50000000; if (!get_freq("clock-frequency", &ppc_proc_freq)) printk(KERN_ERR "WARNING: Estimating processor frequency " "(not found)\n"); ppc_tb_freq = ppc_proc_freq / 16; printk("Decrementer Frequency = 0x%lx\n", ppc_tb_freq); /* Perform some more timer/timebase initialization. This used * to be done elsewhere, but other changes caused it to get * called more than once....that is a bad thing. * * First, unlock all of the registers we are going to modify. * To protect them from corruption during power down, registers * that are maintained by keep alive power are "locked". To * modify these registers we have to write the key value to * the key location associated with the register. * Some boards power up with these unlocked, while others * are locked. Writing anything (including the unlock code?) * to the unlocked registers will lock them again. So, here * we guarantee the registers are locked, then we unlock them * for our use. 
*/ sys_tmr1 = immr_map(im_sitk); out_be32(&sys_tmr1->sitk_tbscrk, ~KAPWR_KEY); out_be32(&sys_tmr1->sitk_rtcsck, ~KAPWR_KEY); out_be32(&sys_tmr1->sitk_tbk, ~KAPWR_KEY); out_be32(&sys_tmr1->sitk_tbscrk, KAPWR_KEY); out_be32(&sys_tmr1->sitk_rtcsck, KAPWR_KEY); out_be32(&sys_tmr1->sitk_tbk, KAPWR_KEY); immr_unmap(sys_tmr1); init_internal_rtc(); /* Enabling the decrementer also enables the timebase interrupts * (or from the other point of view, to get decrementer interrupts * we have to enable the timebase). The decrementer interrupt * is wired into the vector table, nothing to do here for that. */ cpu = of_find_node_by_type(NULL, "cpu"); virq= irq_of_parse_and_map(cpu, 0); irq = virq_to_hw(virq); sys_tmr2 = immr_map(im_sit); out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) | (TBSCR_TBF | TBSCR_TBE)); immr_unmap(sys_tmr2); if (setup_irq(virq, &tbint_irqaction)) panic("Could not allocate timer IRQ!"); } /* The RTC on the MPC8xx is an internal register. * We want to protect this during power down, so we need to unlock, * modify, and re-lock. */ int mpc8xx_set_rtc_time(struct rtc_time *tm) { sitk8xx_t __iomem *sys_tmr1; sit8xx_t __iomem *sys_tmr2; int time; sys_tmr1 = immr_map(im_sitk); sys_tmr2 = immr_map(im_sit); time = mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); out_be32(&sys_tmr1->sitk_rtck, KAPWR_KEY); out_be32(&sys_tmr2->sit_rtc, time); out_be32(&sys_tmr1->sitk_rtck, ~KAPWR_KEY); immr_unmap(sys_tmr2); immr_unmap(sys_tmr1); return 0; } void mpc8xx_get_rtc_time(struct rtc_time *tm) { unsigned long data; sit8xx_t __iomem *sys_tmr = immr_map(im_sit); /* Get time from the RTC. 
*/ data = in_be32(&sys_tmr->sit_rtc); to_tm(data, tm); tm->tm_year -= 1900; tm->tm_mon -= 1; immr_unmap(sys_tmr); return; } void mpc8xx_restart(char *cmd) { car8xx_t __iomem *clk_r = immr_map(im_clkrst); local_irq_disable(); setbits32(&clk_r->car_plprcr, 0x00000080); /* Clear the ME bit in MSR to cause checkstop on machine check */ mtmsr(mfmsr() & ~0x1000); in_8(&clk_r->res[0]); panic("Restart failed\n"); } static void cpm_cascade(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip; int cascade_irq; if ((cascade_irq = cpm_get_irq()) >= 0) { struct irq_desc *cdesc = irq_to_desc(cascade_irq); generic_handle_irq(cascade_irq); chip = irq_desc_get_chip(cdesc); chip->irq_eoi(&cdesc->irq_data); } chip = irq_desc_get_chip(desc); chip->irq_eoi(&desc->irq_data); } /* Initialize the internal interrupt controllers. The number of * interrupts supported can vary with the processor type, and the * 82xx family can have up to 64. * External interrupts can be either edge or level triggered, and * need to be initialized by the appropriate driver. */ void __init mpc8xx_pics_init(void) { int irq; if (mpc8xx_pic_init()) { printk(KERN_ERR "Failed interrupt 8xx controller initialization\n"); return; } irq = cpm_pic_init(); if (irq != NO_IRQ) irq_set_chained_handler(irq, cpm_cascade); }
gpl-2.0
XileForce/Vindicator-S6-Test
drivers/net/wireless/ath/ath6kl/bmi.c
9766
13142
/* * Copyright (c) 2004-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "core.h" #include "hif-ops.h" #include "target.h" #include "debug.h" int ath6kl_bmi_done(struct ath6kl *ar) { int ret; u32 cid = BMI_DONE; if (ar->bmi.done_sent) { ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n"); return 0; } ar->bmi.done_sent = true; ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); if (ret) { ath6kl_err("Unable to send bmi done: %d\n", ret); return ret; } return 0; } int ath6kl_bmi_get_target_info(struct ath6kl *ar, struct ath6kl_bmi_target_info *targ_info) { int ret; u32 cid = BMI_GET_TARGET_INFO; if (ar->bmi.done_sent) { ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); return -EACCES; } ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid)); if (ret) { ath6kl_err("Unable to send get target info: %d\n", ret); return ret; } if (ar->hif_type == ATH6KL_HIF_TYPE_USB) { ret = ath6kl_hif_bmi_read(ar, (u8 *)targ_info, sizeof(*targ_info)); } else { ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version, sizeof(targ_info->version)); } if (ret) { ath6kl_err("Unable to recv target info: %d\n", ret); return ret; } if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) { /* Determine how many bytes are in 
the Target's targ_info */ ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->byte_count, sizeof(targ_info->byte_count)); if (ret) { ath6kl_err("unable to read target info byte count: %d\n", ret); return ret; } /* * The target's targ_info doesn't match the host's targ_info. * We need to do some backwards compatibility to make this work. */ if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) { WARN_ON(1); return -EINVAL; } /* Read the remainder of the targ_info */ ret = ath6kl_hif_bmi_read(ar, ((u8 *)targ_info) + sizeof(targ_info->byte_count), sizeof(*targ_info) - sizeof(targ_info->byte_count)); if (ret) { ath6kl_err("Unable to read target info (%d bytes): %d\n", targ_info->byte_count, ret); return ret; } } ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n", targ_info->version, targ_info->type); return 0; } int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) { u32 cid = BMI_READ_MEMORY; int ret; u32 offset; u32 len_remain, rx_len; u16 size; if (ar->bmi.done_sent) { ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); return -EACCES; } size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len); if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } memset(ar->bmi.cmd_buf, 0, size); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read memory: device: addr: 0x%x, len: %d\n", addr, len); len_remain = len; while (len_remain) { rx_len = (len_remain < ar->bmi.max_data_size) ? 
len_remain : ar->bmi.max_data_size; offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len)); offset += sizeof(len); ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len); if (ret) { ath6kl_err("Unable to read from the device: %d\n", ret); return ret; } memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len); len_remain -= rx_len; addr += rx_len; } return 0; } int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) { u32 cid = BMI_WRITE_MEMORY; int ret; u32 offset; u32 len_remain, tx_len; const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len); u8 aligned_buf[400]; u8 *src; if (ar->bmi.done_sent) { ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); return -EACCES; } if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf))) return -E2BIG; memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi write memory: addr: 0x%x, len: %d\n", addr, len); len_remain = len; while (len_remain) { src = &buf[len - len_remain]; if (len_remain < (ar->bmi.max_data_size - header)) { if (len_remain & 3) { /* align it with 4 bytes */ len_remain = len_remain + (4 - (len_remain & 3)); memcpy(aligned_buf, src, len_remain); src = aligned_buf; } tx_len = len_remain; } else { tx_len = (ar->bmi.max_data_size - header); } offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len)); offset += sizeof(tx_len); memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len); offset += 
tx_len; ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } len_remain -= tx_len; addr += tx_len; } return 0; } int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param) { u32 cid = BMI_EXECUTE; int ret; u32 offset; u16 size; if (ar->bmi.done_sent) { ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); return -EACCES; } size = sizeof(cid) + sizeof(addr) + sizeof(param); if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } memset(ar->bmi.cmd_buf, 0, size); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d)\n", addr, *param); offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param)); offset += sizeof(*param); ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); if (ret) { ath6kl_err("Unable to read from the device: %d\n", ret); return ret; } memcpy(param, ar->bmi.cmd_buf, sizeof(*param)); return 0; } int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr) { u32 cid = BMI_SET_APP_START; int ret; u32 offset; u16 size; if (ar->bmi.done_sent) { ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); return -EACCES; } size = sizeof(cid) + sizeof(addr); if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } memset(ar->bmi.cmd_buf, 0, size); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr); offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } 
return 0; } int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param) { u32 cid = BMI_READ_SOC_REGISTER; int ret; u32 offset; u16 size; if (ar->bmi.done_sent) { ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); return -EACCES; } size = sizeof(cid) + sizeof(addr); if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } memset(ar->bmi.cmd_buf, 0, size); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr); offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param)); if (ret) { ath6kl_err("Unable to read from the device: %d\n", ret); return ret; } memcpy(param, ar->bmi.cmd_buf, sizeof(*param)); return 0; } int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param) { u32 cid = BMI_WRITE_SOC_REGISTER; int ret; u32 offset; u16 size; if (ar->bmi.done_sent) { ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); return -EACCES; } size = sizeof(cid) + sizeof(addr) + sizeof(param); if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } memset(ar->bmi.cmd_buf, 0, size); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi write SOC reg: addr: 0x%x, param: %d\n", addr, param); offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param)); offset += sizeof(param); ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } return 0; } int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len) { u32 cid = BMI_LZ_DATA; int ret; u32 offset; u32 len_remain, tx_len; const u32 header = 
sizeof(cid) + sizeof(len); u16 size; if (ar->bmi.done_sent) { ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); return -EACCES; } size = ar->bmi.max_data_size + header; if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } memset(ar->bmi.cmd_buf, 0, size); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d)\n", len); len_remain = len; while (len_remain) { tx_len = (len_remain < (ar->bmi.max_data_size - header)) ? len_remain : (ar->bmi.max_data_size - header); offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len)); offset += sizeof(tx_len); memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain], tx_len); offset += tx_len; ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to write to the device: %d\n", ret); return ret; } len_remain -= tx_len; } return 0; } int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr) { u32 cid = BMI_LZ_STREAM_START; int ret; u32 offset; u16 size; if (ar->bmi.done_sent) { ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid); return -EACCES; } size = sizeof(cid) + sizeof(addr); if (size > ar->bmi.max_cmd_size) { WARN_ON(1); return -EINVAL; } memset(ar->bmi.cmd_buf, 0, size); ath6kl_dbg(ATH6KL_DBG_BMI, "bmi LZ stream start: addr: 0x%x)\n", addr); offset = 0; memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr)); offset += sizeof(addr); ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset); if (ret) { ath6kl_err("Unable to start LZ stream to the device: %d\n", ret); return ret; } return 0; } int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len) { int ret; u32 last_word = 0; u32 last_word_offset = len & ~0x3; u32 unaligned_bytes = len & 0x3; ret = ath6kl_bmi_lz_stream_start(ar, addr); if (ret) return ret; if (unaligned_bytes) { /* copy the last word into a zero padded 
buffer */ memcpy(&last_word, &buf[last_word_offset], unaligned_bytes); } ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset); if (ret) return ret; if (unaligned_bytes) ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4); if (!ret) { /* Close compressed stream and open a new (fake) one. * This serves mainly to flush Target caches. */ ret = ath6kl_bmi_lz_stream_start(ar, 0x00); } return ret; } void ath6kl_bmi_reset(struct ath6kl *ar) { ar->bmi.done_sent = false; } int ath6kl_bmi_init(struct ath6kl *ar) { if (WARN_ON(ar->bmi.max_data_size == 0)) return -EINVAL; /* cmd + addr + len + data_size */ ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3); ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC); if (!ar->bmi.cmd_buf) return -ENOMEM; return 0; } void ath6kl_bmi_cleanup(struct ath6kl *ar) { kfree(ar->bmi.cmd_buf); ar->bmi.cmd_buf = NULL; }
gpl-2.0
danielstuart14/msm-3.10-8974
drivers/gpu/drm/gma500/mdfld_tmd_vid.c
10278
7236
/* * Copyright © 2010 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Jim Liu <jim.liu@intel.com> * Jackie Li<yaodong.li@intel.com> * Gideon Eaton <eaton. 
* Scott Rowe <scott.m.rowe@intel.com> */ #include "mdfld_dsi_dpi.h" #include "mdfld_dsi_pkg_sender.h" static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev) { struct drm_display_mode *mode; struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD; bool use_gct = false; /*Disable GCT for now*/ mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) return NULL; if (use_gct) { mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo; mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo; mode->hsync_start = mode->hdisplay + \ ((ti->hsync_offset_hi << 8) | \ ti->hsync_offset_lo); mode->hsync_end = mode->hsync_start + \ ((ti->hsync_pulse_width_hi << 8) | \ ti->hsync_pulse_width_lo); mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \ ti->hblank_lo); mode->vsync_start = \ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \ ti->vsync_offset_lo); mode->vsync_end = \ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \ ti->vsync_pulse_width_lo); mode->vtotal = mode->vdisplay + \ ((ti->vblank_hi << 8) | ti->vblank_lo); mode->clock = ti->pixel_clock * 10; dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay); dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay); dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start); dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end); dev_dbg(dev->dev, "htotal is %d\n", mode->htotal); dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start); dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end); dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal); dev_dbg(dev->dev, "clock is %d\n", mode->clock); } else { mode->hdisplay = 480; mode->vdisplay = 854; mode->hsync_start = 487; mode->hsync_end = 490; mode->htotal = 499; mode->vsync_start = 861; mode->vsync_end = 865; mode->vtotal = 873; mode->clock = 33264; } drm_mode_set_name(mode); drm_mode_set_crtcinfo(mode, 0); mode->type |= DRM_MODE_TYPE_PREFERRED; return mode; } static int tmd_vid_get_panel_info(struct drm_device *dev, 
int pipe, struct panel_info *pi) { if (!dev || !pi) return -EINVAL; pi->width_mm = TMD_PANEL_WIDTH; pi->height_mm = TMD_PANEL_HEIGHT; return 0; } /* ************************************************************************* *\ * FUNCTION: mdfld_init_TMD_MIPI * * DESCRIPTION: This function is called only by mrst_dsi_mode_set and * restore_display_registers. since this function does not * acquire the mutex, it is important that the calling function * does! \* ************************************************************************* */ /* FIXME: make the below data u8 instead of u32; note byte order! */ static u32 tmd_cmd_mcap_off[] = {0x000000b2}; static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef}; static u32 tmd_cmd_set_lane_num[] = {0x006360ef}; static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef}; static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef}; static u32 tmd_cmd_set_mode[] = {0x000000b3}; static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef}; static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df}; static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055}; static u32 tmd_cmd_set_video_mode[] = {0x00000153}; /*no auto_bl,need add in furture*/ static u32 tmd_cmd_enable_backlight[] = {0x00005ab4}; static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd}; static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config, int pipe) { struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config); DRM_INFO("Enter mdfld init TMD MIPI display.\n"); if (!sender) { DRM_ERROR("Cannot get sender\n"); return; } if (dsi_config->dvr_ic_inited) return; msleep(3); /* FIXME: make the below data u8 instead of u32; note byte order! 
*/ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off, sizeof(tmd_cmd_mcap_off), false); mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch, sizeof(tmd_cmd_enable_lane_switch), false); mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num, sizeof(tmd_cmd_set_lane_num), false); mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0, sizeof(tmd_cmd_pushing_clock0), false); mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1, sizeof(tmd_cmd_pushing_clock1), false); mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode, sizeof(tmd_cmd_set_mode), false); mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode, sizeof(tmd_cmd_set_sync_pulse_mode), false); mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column, sizeof(tmd_cmd_set_column), false); mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page, sizeof(tmd_cmd_set_page), false); mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode, sizeof(tmd_cmd_set_video_mode), false); mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight, sizeof(tmd_cmd_enable_backlight), false); mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming, sizeof(tmd_cmd_set_backlight_dimming), false); dsi_config->dvr_ic_inited = 1; } /*TPO DPI encoder helper funcs*/ static const struct drm_encoder_helper_funcs mdfld_tpo_dpi_encoder_helper_funcs = { .dpms = mdfld_dsi_dpi_dpms, .mode_fixup = mdfld_dsi_dpi_mode_fixup, .prepare = mdfld_dsi_dpi_prepare, .mode_set = mdfld_dsi_dpi_mode_set, .commit = mdfld_dsi_dpi_commit, }; /*TPO DPI encoder funcs*/ static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = { .destroy = drm_encoder_cleanup, }; const struct panel_funcs mdfld_tmd_vid_funcs = { .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs, .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, .get_config_mode = &tmd_vid_get_config_mode, .get_panel_info = tmd_vid_get_panel_info, .reset = mdfld_dsi_panel_reset, .drv_ic_init = mdfld_dsi_tmd_drv_ic_init, 
};
gpl-2.0
jpihet/linux-omap
sound/core/seq/seq_compat.c
13094
4715
/* * 32bit -> 64bit ioctl wrapper for sequencer API * Copyright (c) by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* This file included from seq.c */ #include <linux/compat.h> #include <linux/slab.h> struct snd_seq_port_info32 { struct snd_seq_addr addr; /* client/port numbers */ char name[64]; /* port name */ u32 capability; /* port capability bits */ u32 type; /* port type bits */ s32 midi_channels; /* channels per MIDI port */ s32 midi_voices; /* voices per MIDI port */ s32 synth_voices; /* voices per SYNTH port */ s32 read_use; /* R/O: subscribers for output (from this port) */ s32 write_use; /* R/O: subscribers for input (to this port) */ u32 kernel; /* reserved for kernel use (must be NULL) */ u32 flags; /* misc. 
conditioning */ unsigned char time_queue; /* queue # for timestamping */ char reserved[59]; /* for future use */ }; static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned int cmd, struct snd_seq_port_info32 __user *data32) { int err = -EFAULT; struct snd_seq_port_info *data; mm_segment_t fs; data = memdup_user(data32, sizeof(*data32)); if (IS_ERR(data)) return PTR_ERR(data); if (get_user(data->flags, &data32->flags) || get_user(data->time_queue, &data32->time_queue)) goto error; data->kernel = NULL; fs = snd_enter_user(); err = snd_seq_do_ioctl(client, cmd, data); snd_leave_user(fs); if (err < 0) goto error; if (copy_to_user(data32, data, sizeof(*data32)) || put_user(data->flags, &data32->flags) || put_user(data->time_queue, &data32->time_queue)) err = -EFAULT; error: kfree(data); return err; } /* */ enum { SNDRV_SEQ_IOCTL_CREATE_PORT32 = _IOWR('S', 0x20, struct snd_seq_port_info32), SNDRV_SEQ_IOCTL_DELETE_PORT32 = _IOW ('S', 0x21, struct snd_seq_port_info32), SNDRV_SEQ_IOCTL_GET_PORT_INFO32 = _IOWR('S', 0x22, struct snd_seq_port_info32), SNDRV_SEQ_IOCTL_SET_PORT_INFO32 = _IOW ('S', 0x23, struct snd_seq_port_info32), SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT32 = _IOWR('S', 0x52, struct snd_seq_port_info32), }; static long snd_seq_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_seq_client *client = file->private_data; void __user *argp = compat_ptr(arg); if (snd_BUG_ON(!client)) return -ENXIO; switch (cmd) { case SNDRV_SEQ_IOCTL_PVERSION: case SNDRV_SEQ_IOCTL_CLIENT_ID: case SNDRV_SEQ_IOCTL_SYSTEM_INFO: case SNDRV_SEQ_IOCTL_GET_CLIENT_INFO: case SNDRV_SEQ_IOCTL_SET_CLIENT_INFO: case SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT: case SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT: case SNDRV_SEQ_IOCTL_CREATE_QUEUE: case SNDRV_SEQ_IOCTL_DELETE_QUEUE: case SNDRV_SEQ_IOCTL_GET_QUEUE_INFO: case SNDRV_SEQ_IOCTL_SET_QUEUE_INFO: case SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE: case SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS: case SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO: case 
SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO: case SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER: case SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER: case SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT: case SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT: case SNDRV_SEQ_IOCTL_GET_CLIENT_POOL: case SNDRV_SEQ_IOCTL_SET_CLIENT_POOL: case SNDRV_SEQ_IOCTL_REMOVE_EVENTS: case SNDRV_SEQ_IOCTL_QUERY_SUBS: case SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION: case SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT: case SNDRV_SEQ_IOCTL_RUNNING_MODE: return snd_seq_do_ioctl(client, cmd, argp); case SNDRV_SEQ_IOCTL_CREATE_PORT32: return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, argp); case SNDRV_SEQ_IOCTL_DELETE_PORT32: return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_DELETE_PORT, argp); case SNDRV_SEQ_IOCTL_GET_PORT_INFO32: return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_GET_PORT_INFO, argp); case SNDRV_SEQ_IOCTL_SET_PORT_INFO32: return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_SET_PORT_INFO, argp); case SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT32: return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, argp); } return -ENOIOCTLCMD; }
gpl-2.0
txuki2005/TaUrUs_Kernel
arch/sh/boards/mach-r2d/irq.c
13094
4550
/* * linux/arch/sh/boards/renesas/rts7751r2d/irq.c * * Copyright (C) 2007 Magnus Damm * Copyright (C) 2000 Kazumoto Kojima * * Renesas Technology Sales RTS7751R2D Support, R2D-PLUS and R2D-1. * * Modified for RTS7751R2D by * Atom Create Engineering Co., Ltd. 2002. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/io.h> #include <mach/r2d.h> #define R2D_NR_IRL 13 enum { UNUSED = 0, /* board specific interrupt sources (R2D-1 and R2D-PLUS) */ EXT, /* EXT_INT0-3 */ RTC_T, RTC_A, /* Real Time Clock */ AX88796, /* Ethernet controller (R2D-1 board) */ KEY, /* Key input (R2D-PLUS board) */ SDCARD, /* SD Card */ CF_CD, CF_IDE, /* CF Card Detect + CF IDE */ SM501, /* SM501 aka Voyager */ PCI_INTD_RTL8139, /* Ethernet controller */ PCI_INTC_PCI1520, /* Cardbus/PCMCIA bridge */ PCI_INTB_RTL8139, /* Ethernet controller with HUB (R2D-PLUS board) */ PCI_INTB_SLOT, /* PCI Slot 3.3v (R2D-1 board) */ PCI_INTA_SLOT, /* PCI Slot 3.3v */ TP, /* Touch Panel */ }; #ifdef CONFIG_RTS7751R2D_1 /* Vectors for R2D-1 */ static struct intc_vect vectors_r2d_1[] __initdata = { INTC_IRQ(EXT, IRQ_EXT), INTC_IRQ(RTC_T, IRQ_RTC_T), INTC_IRQ(RTC_A, IRQ_RTC_A), INTC_IRQ(AX88796, IRQ_AX88796), INTC_IRQ(SDCARD, IRQ_SDCARD), INTC_IRQ(CF_CD, IRQ_CF_CD), INTC_IRQ(CF_IDE, IRQ_CF_IDE), /* ng */ INTC_IRQ(SM501, IRQ_VOYAGER), INTC_IRQ(PCI_INTD_RTL8139, IRQ_PCI_INTD), INTC_IRQ(PCI_INTC_PCI1520, IRQ_PCI_INTC), INTC_IRQ(PCI_INTB_SLOT, IRQ_PCI_INTB), INTC_IRQ(PCI_INTA_SLOT, IRQ_PCI_INTA), INTC_IRQ(TP, IRQ_TP), }; /* IRLMSK mask register layout for R2D-1 */ static struct intc_mask_reg mask_registers_r2d_1[] __initdata = { { 0xa4000000, 0, 16, /* IRLMSK */ { TP, PCI_INTA_SLOT, PCI_INTB_SLOT, PCI_INTC_PCI1520, PCI_INTD_RTL8139, SM501, CF_IDE, CF_CD, SDCARD, AX88796, RTC_A, RTC_T, 0, 0, 0, EXT } }, }; /* IRLn to IRQ table for R2D-1 */ static unsigned char irl2irq_r2d_1[R2D_NR_IRL] __initdata = { IRQ_PCI_INTD, IRQ_CF_IDE, IRQ_CF_CD, IRQ_PCI_INTC, IRQ_VOYAGER, IRQ_AX88796, 
IRQ_RTC_A, IRQ_RTC_T, IRQ_SDCARD, IRQ_PCI_INTA, IRQ_PCI_INTB, IRQ_EXT, IRQ_TP, }; static DECLARE_INTC_DESC(intc_desc_r2d_1, "r2d-1", vectors_r2d_1, NULL, mask_registers_r2d_1, NULL, NULL); #endif /* CONFIG_RTS7751R2D_1 */ #ifdef CONFIG_RTS7751R2D_PLUS /* Vectors for R2D-PLUS */ static struct intc_vect vectors_r2d_plus[] __initdata = { INTC_IRQ(EXT, IRQ_EXT), INTC_IRQ(RTC_T, IRQ_RTC_T), INTC_IRQ(RTC_A, IRQ_RTC_A), INTC_IRQ(KEY, IRQ_KEY), INTC_IRQ(SDCARD, IRQ_SDCARD), INTC_IRQ(CF_CD, IRQ_CF_CD), INTC_IRQ(CF_IDE, IRQ_CF_IDE), INTC_IRQ(SM501, IRQ_VOYAGER), INTC_IRQ(PCI_INTD_RTL8139, IRQ_PCI_INTD), INTC_IRQ(PCI_INTC_PCI1520, IRQ_PCI_INTC), INTC_IRQ(PCI_INTB_RTL8139, IRQ_PCI_INTB), INTC_IRQ(PCI_INTA_SLOT, IRQ_PCI_INTA), INTC_IRQ(TP, IRQ_TP), }; /* IRLMSK mask register layout for R2D-PLUS */ static struct intc_mask_reg mask_registers_r2d_plus[] __initdata = { { 0xa4000000, 0, 16, /* IRLMSK */ { TP, PCI_INTA_SLOT, PCI_INTB_RTL8139, PCI_INTC_PCI1520, PCI_INTD_RTL8139, SM501, CF_IDE, CF_CD, SDCARD, KEY, RTC_A, RTC_T, 0, 0, 0, EXT } }, }; /* IRLn to IRQ table for R2D-PLUS */ static unsigned char irl2irq_r2d_plus[R2D_NR_IRL] __initdata = { IRQ_PCI_INTD, IRQ_CF_IDE, IRQ_CF_CD, IRQ_PCI_INTC, IRQ_VOYAGER, IRQ_KEY, IRQ_RTC_A, IRQ_RTC_T, IRQ_SDCARD, IRQ_PCI_INTA, IRQ_PCI_INTB, IRQ_EXT, IRQ_TP, }; static DECLARE_INTC_DESC(intc_desc_r2d_plus, "r2d-plus", vectors_r2d_plus, NULL, mask_registers_r2d_plus, NULL, NULL); #endif /* CONFIG_RTS7751R2D_PLUS */ static unsigned char irl2irq[R2D_NR_IRL]; int rts7751r2d_irq_demux(int irq) { if (irq >= R2D_NR_IRL || irq < 0 || !irl2irq[irq]) return irq; return irl2irq[irq]; } /* * Initialize IRQ setting */ void __init init_rts7751r2d_IRQ(void) { struct intc_desc *d; switch (__raw_readw(PA_VERREG) & 0xf0) { #ifdef CONFIG_RTS7751R2D_PLUS case 0x10: printk(KERN_INFO "Using R2D-PLUS interrupt controller.\n"); d = &intc_desc_r2d_plus; memcpy(irl2irq, irl2irq_r2d_plus, R2D_NR_IRL); break; #endif #ifdef CONFIG_RTS7751R2D_1 case 0x00: /* according to 
manual */ case 0x30: /* in reality */ printk(KERN_INFO "Using R2D-1 interrupt controller.\n"); d = &intc_desc_r2d_1; memcpy(irl2irq, irl2irq_r2d_1, R2D_NR_IRL); break; #endif default: printk(KERN_INFO "Unknown R2D interrupt controller 0x%04x\n", __raw_readw(PA_VERREG)); return; } register_intc_controller(d); }
gpl-2.0
Alucard24/Alucard-Kernel-LG-G5
drivers/media/pci/ivtv/ivtv-mailbox.c
13094
13758
/* mailbox functions Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2004 Chris Kennedy <c@groovy.org> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <stdarg.h> #include "ivtv-driver.h" #include "ivtv-mailbox.h" /* Firmware mailbox flags*/ #define IVTV_MBOX_FIRMWARE_DONE 0x00000004 #define IVTV_MBOX_DRIVER_DONE 0x00000002 #define IVTV_MBOX_DRIVER_BUSY 0x00000001 #define IVTV_MBOX_FREE 0x00000000 /* Firmware mailbox standard timeout */ #define IVTV_API_STD_TIMEOUT 0x02000000 #define API_CACHE (1 << 0) /* Allow the command to be stored in the cache */ #define API_RESULT (1 << 1) /* Allow 1 second for this cmd to end */ #define API_FAST_RESULT (3 << 1) /* Allow 0.1 second for this cmd to end */ #define API_DMA (1 << 3) /* DMA mailbox, has special handling */ #define API_HIGH_VOL (1 << 5) /* High volume command (i.e. 
called during encoding or decoding) */ #define API_NO_WAIT_MB (1 << 4) /* Command may not wait for a free mailbox */ #define API_NO_WAIT_RES (1 << 5) /* Command may not wait for the result */ #define API_NO_POLL (1 << 6) /* Avoid pointless polling */ struct ivtv_api_info { int flags; /* Flags, see above */ const char *name; /* The name of the command */ }; #define API_ENTRY(x, f) [x] = { (f), #x } static const struct ivtv_api_info api_info[256] = { /* MPEG encoder API */ API_ENTRY(CX2341X_ENC_PING_FW, API_FAST_RESULT), API_ENTRY(CX2341X_ENC_START_CAPTURE, API_RESULT | API_NO_POLL), API_ENTRY(CX2341X_ENC_STOP_CAPTURE, API_RESULT), API_ENTRY(CX2341X_ENC_SET_AUDIO_ID, API_CACHE), API_ENTRY(CX2341X_ENC_SET_VIDEO_ID, API_CACHE), API_ENTRY(CX2341X_ENC_SET_PCR_ID, API_CACHE), API_ENTRY(CX2341X_ENC_SET_FRAME_RATE, API_CACHE), API_ENTRY(CX2341X_ENC_SET_FRAME_SIZE, API_CACHE), API_ENTRY(CX2341X_ENC_SET_BIT_RATE, API_CACHE), API_ENTRY(CX2341X_ENC_SET_GOP_PROPERTIES, API_CACHE), API_ENTRY(CX2341X_ENC_SET_ASPECT_RATIO, API_CACHE), API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_MODE, API_CACHE), API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_PROPS, API_CACHE), API_ENTRY(CX2341X_ENC_SET_CORING_LEVELS, API_CACHE), API_ENTRY(CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, API_CACHE), API_ENTRY(CX2341X_ENC_SET_VBI_LINE, API_RESULT), API_ENTRY(CX2341X_ENC_SET_STREAM_TYPE, API_CACHE), API_ENTRY(CX2341X_ENC_SET_OUTPUT_PORT, API_CACHE), API_ENTRY(CX2341X_ENC_SET_AUDIO_PROPERTIES, API_CACHE), API_ENTRY(CX2341X_ENC_HALT_FW, API_FAST_RESULT), API_ENTRY(CX2341X_ENC_GET_VERSION, API_FAST_RESULT), API_ENTRY(CX2341X_ENC_SET_GOP_CLOSURE, API_CACHE), API_ENTRY(CX2341X_ENC_GET_SEQ_END, API_RESULT), API_ENTRY(CX2341X_ENC_SET_PGM_INDEX_INFO, API_FAST_RESULT), API_ENTRY(CX2341X_ENC_SET_VBI_CONFIG, API_RESULT), API_ENTRY(CX2341X_ENC_SET_DMA_BLOCK_SIZE, API_CACHE), API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_10, API_FAST_RESULT), API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_9, API_FAST_RESULT), API_ENTRY(CX2341X_ENC_SCHED_DMA_TO_HOST, 
API_DMA | API_HIGH_VOL), API_ENTRY(CX2341X_ENC_INITIALIZE_INPUT, API_RESULT), API_ENTRY(CX2341X_ENC_SET_FRAME_DROP_RATE, API_CACHE), API_ENTRY(CX2341X_ENC_PAUSE_ENCODER, API_RESULT), API_ENTRY(CX2341X_ENC_REFRESH_INPUT, API_NO_WAIT_MB | API_HIGH_VOL), API_ENTRY(CX2341X_ENC_SET_COPYRIGHT, API_CACHE), API_ENTRY(CX2341X_ENC_SET_EVENT_NOTIFICATION, API_RESULT), API_ENTRY(CX2341X_ENC_SET_NUM_VSYNC_LINES, API_CACHE), API_ENTRY(CX2341X_ENC_SET_PLACEHOLDER, API_CACHE), API_ENTRY(CX2341X_ENC_MUTE_VIDEO, API_RESULT), API_ENTRY(CX2341X_ENC_MUTE_AUDIO, API_RESULT), API_ENTRY(CX2341X_ENC_SET_VERT_CROP_LINE, API_FAST_RESULT), API_ENTRY(CX2341X_ENC_MISC, API_FAST_RESULT), /* Obsolete PULLDOWN API command */ API_ENTRY(0xb1, API_CACHE), /* MPEG decoder API */ API_ENTRY(CX2341X_DEC_PING_FW, API_FAST_RESULT), API_ENTRY(CX2341X_DEC_START_PLAYBACK, API_RESULT | API_NO_POLL), API_ENTRY(CX2341X_DEC_STOP_PLAYBACK, API_RESULT), API_ENTRY(CX2341X_DEC_SET_PLAYBACK_SPEED, API_RESULT), API_ENTRY(CX2341X_DEC_STEP_VIDEO, API_RESULT), API_ENTRY(CX2341X_DEC_SET_DMA_BLOCK_SIZE, API_CACHE), API_ENTRY(CX2341X_DEC_GET_XFER_INFO, API_FAST_RESULT), API_ENTRY(CX2341X_DEC_GET_DMA_STATUS, API_FAST_RESULT), API_ENTRY(CX2341X_DEC_SCHED_DMA_FROM_HOST, API_DMA | API_HIGH_VOL), API_ENTRY(CX2341X_DEC_PAUSE_PLAYBACK, API_RESULT), API_ENTRY(CX2341X_DEC_HALT_FW, API_FAST_RESULT), API_ENTRY(CX2341X_DEC_SET_STANDARD, API_CACHE), API_ENTRY(CX2341X_DEC_GET_VERSION, API_FAST_RESULT), API_ENTRY(CX2341X_DEC_SET_STREAM_INPUT, API_CACHE), API_ENTRY(CX2341X_DEC_GET_TIMING_INFO, API_RESULT /*| API_NO_WAIT_RES*/), API_ENTRY(CX2341X_DEC_SET_AUDIO_MODE, API_CACHE), API_ENTRY(CX2341X_DEC_SET_EVENT_NOTIFICATION, API_RESULT), API_ENTRY(CX2341X_DEC_SET_DISPLAY_BUFFERS, API_CACHE), API_ENTRY(CX2341X_DEC_EXTRACT_VBI, API_RESULT), API_ENTRY(CX2341X_DEC_SET_DECODER_SOURCE, API_FAST_RESULT), API_ENTRY(CX2341X_DEC_SET_PREBUFFERING, API_CACHE), /* OSD API */ API_ENTRY(CX2341X_OSD_GET_FRAMEBUFFER, API_FAST_RESULT), 
API_ENTRY(CX2341X_OSD_GET_PIXEL_FORMAT, API_FAST_RESULT), API_ENTRY(CX2341X_OSD_SET_PIXEL_FORMAT, API_CACHE), API_ENTRY(CX2341X_OSD_GET_STATE, API_FAST_RESULT), API_ENTRY(CX2341X_OSD_SET_STATE, API_CACHE), API_ENTRY(CX2341X_OSD_GET_OSD_COORDS, API_FAST_RESULT), API_ENTRY(CX2341X_OSD_SET_OSD_COORDS, API_CACHE), API_ENTRY(CX2341X_OSD_GET_SCREEN_COORDS, API_FAST_RESULT), API_ENTRY(CX2341X_OSD_SET_SCREEN_COORDS, API_CACHE), API_ENTRY(CX2341X_OSD_GET_GLOBAL_ALPHA, API_FAST_RESULT), API_ENTRY(CX2341X_OSD_SET_GLOBAL_ALPHA, API_CACHE), API_ENTRY(CX2341X_OSD_SET_BLEND_COORDS, API_CACHE), API_ENTRY(CX2341X_OSD_GET_FLICKER_STATE, API_FAST_RESULT), API_ENTRY(CX2341X_OSD_SET_FLICKER_STATE, API_CACHE), API_ENTRY(CX2341X_OSD_BLT_COPY, API_RESULT), API_ENTRY(CX2341X_OSD_BLT_FILL, API_RESULT), API_ENTRY(CX2341X_OSD_BLT_TEXT, API_RESULT), API_ENTRY(CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, API_CACHE), API_ENTRY(CX2341X_OSD_SET_CHROMA_KEY, API_CACHE), API_ENTRY(CX2341X_OSD_GET_ALPHA_CONTENT_INDEX, API_FAST_RESULT), API_ENTRY(CX2341X_OSD_SET_ALPHA_CONTENT_INDEX, API_CACHE) }; static int try_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int mb) { u32 flags = readl(&mbdata->mbox[mb].flags); int is_free = flags == IVTV_MBOX_FREE || (flags & IVTV_MBOX_FIRMWARE_DONE); /* if the mailbox is free, then try to claim it */ if (is_free && !test_and_set_bit(mb, &mbdata->busy)) { write_sync(IVTV_MBOX_DRIVER_BUSY, &mbdata->mbox[mb].flags); return 1; } return 0; } /* Try to find a free mailbox. Note mailbox 0 is reserved for DMA and so is not attempted here. */ static int get_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int flags) { unsigned long then = jiffies; int i, mb; int max_mbox = mbdata->max_mbox; int retries = 100; /* All slow commands use the same mailbox, serializing them and also leaving the other mailbox free for simple fast commands. 
*/ if ((flags & API_FAST_RESULT) == API_RESULT) max_mbox = 1; /* find free non-DMA mailbox */ for (i = 0; i < retries; i++) { for (mb = 1; mb <= max_mbox; mb++) if (try_mailbox(itv, mbdata, mb)) return mb; /* Sleep before a retry, if not atomic */ if (!(flags & API_NO_WAIT_MB)) { if (time_after(jiffies, then + msecs_to_jiffies(10*retries))) break; ivtv_msleep_timeout(10, 0); } } return -ENODEV; } static void write_mailbox(volatile struct ivtv_mailbox __iomem *mbox, int cmd, int args, u32 data[]) { int i; write_sync(cmd, &mbox->cmd); write_sync(IVTV_API_STD_TIMEOUT, &mbox->timeout); for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++) write_sync(data[i], &mbox->data[i]); write_sync(IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_DRIVER_BUSY, &mbox->flags); } static void clear_all_mailboxes(struct ivtv *itv, struct ivtv_mailbox_data *mbdata) { int i; for (i = 0; i <= mbdata->max_mbox; i++) { IVTV_DEBUG_WARN("Clearing mailbox %d: cmd 0x%08x flags 0x%08x\n", i, readl(&mbdata->mbox[i].cmd), readl(&mbdata->mbox[i].flags)); write_sync(0, &mbdata->mbox[i].flags); clear_bit(i, &mbdata->busy); } } static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[]) { struct ivtv_mailbox_data *mbdata = (cmd >= 128) ? 
&itv->enc_mbox : &itv->dec_mbox; volatile struct ivtv_mailbox __iomem *mbox; int api_timeout = msecs_to_jiffies(1000); int flags, mb, i; unsigned long then; /* sanity checks */ if (NULL == mbdata) { IVTV_ERR("No mailbox allocated\n"); return -ENODEV; } if (args < 0 || args > CX2341X_MBOX_MAX_DATA || cmd < 0 || cmd > 255 || api_info[cmd].name == NULL) { IVTV_ERR("Invalid MB call: cmd = 0x%02x, args = %d\n", cmd, args); return -EINVAL; } if (api_info[cmd].flags & API_HIGH_VOL) { IVTV_DEBUG_HI_MB("MB Call: %s\n", api_info[cmd].name); } else { IVTV_DEBUG_MB("MB Call: %s\n", api_info[cmd].name); } /* clear possibly uninitialized part of data array */ for (i = args; i < CX2341X_MBOX_MAX_DATA; i++) data[i] = 0; /* If this command was issued within the last 30 minutes and with identical data, then just return 0 as there is no need to issue this command again. Just an optimization to prevent unnecessary use of mailboxes. */ if (itv->api_cache[cmd].last_jiffies && time_before(jiffies, itv->api_cache[cmd].last_jiffies + msecs_to_jiffies(1800000)) && !memcmp(data, itv->api_cache[cmd].data, sizeof(itv->api_cache[cmd].data))) { itv->api_cache[cmd].last_jiffies = jiffies; return 0; } flags = api_info[cmd].flags; if (flags & API_DMA) { for (i = 0; i < 100; i++) { mb = i % (mbdata->max_mbox + 1); if (try_mailbox(itv, mbdata, mb)) { write_mailbox(&mbdata->mbox[mb], cmd, args, data); clear_bit(mb, &mbdata->busy); return 0; } IVTV_DEBUG_WARN("%s: mailbox %d not free %08x\n", api_info[cmd].name, mb, readl(&mbdata->mbox[mb].flags)); } IVTV_WARN("Could not find free DMA mailbox for %s\n", api_info[cmd].name); clear_all_mailboxes(itv, mbdata); return -EBUSY; } if ((flags & API_FAST_RESULT) == API_FAST_RESULT) api_timeout = msecs_to_jiffies(100); mb = get_mailbox(itv, mbdata, flags); if (mb < 0) { IVTV_DEBUG_WARN("No free mailbox found (%s)\n", api_info[cmd].name); clear_all_mailboxes(itv, mbdata); return -EBUSY; } mbox = &mbdata->mbox[mb]; write_mailbox(mbox, cmd, args, data); if (flags & 
API_CACHE) { memcpy(itv->api_cache[cmd].data, data, sizeof(itv->api_cache[cmd].data)); itv->api_cache[cmd].last_jiffies = jiffies; } if ((flags & API_RESULT) == 0) { clear_bit(mb, &mbdata->busy); return 0; } /* Get results */ then = jiffies; if (!(flags & API_NO_POLL)) { /* First try to poll, then switch to delays */ for (i = 0; i < 100; i++) { if (readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE) break; } } while (!(readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE)) { if (time_after(jiffies, then + api_timeout)) { IVTV_DEBUG_WARN("Could not get result (%s)\n", api_info[cmd].name); /* reset the mailbox, but it is likely too late already */ write_sync(0, &mbox->flags); clear_bit(mb, &mbdata->busy); return -EIO; } if (flags & API_NO_WAIT_RES) mdelay(1); else ivtv_msleep_timeout(1, 0); } if (time_after(jiffies, then + msecs_to_jiffies(100))) IVTV_DEBUG_WARN("%s took %u jiffies\n", api_info[cmd].name, jiffies_to_msecs(jiffies - then)); for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++) data[i] = readl(&mbox->data[i]); write_sync(0, &mbox->flags); clear_bit(mb, &mbdata->busy); return 0; } int ivtv_api(struct ivtv *itv, int cmd, int args, u32 data[]) { int res = ivtv_api_call(itv, cmd, args, data); /* Allow a single retry, probably already too late though. If there is no free mailbox then that is usually an indication of a more serious problem. */ return (res == -EBUSY) ? ivtv_api_call(itv, cmd, args, data) : res; } int ivtv_api_func(void *priv, u32 cmd, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA]) { return ivtv_api(priv, cmd, in, data); } int ivtv_vapi_result(struct ivtv *itv, u32 data[CX2341X_MBOX_MAX_DATA], int cmd, int args, ...) { va_list ap; int i; va_start(ap, args); for (i = 0; i < args; i++) { data[i] = va_arg(ap, u32); } va_end(ap); return ivtv_api(itv, cmd, args, data); } int ivtv_vapi(struct ivtv *itv, int cmd, int args, ...) 
{ u32 data[CX2341X_MBOX_MAX_DATA]; va_list ap; int i; va_start(ap, args); for (i = 0; i < args; i++) { data[i] = va_arg(ap, u32); } va_end(ap); return ivtv_api(itv, cmd, args, data); } /* This one is for stuff that can't sleep.. irq handlers, etc.. */ void ivtv_api_get_data(struct ivtv_mailbox_data *mbdata, int mb, int argc, u32 data[]) { volatile u32 __iomem *p = mbdata->mbox[mb].data; int i; for (i = 0; i < argc; i++, p++) data[i] = readl(p); } /* Wipe api cache */ void ivtv_mailbox_cache_invalidate(struct ivtv *itv) { int i; for (i = 0; i < 256; i++) itv->api_cache[i].last_jiffies = 0; }
gpl-2.0
jkkm/binutils-gdb
gas/literal.c
39
3135
/* literal.c - GAS literal pool management. Copyright (C) 1994-2014 Free Software Foundation, Inc. Written by Ken Raeburn (raeburn@cygnus.com). This file is part of GAS, the GNU Assembler. GAS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GAS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GAS; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* This isn't quite a "constant" pool. Some of the values may get adjusted at run time, e.g., for symbolic relocations when shared libraries are in use. It's more of a "literal" pool. On the Alpha, this should be used for .lita and .lit8. (Is there ever a .lit4?) On the MIPS, it could be used for .lit4 as well. The expressions passed here should contain either constants or symbols, not a combination of both. Typically, the constant pool is accessed with some sort of GP register, so the size of the pool must be kept down if possible. The exception is section offsets -- if you're storing a pointer to the start of .data, for example, and your machine provides for 16-bit signed addends, you might want to store .data+32K, so that you can access all of the first 64K of .data with the one pointer. This isn't a requirement, just a guideline that can help keep .o file size down. 
*/ #include "as.h" #include "subsegs.h" #ifdef NEED_LITERAL_POOL valueT add_to_literal_pool (sym, addend, sec, size) symbolS *sym; valueT addend; segT sec; int size; { segT current_section = now_seg; int current_subsec = now_subseg; valueT offset; bfd_reloc_code_real_type reloc_type; char *p; segment_info_type *seginfo = seg_info (sec); fixS *fixp; offset = 0; /* @@ This assumes all entries in a given section will be of the same size... Probably correct, but unwise to rely on. */ /* This must always be called with the same subsegment. */ if (seginfo->frchainP) for (fixp = seginfo->frchainP->fix_root; fixp != (fixS *) NULL; fixp = fixp->fx_next, offset += size) { if (fixp->fx_addsy == sym && fixp->fx_offset == addend) return offset; } subseg_set (sec, 0); p = frag_more (size); memset (p, 0, size); switch (size) { case 4: reloc_type = BFD_RELOC_32; break; case 8: reloc_type = BFD_RELOC_64; break; default: abort (); } fix_new (frag_now, p - frag_now->fr_literal, size, sym, addend, 0, reloc_type); subseg_set (current_section, current_subsec); offset = seginfo->literal_pool_size; seginfo->literal_pool_size += size; return offset; } #endif
gpl-2.0
mattkelly/linux-2.6-xlnx
fs/ocfs2/dlm/dlmconvert.c
39
15562
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmconvert.c * * underlying calls for lock conversion * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/spinlock.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmconvert.h" #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" /* NOTE: __dlmconvert_master is the only function in here that * needs a spinlock held on entry (res->spinlock) and it is the * only one that holds a lock on exit (res->spinlock). * All other functions in here need no locks and drop all of * the locks that they acquire. 
*/ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type, int *call_ast, int *kick_thread); static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type); /* * this is only called directly by dlmlock(), and only when the * local node is the owner of the lockres * locking: * caller needs: none * taken: takes and drops res->spinlock * held on exit: none * returns: see __dlmconvert_master */ enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { int call_ast = 0, kick_thread = 0; enum dlm_status status; spin_lock(&res->spinlock); /* we are not in a network handler, this is fine */ __dlm_wait_on_lockres(res); __dlm_lockres_reserve_ast(res); res->state |= DLM_LOCK_RES_IN_PROGRESS; status = __dlmconvert_master(dlm, res, lock, flags, type, &call_ast, &kick_thread); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; spin_unlock(&res->spinlock); wake_up(&res->wq); if (status != DLM_NORMAL && status != DLM_NOTQUEUED) dlm_error(status); /* either queue the ast or release it */ if (call_ast) dlm_queue_ast(dlm, lock); else dlm_lockres_release_ast(dlm, res); if (kick_thread) dlm_kick_thread(dlm, res); return status; } /* performs lock conversion at the lockres master site * locking: * caller needs: res->spinlock * taken: takes and drops lock->spinlock * held on exit: res->spinlock * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED * call_ast: whether ast should be called for this lock * kick_thread: whether dlm_kick_thread should be called */ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type, int *call_ast, int *kick_thread) { enum dlm_status status = DLM_NORMAL; struct list_head *iter; struct dlm_lock *tmplock=NULL; assert_spin_locked(&res->spinlock); 
mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n", lock->ml.type, lock->ml.convert_type, type); spin_lock(&lock->spinlock); /* already converting? */ if (lock->ml.convert_type != LKM_IVMODE) { mlog(ML_ERROR, "attempted to convert a lock with a lock " "conversion pending\n"); status = DLM_DENIED; goto unlock_exit; } /* must be on grant queue to convert */ if (!dlm_lock_on_list(&res->granted, lock)) { mlog(ML_ERROR, "attempted to convert a lock not on grant " "queue\n"); status = DLM_DENIED; goto unlock_exit; } if (flags & LKM_VALBLK) { switch (lock->ml.type) { case LKM_EXMODE: /* EX + LKM_VALBLK + convert == set lvb */ mlog(0, "will set lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); lock->lksb->flags |= DLM_LKSB_PUT_LVB; break; case LKM_PRMODE: case LKM_NLMODE: /* refetch if new level is not NL */ if (type > LKM_NLMODE) { mlog(0, "will fetch new value into " "lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); lock->lksb->flags |= DLM_LKSB_GET_LVB; } else { mlog(0, "will NOT fetch new value " "into lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); flags &= ~(LKM_VALBLK); } break; } } /* in-place downconvert? 
*/ if (type <= lock->ml.type) goto grant; /* upconvert from here on */ status = DLM_NORMAL; list_for_each(iter, &res->granted) { tmplock = list_entry(iter, struct dlm_lock, list); if (tmplock == lock) continue; if (!dlm_lock_compatible(tmplock->ml.type, type)) goto switch_queues; } list_for_each(iter, &res->converting) { tmplock = list_entry(iter, struct dlm_lock, list); if (!dlm_lock_compatible(tmplock->ml.type, type)) goto switch_queues; /* existing conversion requests take precedence */ if (!dlm_lock_compatible(tmplock->ml.convert_type, type)) goto switch_queues; } /* fall thru to grant */ grant: mlog(0, "res %.*s, granting %s lock\n", res->lockname.len, res->lockname.name, dlm_lock_mode_name(type)); /* immediately grant the new lock type */ lock->lksb->status = DLM_NORMAL; if (lock->ml.node == dlm->node_num) mlog(0, "doing in-place convert for nonlocal lock\n"); lock->ml.type = type; if (lock->lksb->flags & DLM_LKSB_PUT_LVB) memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN); status = DLM_NORMAL; *call_ast = 1; goto unlock_exit; switch_queues: if (flags & LKM_NOQUEUE) { mlog(0, "failed to convert NOQUEUE lock %.*s from " "%d to %d...\n", res->lockname.len, res->lockname.name, lock->ml.type, type); status = DLM_NOTQUEUED; goto unlock_exit; } mlog(0, "res %.*s, queueing...\n", res->lockname.len, res->lockname.name); lock->ml.convert_type = type; /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->converting); unlock_exit: spin_unlock(&lock->spinlock); if (status == DLM_DENIED) { __dlm_print_one_lock_resource(res); } if (status == DLM_NORMAL) *kick_thread = 1; return status; } void dlm_revert_pending_convert(struct dlm_lock_resource *res, struct dlm_lock *lock) { /* do not alter lock refcount. switching lists. 
*/ list_move_tail(&lock->list, &res->granted); lock->ml.convert_type = LKM_IVMODE; lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); } /* messages the master site to do lock conversion * locking: * caller needs: none * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS * held on exit: none * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node */ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { enum dlm_status status; mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { mlog(0, "bailing out early since res is RECOVERING " "on secondary queue\n"); /* __dlm_print_one_lock_resource(res); */ status = DLM_RECOVERING; goto bail; } /* will exit this call with spinlock held */ __dlm_wait_on_lockres(res); if (lock->ml.convert_type != LKM_IVMODE) { __dlm_print_one_lock_resource(res); mlog(ML_ERROR, "converting a remote lock that is already " "converting! (cookie=%u:%llu, conv=%d)\n", dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), lock->ml.convert_type); status = DLM_DENIED; goto bail; } res->state |= DLM_LOCK_RES_IN_PROGRESS; /* move lock to local convert queue */ /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->converting); lock->convert_pending = 1; lock->ml.convert_type = type; if (flags & LKM_VALBLK) { if (lock->ml.type == LKM_EXMODE) { flags |= LKM_PUT_LVB; lock->lksb->flags |= DLM_LKSB_PUT_LVB; } else { if (lock->ml.convert_type == LKM_NLMODE) flags &= ~LKM_VALBLK; else { flags |= LKM_GET_LVB; lock->lksb->flags |= DLM_LKSB_GET_LVB; } } } spin_unlock(&res->spinlock); /* no locks held here. * need to wait for a reply as to whether it got queued or not. 
*/ status = dlm_send_remote_convert_request(dlm, res, lock, flags, type); spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; lock->convert_pending = 0; /* if it failed, move it back to granted queue */ if (status != DLM_NORMAL) { if (status != DLM_NOTQUEUED) dlm_error(status); dlm_revert_pending_convert(res, lock); } bail: spin_unlock(&res->spinlock); /* TODO: should this be a wake_one? */ /* wake up any IN_PROGRESS waiters */ wake_up(&res->wq); return status; } /* sends DLM_CONVERT_LOCK_MSG to master site * locking: * caller needs: none * taken: none * held on exit: none * returns: DLM_NOLOCKMGR, status from remote node */ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { struct dlm_convert_lock convert; int tmpret; enum dlm_status ret; int status = 0; struct kvec vec[2]; size_t veclen = 1; mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); memset(&convert, 0, sizeof(struct dlm_convert_lock)); convert.node_idx = dlm->node_num; convert.requested_type = type; convert.cookie = lock->ml.cookie; convert.namelen = res->lockname.len; convert.flags = cpu_to_be32(flags); memcpy(convert.name, res->lockname.name, convert.namelen); vec[0].iov_len = sizeof(struct dlm_convert_lock); vec[0].iov_base = &convert; if (flags & LKM_PUT_LVB) { /* extra data to send if we are updating lvb */ vec[1].iov_len = DLM_LVB_LEN; vec[1].iov_base = lock->lksb->lvb; veclen++; } tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key, vec, veclen, res->owner, &status); if (tmpret >= 0) { // successfully sent and received ret = status; // this is already a dlm_status if (ret == DLM_RECOVERING) { mlog(0, "node %u returned DLM_RECOVERING from convert " "message!\n", res->owner); } else if (ret == DLM_MIGRATING) { mlog(0, "node %u returned DLM_MIGRATING from convert " "message!\n", res->owner); } else if (ret == DLM_FORWARD) { mlog(0, "node %u returned DLM_FORWARD 
from convert " "message!\n", res->owner); } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED) dlm_error(ret); } else { mlog_errno(tmpret); if (dlm_is_host_down(tmpret)) { /* instead of logging the same network error over * and over, sleep here and wait for the heartbeat * to notice the node is dead. times out after 5s. */ dlm_wait_for_node_death(dlm, res->owner, DLM_NODE_DEATH_WAIT_MAX); ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " "from convert message!\n", res->owner); } else { ret = dlm_err_to_dlm_status(tmpret); } } return ret; } /* handler for DLM_CONVERT_LOCK_MSG on master site * locking: * caller needs: none * taken: takes and drop res->spinlock * held on exit: none * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS, * status from __dlmconvert_master */ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf; struct dlm_lock_resource *res = NULL; struct list_head *iter; struct dlm_lock *lock = NULL; struct dlm_lockstatus *lksb; enum dlm_status status = DLM_NORMAL; u32 flags; int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0; if (!dlm_grab(dlm)) { dlm_error(DLM_REJECTED); return DLM_REJECTED; } mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), "Domain %s not fully joined!\n", dlm->name); if (cnv->namelen > DLM_LOCKID_NAME_MAX) { status = DLM_IVBUFLEN; dlm_error(status); goto leave; } flags = be32_to_cpu(cnv->flags); if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == (LKM_PUT_LVB|LKM_GET_LVB)) { mlog(ML_ERROR, "both PUT and GET lvb specified\n"); status = DLM_BADARGS; goto leave; } mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : (flags & LKM_GET_LVB ? 
"get lvb" : "none")); status = DLM_IVLOCKID; res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen); if (!res) { dlm_error(status); goto leave; } spin_lock(&res->spinlock); status = __dlm_lockres_state_to_status(res); if (status != DLM_NORMAL) { spin_unlock(&res->spinlock); dlm_error(status); goto leave; } list_for_each(iter, &res->granted) { lock = list_entry(iter, struct dlm_lock, list); if (lock->ml.cookie == cnv->cookie && lock->ml.node == cnv->node_idx) { dlm_lock_get(lock); break; } lock = NULL; } spin_unlock(&res->spinlock); if (!lock) { status = DLM_IVLOCKID; mlog(ML_ERROR, "did not find lock to convert on grant queue! " "cookie=%u:%llu\n", dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie))); dlm_print_one_lock_resource(res); goto leave; } /* found the lock */ lksb = lock->lksb; /* see if caller needed to get/put lvb */ if (flags & LKM_PUT_LVB) { BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); lksb->flags |= DLM_LKSB_PUT_LVB; memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN); } else if (flags & LKM_GET_LVB) { BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); lksb->flags |= DLM_LKSB_GET_LVB; } spin_lock(&res->spinlock); status = __dlm_lockres_state_to_status(res); if (status == DLM_NORMAL) { __dlm_lockres_reserve_ast(res); ast_reserved = 1; res->state |= DLM_LOCK_RES_IN_PROGRESS; status = __dlmconvert_master(dlm, res, lock, flags, cnv->requested_type, &call_ast, &kick_thread); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; wake = 1; } spin_unlock(&res->spinlock); if (wake) wake_up(&res->wq); if (status != DLM_NORMAL) { if (status != DLM_NOTQUEUED) dlm_error(status); lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); } leave: if (lock) dlm_lock_put(lock); /* either queue the ast or release it, if reserved */ if (call_ast) dlm_queue_ast(dlm, lock); else if (ast_reserved) dlm_lockres_release_ast(dlm, res); if (kick_thread) dlm_kick_thread(dlm, res); if (res) dlm_lockres_put(res); dlm_put(dlm); 
return status; }
gpl-2.0
PrestigeMod/Seine
drivers/thermal/thermal_sys.c
39
35300
/* * thermal.c - Generic Thermal Management Sysfs support. * * Copyright (C) 2008 Intel Corp * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com> * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/module.h> #include <linux/device.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/kdev_t.h> #include <linux/idr.h> #include <linux/thermal.h> #include <linux/spinlock.h> #include <linux/reboot.h> #include <net/netlink.h> #include <net/genetlink.h> MODULE_AUTHOR("Zhang Rui"); MODULE_DESCRIPTION("Generic thermal management sysfs support"); MODULE_LICENSE("GPL"); #define PREFIX "Thermal: " struct thermal_cooling_device_instance { int id; char name[THERMAL_NAME_LENGTH]; struct thermal_zone_device *tz; struct thermal_cooling_device *cdev; int trip; char attr_name[THERMAL_NAME_LENGTH]; struct device_attribute attr; struct list_head node; }; static DEFINE_IDR(thermal_tz_idr); static DEFINE_IDR(thermal_cdev_idr); static DEFINE_MUTEX(thermal_idr_lock); static LIST_HEAD(thermal_tz_list); static LIST_HEAD(thermal_cdev_list); static DEFINE_MUTEX(thermal_list_lock); static unsigned int thermal_event_seqnum; static int get_idr(struct idr *idr, 
struct mutex *lock, int *id) { int err; again: if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) return -ENOMEM; if (lock) mutex_lock(lock); err = idr_get_new(idr, NULL, id); if (lock) mutex_unlock(lock); if (unlikely(err == -EAGAIN)) goto again; else if (unlikely(err)) return err; *id = *id & MAX_ID_MASK; return 0; } static void release_idr(struct idr *idr, struct mutex *lock, int id) { if (lock) mutex_lock(lock); idr_remove(idr, id); if (lock) mutex_unlock(lock); } /* sys I/F for thermal zone */ #define to_thermal_zone(_dev) \ container_of(_dev, struct thermal_zone_device, device) static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); return sprintf(buf, "%s\n", tz->type); } static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); long temperature; int ret; if (!tz->ops->get_temp) return -EPERM; ret = tz->ops->get_temp(tz, &temperature); if (ret) return ret; return sprintf(buf, "%ld\n", temperature); } static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); enum thermal_device_mode mode; int result; if (!tz->ops->get_mode) return -EPERM; result = tz->ops->get_mode(tz, &mode); if (result) return result; return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? 
"enabled" : "disabled"); } static ssize_t mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct thermal_zone_device *tz = to_thermal_zone(dev); int result; if (!tz->ops->set_mode) return -EPERM; if (!strncmp(buf, "enabled", sizeof("enabled"))) result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED); else if (!strncmp(buf, "disabled", sizeof("disabled"))) result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED); else result = -EINVAL; if (result) return result; return count; } static ssize_t trip_point_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); enum thermal_trip_type type; int trip, result; if (!tz->ops->get_trip_type) return -EPERM; if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip)) return -EINVAL; result = tz->ops->get_trip_type(tz, trip, &type); if (result) return result; switch (type) { case THERMAL_TRIP_CRITICAL: return sprintf(buf, "critical\n"); case THERMAL_TRIP_HOT: return sprintf(buf, "hot\n"); case THERMAL_TRIP_PASSIVE: return sprintf(buf, "passive\n"); case THERMAL_TRIP_ACTIVE: return sprintf(buf, "active\n"); case THERMAL_TRIP_STATE_ACTIVE: return sprintf(buf, "state-active\n"); default: return sprintf(buf, "unknown\n"); } } static ssize_t trip_point_temp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); int trip, ret; long temperature; if (!tz->ops->get_trip_temp) return -EPERM; if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) return -EINVAL; ret = tz->ops->get_trip_temp(tz, trip, &temperature); if (ret) return ret; return sprintf(buf, "%ld\n", temperature); } static ssize_t passive_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct thermal_zone_device *tz = to_thermal_zone(dev); struct thermal_cooling_device *cdev = NULL; int state; if (!sscanf(buf, "%d\n", &state)) return -EINVAL; /* 
sanity check: values below 1000 millicelcius don't make sense * and can cause the system to go into a thermal heart attack */ if (state && state < 1000) return -EINVAL; if (state && !tz->forced_passive) { mutex_lock(&thermal_list_lock); list_for_each_entry(cdev, &thermal_cdev_list, node) { if (!strncmp("Processor", cdev->type, sizeof("Processor"))) thermal_zone_bind_cooling_device(tz, THERMAL_TRIPS_NONE, cdev); } mutex_unlock(&thermal_list_lock); if (!tz->passive_delay) tz->passive_delay = 1000; } else if (!state && tz->forced_passive) { mutex_lock(&thermal_list_lock); list_for_each_entry(cdev, &thermal_cdev_list, node) { if (!strncmp("Processor", cdev->type, sizeof("Processor"))) thermal_zone_unbind_cooling_device(tz, THERMAL_TRIPS_NONE, cdev); } mutex_unlock(&thermal_list_lock); tz->passive_delay = 0; } tz->tc1 = 1; tz->tc2 = 1; tz->forced_passive = state; thermal_zone_device_update(tz); return count; } static ssize_t passive_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); return sprintf(buf, "%d\n", tz->forced_passive); } static DEVICE_ATTR(type, 0444, type_show, NULL); static DEVICE_ATTR(temp, 0444, temp_show, NULL); static DEVICE_ATTR(mode, 0644, mode_show, mode_store); static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, \ passive_store); static struct device_attribute trip_point_attrs[] = { __ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_0_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_1_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_1_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_2_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_2_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_3_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_3_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_4_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_4_temp, 0444, 
trip_point_temp_show, NULL), __ATTR(trip_point_5_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_5_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_6_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_6_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_7_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_7_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_8_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_8_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_9_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_9_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_10_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_10_temp, 0444, trip_point_temp_show, NULL), __ATTR(trip_point_11_type, 0444, trip_point_type_show, NULL), __ATTR(trip_point_11_temp, 0444, trip_point_temp_show, NULL), }; #define TRIP_POINT_ATTR_ADD(_dev, _index, result) \ do { \ result = device_create_file(_dev, \ &trip_point_attrs[_index * 2]); \ if (result) \ break; \ result = device_create_file(_dev, \ &trip_point_attrs[_index * 2 + 1]); \ } while (0) #define TRIP_POINT_ATTR_REMOVE(_dev, _index) \ do { \ device_remove_file(_dev, &trip_point_attrs[_index * 2]); \ device_remove_file(_dev, &trip_point_attrs[_index * 2 + 1]); \ } while (0) /* sys I/F for cooling device */ #define to_cooling_device(_dev) \ container_of(_dev, struct thermal_cooling_device, device) static ssize_t thermal_cooling_device_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); return sprintf(buf, "%s\n", cdev->type); } static ssize_t thermal_cooling_device_max_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); unsigned long state; int ret; ret = cdev->ops->get_max_state(cdev, &state); if (ret) return ret; return sprintf(buf, "%ld\n", state); } static ssize_t 
thermal_cooling_device_cur_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); unsigned long state; int ret; ret = cdev->ops->get_cur_state(cdev, &state); if (ret) return ret; return sprintf(buf, "%ld\n", state); } static ssize_t thermal_cooling_device_cur_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct thermal_cooling_device *cdev = to_cooling_device(dev); unsigned long state; int result; if (!sscanf(buf, "%ld\n", &state)) return -EINVAL; if ((long)state < 0) return -EINVAL; result = cdev->ops->set_cur_state(cdev, state); if (result) return result; return count; } static struct device_attribute dev_attr_cdev_type = __ATTR(type, 0444, thermal_cooling_device_type_show, NULL); static DEVICE_ATTR(max_state, 0444, thermal_cooling_device_max_state_show, NULL); static DEVICE_ATTR(cur_state, 0644, thermal_cooling_device_cur_state_show, thermal_cooling_device_cur_state_store); static ssize_t thermal_cooling_device_trip_point_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device_instance *instance; instance = container_of(attr, struct thermal_cooling_device_instance, attr); if (instance->trip == THERMAL_TRIPS_NONE) return sprintf(buf, "-1\n"); else return sprintf(buf, "%d\n", instance->trip); } /* Device management */ #if defined(CONFIG_THERMAL_HWMON) /* hwmon sys I/F */ #include <linux/hwmon.h> static LIST_HEAD(thermal_hwmon_list); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev); return sprintf(buf, "%s\n", hwmon->type); } static DEVICE_ATTR(name, 0444, name_show, NULL); static ssize_t temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) { long temperature; int ret; struct thermal_hwmon_attr *hwmon_attr = container_of(attr, struct thermal_hwmon_attr, attr); struct 
thermal_zone_device *tz = container_of(hwmon_attr, struct thermal_zone_device, temp_input); ret = tz->ops->get_temp(tz, &temperature); if (ret) return ret; return sprintf(buf, "%ld\n", temperature); } static ssize_t temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_hwmon_attr *hwmon_attr = container_of(attr, struct thermal_hwmon_attr, attr); struct thermal_zone_device *tz = container_of(hwmon_attr, struct thermal_zone_device, temp_crit); long temperature; int ret; ret = tz->ops->get_trip_temp(tz, 0, &temperature); if (ret) return ret; return sprintf(buf, "%ld\n", temperature); } static int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) { struct thermal_hwmon_device *hwmon; int new_hwmon_device = 1; int result; mutex_lock(&thermal_list_lock); list_for_each_entry(hwmon, &thermal_hwmon_list, node) if (!strcmp(hwmon->type, tz->type)) { new_hwmon_device = 0; mutex_unlock(&thermal_list_lock); goto register_sys_interface; } mutex_unlock(&thermal_list_lock); hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL); if (!hwmon) return -ENOMEM; INIT_LIST_HEAD(&hwmon->tz_list); strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); hwmon->device = hwmon_device_register(NULL); if (IS_ERR(hwmon->device)) { result = PTR_ERR(hwmon->device); goto free_mem; } dev_set_drvdata(hwmon->device, hwmon); result = device_create_file(hwmon->device, &dev_attr_name); if (result) goto free_mem; register_sys_interface: tz->hwmon = hwmon; hwmon->count++; snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH, "temp%d_input", hwmon->count); tz->temp_input.attr.attr.name = tz->temp_input.name; tz->temp_input.attr.attr.mode = 0444; tz->temp_input.attr.show = temp_input_show; sysfs_attr_init(&tz->temp_input.attr.attr); result = device_create_file(hwmon->device, &tz->temp_input.attr); if (result) goto unregister_name; if (tz->ops->get_crit_temp) { unsigned long temperature; if (!tz->ops->get_crit_temp(tz, &temperature)) { 
snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH, "temp%d_crit", hwmon->count); tz->temp_crit.attr.attr.name = tz->temp_crit.name; tz->temp_crit.attr.attr.mode = 0444; tz->temp_crit.attr.show = temp_crit_show; sysfs_attr_init(&tz->temp_crit.attr.attr); result = device_create_file(hwmon->device, &tz->temp_crit.attr); if (result) goto unregister_input; } } mutex_lock(&thermal_list_lock); if (new_hwmon_device) list_add_tail(&hwmon->node, &thermal_hwmon_list); list_add_tail(&tz->hwmon_node, &hwmon->tz_list); mutex_unlock(&thermal_list_lock); return 0; unregister_input: device_remove_file(hwmon->device, &tz->temp_input.attr); unregister_name: if (new_hwmon_device) { device_remove_file(hwmon->device, &dev_attr_name); hwmon_device_unregister(hwmon->device); } free_mem: if (new_hwmon_device) kfree(hwmon); return result; } static void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) { struct thermal_hwmon_device *hwmon = tz->hwmon; tz->hwmon = NULL; device_remove_file(hwmon->device, &tz->temp_input.attr); if (tz->ops->get_crit_temp) device_remove_file(hwmon->device, &tz->temp_crit.attr); mutex_lock(&thermal_list_lock); list_del(&tz->hwmon_node); if (!list_empty(&hwmon->tz_list)) { mutex_unlock(&thermal_list_lock); return; } list_del(&hwmon->node); mutex_unlock(&thermal_list_lock); device_remove_file(hwmon->device, &dev_attr_name); hwmon_device_unregister(hwmon->device); kfree(hwmon); } #else static int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) { return 0; } static void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) { } #endif static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, int delay) { cancel_delayed_work(&(tz->poll_queue)); if (!delay) return; if (delay > 1000) schedule_delayed_work(&(tz->poll_queue), round_jiffies(msecs_to_jiffies(delay))); else schedule_delayed_work(&(tz->poll_queue), msecs_to_jiffies(delay)); } static void thermal_zone_device_passive(struct thermal_zone_device *tz, int temp, int trip_temp, 
int trip) { int trend = 0; struct thermal_cooling_device_instance *instance; struct thermal_cooling_device *cdev; long state, max_state; /* * Above Trip? * ----------- * Calculate the thermal trend (using the passive cooling equation) * and modify the performance limit for all passive cooling devices * accordingly. Note that we assume symmetry. */ if (temp >= trip_temp) { tz->passive = true; trend = (tz->tc1 * (temp - tz->last_temperature)) + (tz->tc2 * (temp - trip_temp)); /* Heating up? */ if (trend > 0) { list_for_each_entry(instance, &tz->cooling_devices, node) { if (instance->trip != trip) continue; cdev = instance->cdev; cdev->ops->get_cur_state(cdev, &state); cdev->ops->get_max_state(cdev, &max_state); if (state++ < max_state) cdev->ops->set_cur_state(cdev, state); } } else if (trend < 0) { /* Cooling off? */ list_for_each_entry(instance, &tz->cooling_devices, node) { if (instance->trip != trip) continue; cdev = instance->cdev; cdev->ops->get_cur_state(cdev, &state); cdev->ops->get_max_state(cdev, &max_state); if (state > 0) cdev->ops->set_cur_state(cdev, --state); } } return; } /* * Below Trip? * ----------- * Implement passive cooling hysteresis to slowly increase performance * and avoid thrashing around the passive trip point. Note that we * assume symmetry. */ list_for_each_entry(instance, &tz->cooling_devices, node) { if (instance->trip != trip) continue; cdev = instance->cdev; cdev->ops->get_cur_state(cdev, &state); cdev->ops->get_max_state(cdev, &max_state); if (state > 0) cdev->ops->set_cur_state(cdev, --state); if (state == 0) tz->passive = false; } } static void thermal_zone_device_check(struct work_struct *work) { struct thermal_zone_device *tz = container_of(work, struct thermal_zone_device, poll_queue.work); thermal_zone_device_update(tz); } /** * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone * @tz: thermal zone device * @trip: indicates which trip point the cooling devices is * associated with in this thermal zone. 
* @cdev: thermal cooling device * * This function is usually called in the thermal zone device .bind callback. */ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, int trip, struct thermal_cooling_device *cdev) { struct thermal_cooling_device_instance *dev; struct thermal_cooling_device_instance *pos; struct thermal_zone_device *pos1; struct thermal_cooling_device *pos2; int result; if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) return -EINVAL; list_for_each_entry(pos1, &thermal_tz_list, node) { if (pos1 == tz) break; } list_for_each_entry(pos2, &thermal_cdev_list, node) { if (pos2 == cdev) break; } if (tz != pos1 || cdev != pos2) return -EINVAL; dev = kzalloc(sizeof(struct thermal_cooling_device_instance), GFP_KERNEL); if (!dev) return -ENOMEM; dev->tz = tz; dev->cdev = cdev; dev->trip = trip; result = get_idr(&tz->idr, &tz->lock, &dev->id); if (result) goto free_mem; sprintf(dev->name, "cdev%d", dev->id); result = sysfs_create_link(&tz->device.kobj, &cdev->device.kobj, dev->name); if (result) goto release_idr; sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); sysfs_attr_init(&dev->attr.attr); dev->attr.attr.name = dev->attr_name; dev->attr.attr.mode = 0444; dev->attr.show = thermal_cooling_device_trip_point_show; result = device_create_file(&tz->device, &dev->attr); if (result) goto remove_symbol_link; mutex_lock(&tz->lock); list_for_each_entry(pos, &tz->cooling_devices, node) if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { result = -EEXIST; break; } if (!result) list_add_tail(&dev->node, &tz->cooling_devices); mutex_unlock(&tz->lock); if (!result) return 0; device_remove_file(&tz->device, &dev->attr); remove_symbol_link: sysfs_remove_link(&tz->device.kobj, dev->name); release_idr: release_idr(&tz->idr, &tz->lock, dev->id); free_mem: kfree(dev); return result; } EXPORT_SYMBOL(thermal_zone_bind_cooling_device); /** * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone * @tz: 
thermal zone device * @trip: indicates which trip point the cooling devices is * associated with in this thermal zone. * @cdev: thermal cooling device * * This function is usually called in the thermal zone device .unbind callback. */ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, int trip, struct thermal_cooling_device *cdev) { struct thermal_cooling_device_instance *pos, *next; mutex_lock(&tz->lock); list_for_each_entry_safe(pos, next, &tz->cooling_devices, node) { if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { list_del(&pos->node); mutex_unlock(&tz->lock); goto unbind; } } mutex_unlock(&tz->lock); return -ENODEV; unbind: device_remove_file(&tz->device, &pos->attr); sysfs_remove_link(&tz->device.kobj, pos->name); release_idr(&tz->idr, &tz->lock, pos->id); kfree(pos); return 0; } EXPORT_SYMBOL(thermal_zone_unbind_cooling_device); static void thermal_release(struct device *dev) { struct thermal_zone_device *tz; struct thermal_cooling_device *cdev; if (!strncmp(dev_name(dev), "thermal_zone", sizeof "thermal_zone" - 1)) { tz = to_thermal_zone(dev); kfree(tz); } else { cdev = to_cooling_device(dev); kfree(cdev); } } static struct class thermal_class = { .name = "thermal", .dev_release = thermal_release, }; /** * thermal_cooling_device_register - register a new thermal cooling device * @type: the thermal cooling device type. * @devdata: device private data. * @ops: standard thermal cooling devices callbacks. 
*/ struct thermal_cooling_device *thermal_cooling_device_register( char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { struct thermal_cooling_device *cdev; struct thermal_zone_device *pos; int result; if (strlen(type) >= THERMAL_NAME_LENGTH) return ERR_PTR(-EINVAL); if (!ops || !ops->get_max_state || !ops->get_cur_state || !ops->set_cur_state) return ERR_PTR(-EINVAL); cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL); if (!cdev) return ERR_PTR(-ENOMEM); result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id); if (result) { kfree(cdev); return ERR_PTR(result); } strcpy(cdev->type, type); cdev->ops = ops; cdev->device.class = &thermal_class; cdev->devdata = devdata; dev_set_name(&cdev->device, "cooling_device%d", cdev->id); result = device_register(&cdev->device); if (result) { release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); kfree(cdev); return ERR_PTR(result); } /* sys I/F */ if (type) { result = device_create_file(&cdev->device, &dev_attr_cdev_type); if (result) goto unregister; } result = device_create_file(&cdev->device, &dev_attr_max_state); if (result) goto unregister; result = device_create_file(&cdev->device, &dev_attr_cur_state); if (result) goto unregister; mutex_lock(&thermal_list_lock); list_add(&cdev->node, &thermal_cdev_list); list_for_each_entry(pos, &thermal_tz_list, node) { if (!pos->ops->bind) continue; result = pos->ops->bind(pos, cdev); if (result) break; } mutex_unlock(&thermal_list_lock); if (!result) return cdev; unregister: release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); device_unregister(&cdev->device); return ERR_PTR(result); } EXPORT_SYMBOL(thermal_cooling_device_register); /** * thermal_cooling_device_unregister - removes the registered thermal cooling device * @cdev: the thermal cooling device to remove. * * thermal_cooling_device_unregister() must be called when the device is no * longer needed. 
*/ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) { struct thermal_zone_device *tz; struct thermal_cooling_device *pos = NULL; if (!cdev) return; mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_cdev_list, node) if (pos == cdev) break; if (pos != cdev) { /* thermal cooling device not found */ mutex_unlock(&thermal_list_lock); return; } list_del(&cdev->node); list_for_each_entry(tz, &thermal_tz_list, node) { if (!tz->ops->unbind) continue; tz->ops->unbind(tz, cdev); } mutex_unlock(&thermal_list_lock); if (cdev->type[0]) device_remove_file(&cdev->device, &dev_attr_cdev_type); device_remove_file(&cdev->device, &dev_attr_max_state); device_remove_file(&cdev->device, &dev_attr_cur_state); release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); device_unregister(&cdev->device); return; } EXPORT_SYMBOL(thermal_cooling_device_unregister); /** * thermal_zone_device_update - force an update of a thermal zone's state * @ttz: the thermal zone to update */ void thermal_zone_device_update(struct thermal_zone_device *tz) { int count, ret = 0; long temp, trip_temp, max_state, last_trip_change = 0; enum thermal_trip_type trip_type; struct thermal_cooling_device_instance *instance; struct thermal_cooling_device *cdev; mutex_lock(&tz->lock); if (tz->ops->get_temp(tz, &temp)) { /* get_temp failed - retry it later */ printk(KERN_WARNING PREFIX "failed to read out thermal zone " "%d\n", tz->id); goto leave; } for (count = 0; count < tz->trips; count++) { tz->ops->get_trip_type(tz, count, &trip_type); tz->ops->get_trip_temp(tz, count, &trip_temp); switch (trip_type) { case THERMAL_TRIP_CRITICAL: if (temp >= trip_temp) { if (tz->ops->notify) ret = tz->ops->notify(tz, count, trip_type); if (!ret) { printk(KERN_EMERG "Critical temperature reached (%ld C), shutting down.\n", temp/1000); orderly_poweroff(true); } } break; case THERMAL_TRIP_HOT: if (temp >= trip_temp) if (tz->ops->notify) tz->ops->notify(tz, count, trip_type); break; case 
THERMAL_TRIP_ACTIVE: list_for_each_entry(instance, &tz->cooling_devices, node) { if (instance->trip != count) continue; cdev = instance->cdev; if (temp >= trip_temp) cdev->ops->set_cur_state(cdev, 1); else cdev->ops->set_cur_state(cdev, 0); } break; case THERMAL_TRIP_STATE_ACTIVE: list_for_each_entry(instance, &tz->cooling_devices, node) { if (instance->trip != count) continue; if (temp <= last_trip_change) continue; cdev = instance->cdev; cdev->ops->get_max_state(cdev, &max_state); if ((temp >= trip_temp) && ((count + 1) <= max_state)) cdev->ops->set_cur_state(cdev, count + 1); else if ((temp < trip_temp) && (count <= max_state)) cdev->ops->set_cur_state(cdev, count); last_trip_change = trip_temp; } break; case THERMAL_TRIP_PASSIVE: if (temp >= trip_temp || tz->passive) thermal_zone_device_passive(tz, temp, trip_temp, count); break; } } if (tz->forced_passive) thermal_zone_device_passive(tz, temp, tz->forced_passive, THERMAL_TRIPS_NONE); tz->last_temperature = temp; leave: if (tz->passive) thermal_zone_device_set_polling(tz, tz->passive_delay); else if (tz->polling_delay) thermal_zone_device_set_polling(tz, tz->polling_delay); else thermal_zone_device_set_polling(tz, 0); mutex_unlock(&tz->lock); } EXPORT_SYMBOL(thermal_zone_device_update); /** * thermal_zone_device_register - register a new thermal zone device * @type: the thermal zone device type * @trips: the number of trip points the thermal zone support * @devdata: private device data * @ops: standard thermal zone device callbacks * @tc1: thermal coefficient 1 for passive calculations * @tc2: thermal coefficient 2 for passive calculations * @passive_delay: number of milliseconds to wait between polls when * performing passive cooling * @polling_delay: number of milliseconds to wait between polls when checking * whether trip points have been crossed (0 for interrupt * driven systems) * * thermal_zone_device_unregister() must be called when the device is no * longer needed. 
The passive cooling formula uses tc1 and tc2 as described in * section 11.1.5.1 of the ACPI specification 3.0. */ struct thermal_zone_device *thermal_zone_device_register(char *type, int trips, void *devdata, const struct thermal_zone_device_ops *ops, int tc1, int tc2, int passive_delay, int polling_delay) { struct thermal_zone_device *tz; struct thermal_cooling_device *pos; enum thermal_trip_type trip_type; int result; int count; int passive = 0; if (strlen(type) >= THERMAL_NAME_LENGTH) return ERR_PTR(-EINVAL); if (trips > THERMAL_MAX_TRIPS || trips < 0) return ERR_PTR(-EINVAL); if (!ops || !ops->get_temp) return ERR_PTR(-EINVAL); tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL); if (!tz) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&tz->cooling_devices); idr_init(&tz->idr); mutex_init(&tz->lock); result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id); if (result) { kfree(tz); return ERR_PTR(result); } strcpy(tz->type, type); tz->ops = ops; tz->device.class = &thermal_class; tz->devdata = devdata; tz->trips = trips; tz->tc1 = tc1; tz->tc2 = tc2; tz->passive_delay = passive_delay; tz->polling_delay = polling_delay; dev_set_name(&tz->device, "thermal_zone%d", tz->id); result = device_register(&tz->device); if (result) { release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); kfree(tz); return ERR_PTR(result); } /* sys I/F */ if (type) { result = device_create_file(&tz->device, &dev_attr_type); if (result) goto unregister; } result = device_create_file(&tz->device, &dev_attr_temp); if (result) goto unregister; if (ops->get_mode) { result = device_create_file(&tz->device, &dev_attr_mode); if (result) goto unregister; } for (count = 0; count < trips; count++) { TRIP_POINT_ATTR_ADD(&tz->device, count, result); if (result) goto unregister; tz->ops->get_trip_type(tz, count, &trip_type); if (trip_type == THERMAL_TRIP_PASSIVE) passive = 1; } if (!passive) result = device_create_file(&tz->device, &dev_attr_passive); if (result) goto unregister; result = 
thermal_add_hwmon_sysfs(tz); if (result) goto unregister; mutex_lock(&thermal_list_lock); list_add_tail(&tz->node, &thermal_tz_list); if (ops->bind) list_for_each_entry(pos, &thermal_cdev_list, node) { result = ops->bind(tz, pos); if (result) break; } mutex_unlock(&thermal_list_lock); INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check); thermal_zone_device_update(tz); if (!result) return tz; unregister: release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); device_unregister(&tz->device); return ERR_PTR(result); } EXPORT_SYMBOL(thermal_zone_device_register); /** * thermal_device_unregister - removes the registered thermal zone device * @tz: the thermal zone device to remove */ void thermal_zone_device_unregister(struct thermal_zone_device *tz) { struct thermal_cooling_device *cdev; struct thermal_zone_device *pos = NULL; int count; if (!tz) return; mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) if (pos == tz) break; if (pos != tz) { /* thermal zone device not found */ mutex_unlock(&thermal_list_lock); return; } list_del(&tz->node); if (tz->ops->unbind) list_for_each_entry(cdev, &thermal_cdev_list, node) tz->ops->unbind(tz, cdev); mutex_unlock(&thermal_list_lock); thermal_zone_device_set_polling(tz, 0); if (tz->type[0]) device_remove_file(&tz->device, &dev_attr_type); device_remove_file(&tz->device, &dev_attr_temp); if (tz->ops->get_mode) device_remove_file(&tz->device, &dev_attr_mode); for (count = 0; count < tz->trips; count++) TRIP_POINT_ATTR_REMOVE(&tz->device, count); thermal_remove_hwmon_sysfs(tz); release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); idr_destroy(&tz->idr); mutex_destroy(&tz->lock); device_unregister(&tz->device); return; } EXPORT_SYMBOL(thermal_zone_device_unregister); #ifdef CONFIG_NET static struct genl_family thermal_event_genl_family = { .id = GENL_ID_GENERATE, .name = THERMAL_GENL_FAMILY_NAME, .version = THERMAL_GENL_VERSION, .maxattr = THERMAL_GENL_ATTR_MAX, }; static struct 
genl_multicast_group thermal_event_mcgrp = { .name = THERMAL_GENL_MCAST_GROUP_NAME, }; int generate_netlink_event(u32 orig, enum events event) { struct sk_buff *skb; struct nlattr *attr; struct thermal_genl_event *thermal_event; void *msg_header; int size; int result; /* allocate memory */ size = nla_total_size(sizeof(struct thermal_genl_event)) + \ nla_total_size(0); skb = genlmsg_new(size, GFP_ATOMIC); if (!skb) return -ENOMEM; /* add the genetlink message header */ msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++, &thermal_event_genl_family, 0, THERMAL_GENL_CMD_EVENT); if (!msg_header) { nlmsg_free(skb); return -ENOMEM; } /* fill the data */ attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \ sizeof(struct thermal_genl_event)); if (!attr) { nlmsg_free(skb); return -EINVAL; } thermal_event = nla_data(attr); if (!thermal_event) { nlmsg_free(skb); return -EINVAL; } memset(thermal_event, 0, sizeof(struct thermal_genl_event)); thermal_event->orig = orig; thermal_event->event = event; /* send multicast genetlink message */ result = genlmsg_end(skb, msg_header); if (result < 0) { nlmsg_free(skb); return result; } result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); if (result) printk(KERN_INFO "failed to send netlink event:%d", result); return result; } EXPORT_SYMBOL(generate_netlink_event); static int genetlink_init(void) { int result; result = genl_register_family(&thermal_event_genl_family); if (result) return result; result = genl_register_mc_group(&thermal_event_genl_family, &thermal_event_mcgrp); if (result) genl_unregister_family(&thermal_event_genl_family); return result; } static void genetlink_exit(void) { genl_unregister_family(&thermal_event_genl_family); } #else /* !CONFIG_NET */ static inline int genetlink_init(void) { return 0; } static inline void genetlink_exit(void) {} #endif /* !CONFIG_NET */ static int __init thermal_init(void) { int result = 0; result = class_register(&thermal_class); if (result) { 
idr_destroy(&thermal_tz_idr); idr_destroy(&thermal_cdev_idr); mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); } result = genetlink_init(); return result; } static void __exit thermal_exit(void) { class_unregister(&thermal_class); idr_destroy(&thermal_tz_idr); idr_destroy(&thermal_cdev_idr); mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); genetlink_exit(); } fs_initcall(thermal_init); module_exit(thermal_exit);
gpl-2.0
liutz/PDFViewer
jni/mupdf/mupdf/pdf/pdf_metrics.c
39
2746
#include "fitz-internal.h" #include "mupdf-internal.h" void pdf_set_font_wmode(fz_context *ctx, pdf_font_desc *font, int wmode) { font->wmode = wmode; } void pdf_set_default_hmtx(fz_context *ctx, pdf_font_desc *font, int w) { font->dhmtx.w = w; } void pdf_set_default_vmtx(fz_context *ctx, pdf_font_desc *font, int y, int w) { font->dvmtx.y = y; font->dvmtx.w = w; } void pdf_add_hmtx(fz_context *ctx, pdf_font_desc *font, int lo, int hi, int w) { if (font->hmtx_len + 1 >= font->hmtx_cap) { int new_cap = font->hmtx_cap + 16; font->hmtx = fz_resize_array(ctx, font->hmtx, new_cap, sizeof(pdf_hmtx)); font->hmtx_cap = new_cap; } font->hmtx[font->hmtx_len].lo = lo; font->hmtx[font->hmtx_len].hi = hi; font->hmtx[font->hmtx_len].w = w; font->hmtx_len++; } void pdf_add_vmtx(fz_context *ctx, pdf_font_desc *font, int lo, int hi, int x, int y, int w) { if (font->vmtx_len + 1 >= font->vmtx_cap) { int new_cap = font->vmtx_cap + 16; font->vmtx = fz_resize_array(ctx, font->vmtx, new_cap, sizeof(pdf_vmtx)); font->vmtx_cap = new_cap; } font->vmtx[font->vmtx_len].lo = lo; font->vmtx[font->vmtx_len].hi = hi; font->vmtx[font->vmtx_len].x = x; font->vmtx[font->vmtx_len].y = y; font->vmtx[font->vmtx_len].w = w; font->vmtx_len++; } static int cmph(const void *a0, const void *b0) { pdf_hmtx *a = (pdf_hmtx*)a0; pdf_hmtx *b = (pdf_hmtx*)b0; return a->lo - b->lo; } static int cmpv(const void *a0, const void *b0) { pdf_vmtx *a = (pdf_vmtx*)a0; pdf_vmtx *b = (pdf_vmtx*)b0; return a->lo - b->lo; } void pdf_end_hmtx(fz_context *ctx, pdf_font_desc *font) { if (!font->hmtx) return; qsort(font->hmtx, font->hmtx_len, sizeof(pdf_hmtx), cmph); font->size += font->hmtx_cap * sizeof(pdf_hmtx); } void pdf_end_vmtx(fz_context *ctx, pdf_font_desc *font) { if (!font->vmtx) return; qsort(font->vmtx, font->vmtx_len, sizeof(pdf_vmtx), cmpv); font->size += font->vmtx_cap * sizeof(pdf_vmtx); } pdf_hmtx pdf_lookup_hmtx(fz_context *ctx, pdf_font_desc *font, int cid) { int l = 0; int r = font->hmtx_len - 1; int m; if 
(!font->hmtx) goto notfound; while (l <= r) { m = (l + r) >> 1; if (cid < font->hmtx[m].lo) r = m - 1; else if (cid > font->hmtx[m].hi) l = m + 1; else return font->hmtx[m]; } notfound: return font->dhmtx; } pdf_vmtx pdf_lookup_vmtx(fz_context *ctx, pdf_font_desc *font, int cid) { pdf_hmtx h; pdf_vmtx v; int l = 0; int r = font->vmtx_len - 1; int m; if (!font->vmtx) goto notfound; while (l <= r) { m = (l + r) >> 1; if (cid < font->vmtx[m].lo) r = m - 1; else if (cid > font->vmtx[m].hi) l = m + 1; else return font->vmtx[m]; } notfound: h = pdf_lookup_hmtx(ctx, font, cid); v = font->dvmtx; v.x = h.w / 2; return v; }
gpl-2.0
matthewbauer/wesnoth
doc/design/gui2/unit_attack.cpp
39
3906
#define GETTEXT_DOMAIN "wesnoth-lib" /*@ \label{unit_attack.cpp:textdomain} @*/ #include "gui/dialogs/unit_attack.hpp" #include "gui/widgets/image.hpp" #include "gui/widgets/listbox.hpp" #include "gui/widgets/settings.hpp" #include "gui/widgets/window.hpp" #include "unit.hpp" namespace gui2 { /*@ \label{unit_attack.cpp:wiki} @*/ /*WIKI * @page = GUIWindowDefinitionWML * @order = 2_unit_attack * * == Unit attack == * * This shows the dialog for attacking units. * * @start_table = grid * * attacker_portrait (image) Shows the portrait of the attacking unit. * attacker_icon (image) Shows the icon of the attacking unit. * attacker_name (control) Shows the name of the attacking unit. * * * defender_portrait (image) Shows the portrait of the defending unit. * defender_icon (image) Shows the icon of the defending unit. * defender_name (control) Shows the name of the defending unit. * * * (weapon_list) (listbox) The list with weapons to choos from. * -[attacker_weapon] (control) The weapon for the attacker to use. * -[defender_weapon] (control) The weapon for the defender to use. 
* * @end_table */ REGISTER_WINDOW(unit_attack) /*@ \label{unit_attack.cpp:register} @*/ tunit_attack::tunit_attack( const unit_map::iterator& attacker_itor , const unit_map::iterator& defender_itor , const std::vector<battle_context>& weapons , const int best_weapon) : selected_weapon_(-1) , attacker_itor_(attacker_itor) , defender_itor_(defender_itor) , weapons_(weapons) , best_weapon_(best_weapon) { } template<class T> static void set_label( twindow& window , const std::string& id , const std::string& label) { T* widget = find_widget<T>(&window, id, false, false); if(widget) { widget->set_label(label); } } static void set_attacker_info(twindow& w, unit& u) { set_label<timage>(w, "attacker_portrait", u.absolute_image()); set_label<timage>(w, "attacker_icon", u.absolute_image()); set_label<tcontrol>(w, "attacker_name", u.name()); } static void set_defender_info(twindow& w, unit& u) { set_label<timage>(w, "defender_portrait", u.absolute_image()); set_label<timage>(w, "defender_icon", u.absolute_image()); set_label<tcontrol>(w, "defender_name", u.name()); } static void set_weapon_info(twindow& window , const std::vector<battle_context>& weapons , const int best_weapon) { tlistbox& weapon_list = find_widget<tlistbox>(&window, "weapon_list", false); window.keyboard_capture(&weapon_list); const config empty; attack_type no_weapon(empty); for (size_t i = 0; i < weapons.size(); ++i) { const battle_context::unit_stats& attacker = weapons[i].get_attacker_stats(); const battle_context::unit_stats& defender = weapons[i].get_defender_stats(); const attack_type& attacker_weapon = attack_type(*attacker.weapon); const attack_type& defender_weapon = attack_type( defender.weapon ? 
*defender.weapon : no_weapon); std::map<std::string, string_map> data; string_map item; item["label"] = attacker_weapon.name(); data.insert(std::make_pair("attacker_weapon", item)); item["label"] = defender_weapon.name(); data.insert(std::make_pair("defender_weapon", item)); weapon_list.add_row(data); } assert(best_weapon < static_cast<int>(weapon_list.get_item_count())); weapon_list.select_row(best_weapon); } void tunit_attack::pre_show(CVideo& /*video*/, twindow& window) /*@ \label{unit_attack.cpp:pre_show} @*/ { set_attacker_info(window, *attacker_itor_); set_defender_info(window, *defender_itor_); selected_weapon_ = -1; set_weapon_info(window, weapons_, best_weapon_); } void tunit_attack::post_show(twindow& window) { if(get_retval() == twindow::OK) { selected_weapon_ = find_widget<tlistbox>(&window, "weapon_list", false) .get_selected_row(); } } } // namespace gui2
gpl-2.0
Perseus71/KT747
drivers/media/video/videobuf2-msm-mem.c
39
10399
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * Based on videobuf-dma-contig.c, * (c) 2008 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * helper functions for physically contiguous pmem capture buffers * The functions support contiguous memory allocations using pmem * kernel API. */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/io.h> #include <linux/android_pmem.h> #include <linux/memory_alloc.h> #include <media/videobuf2-msm-mem.h> #include <media/msm_camera.h> #include <mach/memory.h> #include <media/videobuf2-core.h> #include <mach/iommu_domains.h> #define MAGIC_PMEM 0x0733ac64 #define MAGIC_CHECK(is, should) \ if (unlikely((is) != (should))) { \ pr_err("magic mismatch: %x expected %x\n", (is), (should)); \ BUG(); \ } #ifdef CONFIG_MSM_CAMERA_DEBUG #define D(fmt, args...) pr_debug("videobuf-msm-mem: " fmt, ##args) #else #define D(fmt, args...) 
do {} while (0) #endif static unsigned long msm_mem_allocate(struct videobuf2_contig_pmem *mem) { unsigned long phyaddr; #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION int rc, len; mem->client = msm_ion_client_create(-1, "camera"); if (IS_ERR((void *)mem->client)) { pr_err("%s Could not create client\n", __func__); goto client_failed; } mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K, (0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID), 0); if (IS_ERR((void *)mem->ion_handle)) { pr_err("%s Could not allocate\n", __func__); goto alloc_failed; } rc = ion_map_iommu(mem->client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL, SZ_4K, 0, (unsigned long *)&phyaddr, (unsigned long *)&len, 0, 0); if (rc < 0) { pr_err("%s Could not get physical address\n", __func__); goto phys_failed; } #else phyaddr = allocate_contiguous_ebi_nomap(mem->size, SZ_4K); #endif return phyaddr; #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION phys_failed: ion_free(mem->client, mem->ion_handle); alloc_failed: ion_client_destroy(mem->client); client_failed: return 0; #endif } static int32_t msm_mem_free(struct videobuf2_contig_pmem *mem) { int32_t rc = 0; #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION ion_unmap_iommu(mem->client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL); ion_free(mem->client, mem->ion_handle); ion_client_destroy(mem->client); #else free_contiguous_memory_by_paddr(mem->phyaddr); #endif return rc; } static void videobuf2_vm_close(struct vm_area_struct *vma) { struct videobuf2_contig_pmem *mem = vma->vm_private_data; D("vm_close %p [count=%u,vma=%08lx-%08lx]\n", mem, mem->count, vma->vm_start, vma->vm_end); mem->count--; } static void videobuf2_vm_open(struct vm_area_struct *vma) { struct videobuf2_contig_pmem *mem = vma->vm_private_data; D("vm_open %p [count=%u,vma=%08lx-%08lx]\n", mem, mem->count, vma->vm_start, vma->vm_end); mem->count++; } static const struct vm_operations_struct videobuf2_vm_ops = { .open = videobuf2_vm_open, .close = videobuf2_vm_close, }; static void *msm_vb2_mem_ops_alloc(void 
*alloc_ctx, unsigned long size) { struct videobuf2_contig_pmem *mem; mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) return ERR_PTR(-ENOMEM); mem->magic = MAGIC_PMEM; mem->size = PAGE_ALIGN(size); mem->alloc_ctx = alloc_ctx; mem->is_userptr = 0; mem->phyaddr = msm_mem_allocate(mem); if (!mem->phyaddr) { pr_err("%s : pmem memory allocation failed\n", __func__); kfree(mem); return ERR_PTR(-ENOMEM); } mem->mapped_phyaddr = mem->phyaddr; return mem; } static void msm_vb2_mem_ops_put(void *buf_priv) { struct videobuf2_contig_pmem *mem = buf_priv; if (!mem->is_userptr) { D("%s Freeing memory ", __func__); msm_mem_free(mem); } kfree(mem); } int videobuf2_pmem_contig_mmap_get(struct videobuf2_contig_pmem *mem, struct videobuf2_msm_offset *offset, enum videobuf2_buffer_type buffer_type, int path) { if (offset) mem->offset = *offset; else memset(&mem->offset, 0, sizeof(struct videobuf2_msm_offset)); mem->buffer_type = buffer_type; mem->path = path; return 0; } EXPORT_SYMBOL_GPL(videobuf2_pmem_contig_mmap_get); /** * videobuf_pmem_contig_user_get() - setup user space memory pointer * @mem: per-buffer private videobuf-contig-pmem data * @vb: video buffer to map * * This function validates and sets up a pointer to user space memory. * Only physically contiguous pfn-mapped memory is accepted. * * Returns 0 if successful. 
*/ int videobuf2_pmem_contig_user_get(struct videobuf2_contig_pmem *mem, struct videobuf2_msm_offset *offset, enum videobuf2_buffer_type buffer_type, uint32_t addr_offset, int path, struct ion_client *client, int domain_num) { unsigned long len; int rc = 0; #ifndef CONFIG_MSM_MULTIMEDIA_USE_ION unsigned long kvstart; #endif unsigned long paddr = 0; if (mem->phyaddr != 0) return 0; #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION mem->ion_handle = ion_import_dma_buf(client, (int)mem->vaddr); if (IS_ERR_OR_NULL(mem->ion_handle)) { pr_err("%s ION import failed\n", __func__); return PTR_ERR(mem->ion_handle); } #if !defined(CONFIG_MSM_IOMMU) rc = ion_phys(client, mem->ion_handle, (ion_phys_addr_t *)&mem->phyaddr, (size_t *)&len); #else rc = ion_map_iommu(client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL, SZ_4K, 0, (unsigned long *)&mem->phyaddr, &len, 0, 0); if (rc < 0) ion_free(client, mem->ion_handle); #endif #if !defined(CONFIG_MSM_IOMMU) #if defined(CACHABLE_MEMORY) rc = ion_handle_get_flags(client, mem->ion_handle, &mem->ion_flags); mem->kernel_vaddr = ion_map_kernel(client, mem->ion_handle); #endif #endif #elif CONFIG_ANDROID_PMEM rc = get_pmem_file((int)mem->vaddr, (unsigned long *)&mem->phyaddr, &kvstart, &len, &mem->file); if (rc < 0) { pr_err("%s: get_pmem_file fd %d error %d\n", __func__, (int)mem->vaddr, rc); return rc; } #else paddr = 0; kvstart = 0; #endif if (offset) mem->offset = *offset; else memset(&mem->offset, 0, sizeof(struct videobuf2_msm_offset)); mem->path = path; mem->buffer_type = buffer_type; paddr = mem->phyaddr; mem->mapped_phyaddr = paddr + addr_offset; mem->addr_offset = addr_offset; return rc; } EXPORT_SYMBOL_GPL(videobuf2_pmem_contig_user_get); void videobuf2_pmem_contig_user_put(struct videobuf2_contig_pmem *mem, struct ion_client *client, int domain_num) { if (mem->is_userptr) { #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION if (IS_ERR_OR_NULL(mem->ion_handle)) { pr_err("%s ION import failed\n", __func__); return; } #if !defined(CONFIG_MSM_IOMMU) #if 
defined(CACHABLE_MEMORY) ion_unmap_kernel(client, mem->ion_handle); #endif #else ion_unmap_iommu(client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL); #endif ion_free(client, mem->ion_handle); #elif CONFIG_ANDROID_PMEM put_pmem_file(mem->file); #endif } mem->is_userptr = 0; mem->phyaddr = 0; mem->size = 0; mem->mapped_phyaddr = 0; } EXPORT_SYMBOL_GPL(videobuf2_pmem_contig_user_put); static void *msm_vb2_mem_ops_get_userptr(void *alloc_ctx, unsigned long vaddr, unsigned long size, int write) { struct videobuf2_contig_pmem *mem; mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) return ERR_PTR(-ENOMEM); mem->magic = MAGIC_PMEM; mem->is_userptr = 1; mem->vaddr = (void *)vaddr; mem->size = size; mem->alloc_ctx = alloc_ctx; return mem; } static void msm_vb2_mem_ops_put_userptr(void *buf_priv) { kfree(buf_priv); } static void *msm_vb2_mem_ops_vaddr(void *buf_priv) { struct videobuf2_contig_pmem *mem = buf_priv; return mem->vaddr; } static void *msm_vb2_mem_ops_cookie(void *buf_priv) { return buf_priv; } static unsigned int msm_vb2_mem_ops_num_users(void *buf_priv) { struct videobuf2_contig_pmem *mem = buf_priv; MAGIC_CHECK(mem->magic, MAGIC_PMEM); return mem->count; } static int msm_vb2_mem_ops_mmap(void *buf_priv, struct vm_area_struct *vma) { struct videobuf2_contig_pmem *mem; int retval; unsigned long size; D("%s\n", __func__); mem = buf_priv; D("mem = 0x%x\n", (u32)mem); BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_PMEM); /* Try to remap memory */ size = vma->vm_end - vma->vm_start; size = (size < mem->size) ? size : mem->size; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); retval = remap_pfn_range(vma, vma->vm_start, mem->phyaddr >> PAGE_SHIFT, size, vma->vm_page_prot); if (retval) { pr_err("mmap: remap failed with error %d. 
", retval); goto error; } mem->vaddr = (void *)vma->vm_start; vma->vm_ops = &videobuf2_vm_ops; vma->vm_flags |= VM_DONTEXPAND; vma->vm_private_data = mem; D("mmap %p: %08lx-%08lx (%lx) pgoff %08lx\n", mem, vma->vm_start, vma->vm_end, (long int)mem->size, vma->vm_pgoff); videobuf2_vm_open(vma); return 0; error: return -ENOMEM; } static struct vb2_mem_ops msm_vb2_mem_ops = { .alloc = msm_vb2_mem_ops_alloc, .put = msm_vb2_mem_ops_put, .get_userptr = msm_vb2_mem_ops_get_userptr, .put_userptr = msm_vb2_mem_ops_put_userptr, .vaddr = msm_vb2_mem_ops_vaddr, .cookie = msm_vb2_mem_ops_cookie, .num_users = msm_vb2_mem_ops_num_users, .mmap = msm_vb2_mem_ops_mmap }; void videobuf2_queue_pmem_contig_init(struct vb2_queue *q, enum v4l2_buf_type type, const struct vb2_ops *ops, unsigned int size, void *priv) { memset(q, 0, sizeof(struct vb2_queue)); q->mem_ops = &msm_vb2_mem_ops; q->ops = ops; q->drv_priv = priv; q->type = type; q->io_modes = VB2_MMAP | VB2_USERPTR; q->io_flags = 0; q->buf_struct_size = size; vb2_queue_init(q); } EXPORT_SYMBOL_GPL(videobuf2_queue_pmem_contig_init); unsigned long videobuf2_to_pmem_contig(struct vb2_buffer *vb, unsigned int plane_no) { struct videobuf2_contig_pmem *mem; mem = vb2_plane_cookie(vb, plane_no); BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_PMEM); return mem->mapped_phyaddr; } EXPORT_SYMBOL_GPL(videobuf2_to_pmem_contig); MODULE_DESCRIPTION("helper module to manage video4linux PMEM contig buffers"); MODULE_LICENSE("GPL v2");
gpl-2.0
Pivosgroup/buildroot-linux-kernel-m3
drivers/amlogic/wifi/rtl8xxx_DU/os_dep/linux/ioctl_linux.c
39
241707
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _IOCTL_LINUX_C_ #include <drv_conf.h> #include <osdep_service.h> #include <drv_types.h> #include <wlan_bssdef.h> #include <rtw_debug.h> #include <wifi.h> #include <rtw_mlme.h> #include <rtw_mlme_ext.h> #include <rtw_ioctl.h> #include <rtw_ioctl_set.h> #include <rtw_ioctl_query.h> //#ifdef CONFIG_MP_INCLUDED #include <rtw_mp_ioctl.h> //#endif #ifdef CONFIG_USB_HCI #include <usb_ops.h> #endif //CONFIG_USB_HCI #include <rtw_version.h> #ifdef CONFIG_MP_INCLUDED #include <rtw_mp.h> #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) #define iwe_stream_add_event(a, b, c, d, e) iwe_stream_add_event(b, c, d, e) #define iwe_stream_add_point(a, b, c, d, e) iwe_stream_add_point(b, c, d, e) #endif #define RTL_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30 #define SCAN_ITEM_SIZE 768 #define MAX_CUSTOM_LEN 64 #define RATE_COUNT 4 #ifdef CONFIG_GLOBAL_UI_PID extern int ui_pid[3]; #endif // combo scan #define WEXT_CSCAN_AMOUNT 9 #define WEXT_CSCAN_BUF_LEN 360 #define WEXT_CSCAN_HEADER "CSCAN S\x01\x00\x00S\x00" #define WEXT_CSCAN_HEADER_SIZE 12 #define WEXT_CSCAN_SSID_SECTION 'S' #define WEXT_CSCAN_CHANNEL_SECTION 'C' 
#define WEXT_CSCAN_NPROBE_SECTION 'N' #define WEXT_CSCAN_ACTV_DWELL_SECTION 'A' #define WEXT_CSCAN_PASV_DWELL_SECTION 'P' #define WEXT_CSCAN_HOME_DWELL_SECTION 'H' #define WEXT_CSCAN_TYPE_SECTION 'T' extern u8 key_2char2num(u8 hch, u8 lch); extern u8 str_2char2num(u8 hch, u8 lch); int rfpwrstate_check(_adapter *padapter); u32 rtw_rates[] = {1000000,2000000,5500000,11000000, 6000000,9000000,12000000,18000000,24000000,36000000,48000000,54000000}; static const char * const iw_operation_mode[] = { "Auto", "Ad-Hoc", "Managed", "Master", "Repeater", "Secondary", "Monitor" }; static int hex2num_i(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static int hex2byte_i(const char *hex) { int a, b; a = hex2num_i(*hex++); if (a < 0) return -1; b = hex2num_i(*hex++); if (b < 0) return -1; return (a << 4) | b; } /** * hwaddr_aton - Convert ASCII string to MAC address * @txt: MAC address as a string (e.g., "00:11:22:33:44:55") * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes) * Returns: 0 on success, -1 on failure (e.g., string not a MAC address) */ static int hwaddr_aton_i(const char *txt, u8 *addr) { int i; for (i = 0; i < 6; i++) { int a, b; a = hex2num_i(*txt++); if (a < 0) return -1; b = hex2num_i(*txt++); if (b < 0) return -1; *addr++ = (a << 4) | b; if (i < 5 && *txt++ != ':') return -1; } return 0; } static void indicate_wx_custom_event(_adapter *padapter, char *msg) { u8 *buff, *p; union iwreq_data wrqu; if (strlen(msg) > IW_CUSTOM_MAX) { DBG_871X("%s strlen(msg):%u > IW_CUSTOM_MAX:%u\n", __FUNCTION__ ,strlen(msg), IW_CUSTOM_MAX); return; } buff = rtw_zmalloc(IW_CUSTOM_MAX+1); if(!buff) return; _rtw_memcpy(buff, msg, strlen(msg)); _rtw_memset(&wrqu,0,sizeof(wrqu)); wrqu.data.length = strlen(msg); DBG_8192C("%s %s\n", __FUNCTION__, buff); wireless_send_event(padapter->pnetdev, IWEVCUSTOM, &wrqu, buff); rtw_mfree(buff, IW_CUSTOM_MAX+1); } static void 
request_wps_pbc_event(_adapter *padapter) { u8 *buff, *p; union iwreq_data wrqu; buff = rtw_malloc(IW_CUSTOM_MAX); if(!buff) return; _rtw_memset(buff, 0, IW_CUSTOM_MAX); p=buff; p+=sprintf(p, "WPS_PBC_START.request=TRUE"); _rtw_memset(&wrqu,0,sizeof(wrqu)); wrqu.data.length = p-buff; wrqu.data.length = (wrqu.data.length<IW_CUSTOM_MAX) ? wrqu.data.length:IW_CUSTOM_MAX; DBG_8192C("%s\n", __FUNCTION__); wireless_send_event(padapter->pnetdev, IWEVCUSTOM, &wrqu, buff); if(buff) { rtw_mfree(buff, IW_CUSTOM_MAX); } } void indicate_wx_scan_complete_event(_adapter *padapter) { union iwreq_data wrqu; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; _rtw_memset(&wrqu, 0, sizeof(union iwreq_data)); //DBG_8192C("+rtw_indicate_wx_scan_complete_event\n"); wireless_send_event(padapter->pnetdev, SIOCGIWSCAN, &wrqu, NULL); } void rtw_indicate_wx_assoc_event(_adapter *padapter) { union iwreq_data wrqu; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; _rtw_memset(&wrqu, 0, sizeof(union iwreq_data)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; _rtw_memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN); //DBG_8192C("+rtw_indicate_wx_assoc_event\n"); wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL); } void rtw_indicate_wx_disassoc_event(_adapter *padapter) { union iwreq_data wrqu; _rtw_memset(&wrqu, 0, sizeof(union iwreq_data)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; _rtw_memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); //DBG_8192C("+rtw_indicate_wx_disassoc_event\n"); wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL); } /* uint rtw_is_cckrates_included(u8 *rate) { u32 i = 0; while(rate[i]!=0) { if ( (((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) || (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22) ) return _TRUE; i++; } return _FALSE; } uint rtw_is_cckratesonly_included(u8 *rate) { u32 i = 0; while(rate[i]!=0) { if ( (((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) && (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) 
!= 22) ) return _FALSE; i++; } return _TRUE; } */ static char *translate_scan(_adapter *padapter, struct iw_request_info* info, struct wlan_network *pnetwork, char *start, char *stop) { struct iw_event iwe; u16 cap; u32 ht_ielen = 0; char custom[MAX_CUSTOM_LEN]; char *p; u16 max_rate=0, rate, ht_cap=_FALSE; u32 i = 0; char *current_val; long rssi; u8 bw_40MHz=0, short_GI=0; u16 mcs_rate=0; struct registry_priv *pregpriv = &padapter->registrypriv; #ifdef CONFIG_P2P struct wifidirect_info *pwdinfo = &padapter->wdinfo; #endif //CONFIG_P2P #ifdef CONFIG_P2P if(!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) && !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE)) { u32 blnGotP2PIE = _FALSE; // User is doing the P2P device discovery // The prefix of SSID should be "DIRECT-" and the IE should contains the P2P IE. // If not, the driver should ignore this AP and go to the next AP. // Verifying the SSID if ( _rtw_memcmp( pnetwork->network.Ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN ) ) { u32 p2pielen = 0; // Verifying the P2P IE if ( rtw_get_p2p_ie( &pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen) ) { blnGotP2PIE = _TRUE; } } if ( blnGotP2PIE == _FALSE ) { return start; } } #endif //CONFIG_P2P /* AP MAC address */ iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; _rtw_memcpy(iwe.u.ap_addr.sa_data, pnetwork->network.MacAddress, ETH_ALEN); start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN); /* Add the ESSID */ iwe.cmd = SIOCGIWESSID; iwe.u.data.flags = 1; iwe.u.data.length = min((u16)pnetwork->network.Ssid.SsidLength, (u16)32); start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid); //parsing HT_CAP_IE p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength-12); if(p && ht_ielen>0) { struct rtw_ieee80211_ht_cap *pht_capie; ht_cap = _TRUE; pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2); _rtw_memcpy(&mcs_rate , pht_capie->supp_mcs_set, 2); 
bw_40MHz = (pht_capie->cap_info&IEEE80211_HT_CAP_SUP_WIDTH) ? 1:0; short_GI = (pht_capie->cap_info&(IEEE80211_HT_CAP_SGI_20|IEEE80211_HT_CAP_SGI_40)) ? 1:0; } /* Add the protocol name */ iwe.cmd = SIOCGIWNAME; if ((rtw_is_cckratesonly_included((u8*)&pnetwork->network.SupportedRates)) == _TRUE) { if(ht_cap == _TRUE) snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn"); else snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b"); } else if ((rtw_is_cckrates_included((u8*)&pnetwork->network.SupportedRates)) == _TRUE) { if(ht_cap == _TRUE) snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn"); else snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg"); } else { if(pnetwork->network.Configuration.DSConfig > 14) { if(ht_cap == _TRUE) snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11an"); else snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11a"); } else { if(ht_cap == _TRUE) snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn"); else snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g"); } } start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN); /* Add mode */ iwe.cmd = SIOCGIWMODE; _rtw_memcpy((u8 *)&cap, rtw_get_capability_from_ie(pnetwork->network.IEs), 2); cap = le16_to_cpu(cap); if(cap & (WLAN_CAPABILITY_IBSS |WLAN_CAPABILITY_BSS)){ if (cap & WLAN_CAPABILITY_BSS) iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_ADHOC; start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN); } if(pnetwork->network.Configuration.DSConfig<1 /*|| pnetwork->network.Configuration.DSConfig>14*/) pnetwork->network.Configuration.DSConfig = 1; /* Add frequency/channel */ iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = rtw_ch2freq(pnetwork->network.Configuration.DSConfig) * 100000; iwe.u.freq.e = 1; iwe.u.freq.i = pnetwork->network.Configuration.DSConfig; start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN); /* Add encryption capability */ iwe.cmd = SIOCGIWENCODE; if (cap & WLAN_CAPABILITY_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; 
iwe.u.data.length = 0; start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid); /*Add basic and extended rates */ max_rate = 0; p = custom; p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); while(pnetwork->network.SupportedRates[i]!=0) { rate = pnetwork->network.SupportedRates[i]&0x7F; if (rate > max_rate) max_rate = rate; p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "%d%s ", rate >> 1, (rate & 1) ? ".5" : ""); i++; } if(ht_cap == _TRUE) { if(mcs_rate&0x8000)//MCS15 { max_rate = (bw_40MHz) ? ((short_GI)?300:270):((short_GI)?144:130); } else if(mcs_rate&0x0080)//MCS7 { max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65); } else//default MCS7 { DBG_8192C("wx_get_scan, mcs_rate_bitmap=0x%x\n", mcs_rate); max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65); } max_rate = max_rate*2;//Mbps/2; } iwe.cmd = SIOCGIWRATE; iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; iwe.u.bitrate.value = max_rate * 500000; start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN); //parsing WPA/WPA2 IE { u8 buf[MAX_WPA_IE_LEN]; u8 wpa_ie[255],rsn_ie[255]; u16 wpa_len=0,rsn_len=0; u8 *p; sint out_len=0; out_len=rtw_get_sec_ie(pnetwork->network.IEs ,pnetwork->network.IELength,rsn_ie,&rsn_len,wpa_ie,&wpa_len); RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("rtw_wx_get_scan: ssid=%s\n",pnetwork->network.Ssid.Ssid)); RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("rtw_wx_get_scan: wpa_len=%d rsn_len=%d\n",wpa_len,rsn_len)); if (wpa_len > 0) { p=buf; _rtw_memset(buf, 0, MAX_WPA_IE_LEN); p += sprintf(p, "wpa_ie="); for (i = 0; i < wpa_len; i++) { p += sprintf(p, "%02x", wpa_ie[i]); } _rtw_memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; iwe.u.data.length = strlen(buf); start = iwe_stream_add_point(info, start, stop, &iwe,buf); _rtw_memset(&iwe, 0, sizeof(iwe)); iwe.cmd =IWEVGENIE; iwe.u.data.length = wpa_len; start = iwe_stream_add_point(info, start, stop, &iwe, wpa_ie); } if (rsn_len > 0) { p = buf; 
_rtw_memset(buf, 0, MAX_WPA_IE_LEN); p += sprintf(p, "rsn_ie="); for (i = 0; i < rsn_len; i++) { p += sprintf(p, "%02x", rsn_ie[i]); } _rtw_memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; iwe.u.data.length = strlen(buf); start = iwe_stream_add_point(info, start, stop, &iwe,buf); _rtw_memset(&iwe, 0, sizeof(iwe)); iwe.cmd =IWEVGENIE; iwe.u.data.length = rsn_len; start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie); } } { //parsing WPS IE int cnt = 0,total_ielen=0; u8 *wpsie_ptr=NULL; uint wps_ielen = 0; u8 *ie_ptr = pnetwork->network.IEs +_FIXED_IE_LENGTH_; total_ielen= pnetwork->network.IELength - _FIXED_IE_LENGTH_; if((ie_ptr) && (total_ielen>0)) { while(cnt < total_ielen) { if(rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen>2)) { wpsie_ptr = &ie_ptr[cnt]; iwe.cmd =IWEVGENIE; iwe.u.data.length = (u16)wps_ielen; start = iwe_stream_add_point(info, start, stop, &iwe, wpsie_ptr); } cnt+=ie_ptr[cnt+1]+2; //goto next } } } /* Add quality statistics */ iwe.cmd = IWEVQUAL; rssi = pnetwork->network.Rssi;//dBM #ifdef CONFIG_RTL8711 rssi = (rssi*2) + 190; if(rssi>100) rssi = 100; if(rssi<0) rssi = 0; #endif //DBG_8192C("RSSI=0x%X%%\n", rssi); // we only update signal_level (signal strength) that is rssi. 
iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID #ifdef CONFIG_PLATFORM_MT53XX ; iwe.u.qual.level = (u8)pnetwork->network.PhyInfo.SignalStrength;//% #else #ifdef CONFIG_SIGNAL_DISPLAY_DBM | IW_QUAL_DBM #endif ; #ifdef CONFIG_SIGNAL_DISPLAY_DBM iwe.u.qual.level = (u8) translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength);//dbm #else iwe.u.qual.level = (u8)pnetwork->network.PhyInfo.SignalStrength;//% #endif #endif iwe.u.qual.qual = (u8)pnetwork->network.PhyInfo.SignalQuality; // signal quality #ifdef CONFIG_PLATFORM_ROCKCHIPS iwe.u.qual.noise = -100; // noise level suggest by zhf@rockchips #else iwe.u.qual.noise = 0; // noise level #endif //CONFIG_PLATFORM_ROCKCHIPS //DBG_8192C("iqual=%d, ilevel=%d, inoise=%d, iupdated=%d\n", iwe.u.qual.qual, iwe.u.qual.level , iwe.u.qual.noise, iwe.u.qual.updated); start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN); //how to translate rssi to ?% //rssi = (iwe.u.qual.level*2) + 190; //if(rssi>100) rssi = 100; //if(rssi<0) rssi = 0; return start; } static int wpa_set_auth_algs(struct net_device *dev, u32 value) { _adapter *padapter = (_adapter *) rtw_netdev_priv(dev); int ret = 0; if ((value & AUTH_ALG_SHARED_KEY)&&(value & AUTH_ALG_OPEN_SYSTEM)) { DBG_8192C("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY and AUTH_ALG_OPEN_SYSTEM [value:0x%x]\n",value); padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled; padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch; padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto; } else if (value & AUTH_ALG_SHARED_KEY) { DBG_8192C("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY [value:0x%x]\n",value); padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled; #ifdef CONFIG_PLATFORM_MT53XX padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch; padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto; #else padapter->securitypriv.ndisauthtype = 
Ndis802_11AuthModeShared; padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared; #endif } else if(value & AUTH_ALG_OPEN_SYSTEM) { DBG_8192C("wpa_set_auth_algs, AUTH_ALG_OPEN_SYSTEM\n"); //padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; if(padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK) { #ifdef CONFIG_PLATFORM_MT53XX padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch; padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto; #else padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen; padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; #endif } } else if(value & AUTH_ALG_LEAP) { DBG_8192C("wpa_set_auth_algs, AUTH_ALG_LEAP\n"); } else { DBG_8192C("wpa_set_auth_algs, error!\n"); ret = -EINVAL; } return ret; } static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len) { int ret = 0; u32 wep_key_idx, wep_key_len,wep_total_len; NDIS_802_11_WEP *pwep = NULL; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv *psecuritypriv = &padapter->securitypriv; #ifdef CONFIG_P2P struct wifidirect_info* pwdinfo = &padapter->wdinfo; #endif //CONFIG_P2P _func_enter_; param->u.crypt.err = 0; param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0'; if (param_len < (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len) { ret = -EINVAL; goto exit; } if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { if (param->u.crypt.idx >= WEP_KEYS) { ret = -EINVAL; goto exit; } } else { ret = -EINVAL; goto exit; } if (strcmp(param->u.crypt.alg, "WEP") == 0) { RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,("wpa_set_encryption, crypt.alg = WEP\n")); DBG_8192C("wpa_set_encryption, crypt.alg = WEP\n"); padapter->securitypriv.ndisencryptstatus = 
Ndis802_11Encryption1Enabled; padapter->securitypriv.dot11PrivacyAlgrthm=_WEP40_; padapter->securitypriv.dot118021XGrpPrivacy=_WEP40_; wep_key_idx = param->u.crypt.idx; wep_key_len = param->u.crypt.key_len; RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_info_,("(1)wep_key_idx=%d\n", wep_key_idx)); DBG_8192C("(1)wep_key_idx=%d\n", wep_key_idx); if (wep_key_idx > WEP_KEYS) return -EINVAL; RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_info_,("(2)wep_key_idx=%d\n", wep_key_idx)); if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; wep_total_len = wep_key_len + FIELD_OFFSET(NDIS_802_11_WEP, KeyMaterial); pwep =(NDIS_802_11_WEP *) rtw_malloc(wep_total_len); if(pwep == NULL){ RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,(" wpa_set_encryption: pwep allocate fail !!!\n")); goto exit; } _rtw_memset(pwep, 0, wep_total_len); pwep->KeyLength = wep_key_len; pwep->Length = wep_total_len; if(wep_key_len==13) { padapter->securitypriv.dot11PrivacyAlgrthm=_WEP104_; padapter->securitypriv.dot118021XGrpPrivacy=_WEP104_; } } else { ret = -EINVAL; goto exit; } pwep->KeyIndex = wep_key_idx; pwep->KeyIndex |= 0x80000000; _rtw_memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength); if(param->u.crypt.set_tx) { DBG_8192C("wep, set_tx=1\n"); if(rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL) { ret = -EOPNOTSUPP ; } } else { DBG_8192C("wep, set_tx=0\n"); //don't update "psecuritypriv->dot11PrivacyAlgrthm" and //"psecuritypriv->dot11PrivacyKeyIndex=keyid", but can rtw_set_key to fw/cam if (wep_key_idx >= WEP_KEYS) { ret = -EOPNOTSUPP ; goto exit; } _rtw_memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength); psecuritypriv->dot11DefKeylen[wep_key_idx]=pwep->KeyLength; rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0); } goto exit; } if(padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) // 802_1x { struct sta_info * psta,*pbcmc_sta; struct sta_priv * pstapriv = &padapter->stapriv; if (check_fwstate(pmlmepriv, 
WIFI_STATION_STATE | WIFI_MP_STATE) == _TRUE) //sta mode { psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv)); if (psta == NULL) { //DEBUG_ERR( ("Set wpa_set_encryption: Obtain Sta_info fail \n")); } else { //Jeff: don't disable ieee8021x_blocked while clearing key if (strcmp(param->u.crypt.alg, "none") != 0) psta->ieee8021x_blocked = _FALSE; if((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)|| (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) { psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; } if(param->u.crypt.set_tx ==1)//pairwise key { _rtw_memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); if(strcmp(param->u.crypt.alg, "TKIP") == 0)//set mic key { //DEBUG_ERR(("\nset key length :param->u.crypt.key_len=%d\n", param->u.crypt.key_len)); _rtw_memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8); _rtw_memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8); padapter->securitypriv.busetkipkey=_FALSE; //_set_timer(&padapter->securitypriv.tkip_timer, 50); } //DEBUG_ERR(("\n param->u.crypt.key_len=%d\n",param->u.crypt.key_len)); //DEBUG_ERR(("\n ~~~~stastakey:unicastkey\n")); DBG_871X("\n ~~~~stastakey:unicastkey\n"); rtw_setstakey_cmd(padapter, (unsigned char *)psta, _TRUE); } else//group key { _rtw_memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key,(param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); _rtw_memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[16]),8); _rtw_memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[24]),8); padapter->securitypriv.binstallGrpkey = _TRUE; //DEBUG_ERR(("\n param->u.crypt.key_len=%d\n", param->u.crypt.key_len)); //DEBUG_ERR(("\n ~~~~stastakey:groupkey\n")); DBG_871X("\n ~~~~stastakey:groupkey\n"); 
padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx; rtw_set_key(padapter,&padapter->securitypriv,param->u.crypt.idx, 1); #ifdef CONFIG_P2P if(rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING)) { rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_DONE); } #endif //CONFIG_P2P } } pbcmc_sta=rtw_get_bcmc_stainfo(padapter); if(pbcmc_sta==NULL) { //DEBUG_ERR( ("Set OID_802_11_ADD_KEY: bcmc stainfo is null \n")); } else { //Jeff: don't disable ieee8021x_blocked while clearing key if (strcmp(param->u.crypt.alg, "none") != 0) pbcmc_sta->ieee8021x_blocked = _FALSE; if((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)|| (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) { pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; } } } else if(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) //adhoc mode { } } exit: if (pwep) { rtw_mfree((u8 *)pwep,wep_total_len); } _func_exit_; return ret; } static int rtw_set_wpa_ie(_adapter *padapter, char *pie, unsigned short ielen) { u8 *buf=NULL, *pos=NULL; u32 left; int group_cipher = 0, pairwise_cipher = 0; int ret = 0; #ifdef CONFIG_P2P struct wifidirect_info* pwdinfo = &padapter->wdinfo; #endif //CONFIG_P2P if((ielen > MAX_WPA_IE_LEN) || (pie == NULL)){ padapter->securitypriv.wps_phase = _FALSE; if(pie == NULL) return ret; else return -EINVAL; } if(ielen) { buf = rtw_zmalloc(ielen); if (buf == NULL){ ret = -ENOMEM; goto exit; } _rtw_memcpy(buf, pie , ielen); //dump { int i; DBG_8192C("\n wpa_ie(length:%d):\n", ielen); for(i=0;i<ielen;i=i+8) DBG_8192C("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x \n",buf[i],buf[i+1],buf[i+2],buf[i+3],buf[i+4],buf[i+5],buf[i+6],buf[i+7]); } pos = buf; if(ielen < RSN_HEADER_LEN){ RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,("Ie len too short %d\n", ielen)); ret = -1; goto exit; } #if 0 pos += RSN_HEADER_LEN; left = ielen - RSN_HEADER_LEN; if (left >= RSN_SELECTOR_LEN){ pos += RSN_SELECTOR_LEN; left -= 
RSN_SELECTOR_LEN; } else if (left > 0){ RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,("Ie length mismatch, %u too much \n", left)); ret =-1; goto exit; } #endif if(rtw_parse_wpa_ie(buf, ielen, &group_cipher, &pairwise_cipher) == _SUCCESS) { padapter->securitypriv.dot11AuthAlgrthm= dot11AuthAlgrthm_8021X; padapter->securitypriv.ndisauthtype=Ndis802_11AuthModeWPAPSK; _rtw_memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen); } if(rtw_parse_wpa2_ie(buf, ielen, &group_cipher, &pairwise_cipher) == _SUCCESS) { padapter->securitypriv.dot11AuthAlgrthm= dot11AuthAlgrthm_8021X; padapter->securitypriv.ndisauthtype=Ndis802_11AuthModeWPA2PSK; _rtw_memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen); } switch(group_cipher) { case WPA_CIPHER_NONE: padapter->securitypriv.dot118021XGrpPrivacy=_NO_PRIVACY_; padapter->securitypriv.ndisencryptstatus=Ndis802_11EncryptionDisabled; break; case WPA_CIPHER_WEP40: padapter->securitypriv.dot118021XGrpPrivacy=_WEP40_; padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled; break; case WPA_CIPHER_TKIP: padapter->securitypriv.dot118021XGrpPrivacy=_TKIP_; padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled; break; case WPA_CIPHER_CCMP: padapter->securitypriv.dot118021XGrpPrivacy=_AES_; padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled; break; case WPA_CIPHER_WEP104: padapter->securitypriv.dot118021XGrpPrivacy=_WEP104_; padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled; break; } switch(pairwise_cipher) { case WPA_CIPHER_NONE: padapter->securitypriv.dot11PrivacyAlgrthm=_NO_PRIVACY_; padapter->securitypriv.ndisencryptstatus=Ndis802_11EncryptionDisabled; break; case WPA_CIPHER_WEP40: padapter->securitypriv.dot11PrivacyAlgrthm=_WEP40_; padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled; break; case WPA_CIPHER_TKIP: padapter->securitypriv.dot11PrivacyAlgrthm=_TKIP_; padapter->securitypriv.ndisencryptstatus = 
Ndis802_11Encryption2Enabled; break; case WPA_CIPHER_CCMP: padapter->securitypriv.dot11PrivacyAlgrthm=_AES_; padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled; break; case WPA_CIPHER_WEP104: padapter->securitypriv.dot11PrivacyAlgrthm=_WEP104_; padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled; break; } padapter->securitypriv.wps_phase = _FALSE; {//set wps_ie u16 cnt = 0; u8 eid, wps_oui[4]={0x0,0x50,0xf2,0x04}; while( cnt < ielen ) { eid = buf[cnt]; if((eid==_VENDOR_SPECIFIC_IE_)&&(_rtw_memcmp(&buf[cnt+2], wps_oui, 4)==_TRUE)) { DBG_8192C("SET WPS_IE\n"); padapter->securitypriv.wps_ie_len = ( (buf[cnt+1]+2) < (MAX_WPA_IE_LEN<<2)) ? (buf[cnt+1]+2):(MAX_WPA_IE_LEN<<2); _rtw_memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len); padapter->securitypriv.wps_phase = _TRUE; #ifdef CONFIG_P2P if(rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_OK)) { rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_ING); } #endif //CONFIG_P2P DBG_8192C("SET WPS_IE, wps_phase==_TRUE\n"); cnt += buf[cnt+1]+2; break; } else { cnt += buf[cnt+1]+2; //goto next } } } } RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_set_wpa_ie: pairwise_cipher=0x%08x padapter->securitypriv.ndisencryptstatus=%d padapter->securitypriv.ndisauthtype=%d\n", pairwise_cipher, padapter->securitypriv.ndisencryptstatus, padapter->securitypriv.ndisauthtype)); exit: if (buf) rtw_mfree(buf, ielen); return ret; } static int rtw_wx_get_name(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); u16 cap; u32 ht_ielen = 0; char *p; u8 ht_cap=_FALSE; struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); WLAN_BSSID_EX *pcur_bss = &pmlmepriv->cur_network.network; NDIS_802_11_RATES_EX* prates = NULL; RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("cmd_code=%x\n", info->cmd)); _func_enter_; if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == _TRUE) { 
//parsing HT_CAP_IE p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12); if(p && ht_ielen>0) { ht_cap = _TRUE; } prates = &pcur_bss->SupportedRates; if (rtw_is_cckratesonly_included((u8*)prates) == _TRUE) { if(ht_cap == _TRUE) snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bn"); else snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b"); } else if ((rtw_is_cckrates_included((u8*)prates)) == _TRUE) { if(ht_cap == _TRUE) snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bgn"); else snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg"); } else { if(pcur_bss->Configuration.DSConfig > 14) { if(ht_cap == _TRUE) snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11an"); else snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11a"); } else { if(ht_cap == _TRUE) snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn"); else snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g"); } } } else { //prates = &padapter->registrypriv.dev_network.SupportedRates; //snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g"); snprintf(wrqu->name, IFNAMSIZ, "unassociated"); } _func_exit_; return 0; } static int rtw_wx_set_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_wx_set_freq\n")); _func_exit_; return 0; } static int rtw_wx_get_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); WLAN_BSSID_EX *pcur_bss = &pmlmepriv->cur_network.network; if(check_fwstate(pmlmepriv, _FW_LINKED) == _TRUE) { //wrqu->freq.m = ieee80211_wlan_frequencies[pcur_bss->Configuration.DSConfig-1] * 100000; wrqu->freq.m = rtw_ch2freq(pcur_bss->Configuration.DSConfig) * 100000; wrqu->freq.e = 1; wrqu->freq.i = pcur_bss->Configuration.DSConfig; } else{ wrqu->freq.m = rtw_ch2freq(padapter->mlmeextpriv.cur_channel) * 100000; wrqu->freq.e = 1; wrqu->freq.i = 
padapter->mlmeextpriv.cur_channel; } return 0; } static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); NDIS_802_11_NETWORK_INFRASTRUCTURE networkType ; int ret = 0; _func_enter_; if(_FAIL == rfpwrstate_check(padapter)) { ret= -EPERM; goto exit; } if (padapter->hw_init_completed==_FALSE){ ret = -EPERM; goto exit; } switch(wrqu->mode) { case IW_MODE_AUTO: networkType = Ndis802_11AutoUnknown; DBG_8192C("set_mode = IW_MODE_AUTO\n"); break; case IW_MODE_ADHOC: networkType = Ndis802_11IBSS; DBG_8192C("set_mode = IW_MODE_ADHOC\n"); break; case IW_MODE_MASTER: networkType = Ndis802_11APMode; DBG_8192C("set_mode = IW_MODE_MASTER\n"); //rtw_setopmode_cmd(padapter, networkType); break; case IW_MODE_INFRA: networkType = Ndis802_11Infrastructure; DBG_8192C("set_mode = IW_MODE_INFRA\n"); break; default : ret = -EINVAL;; RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,("\n Mode: %s is not supported \n", iw_operation_mode[wrqu->mode])); goto exit; } /* if(Ndis802_11APMode == networkType) { rtw_setopmode_cmd(padapter, networkType); } else { rtw_setopmode_cmd(padapter, Ndis802_11AutoUnknown); } */ if (rtw_set_802_11_infrastructure_mode(padapter, networkType) ==_FALSE){ ret = -EPERM; goto exit; } rtw_setopmode_cmd(padapter, networkType); exit: _func_exit_; return ret; } static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,(" rtw_wx_get_mode \n")); _func_enter_; if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == _TRUE) { wrqu->mode = IW_MODE_INFRA; } else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE) || (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE)) { wrqu->mode = IW_MODE_ADHOC; } else if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE) { 
wrqu->mode = IW_MODE_MASTER; } else { wrqu->mode = IW_MODE_AUTO; } _func_exit_; return 0; } static int rtw_wx_set_pmkid(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); u8 j,blInserted = _FALSE; int intReturn = _FALSE; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv *psecuritypriv = &padapter->securitypriv; struct iw_pmksa* pPMK = ( struct iw_pmksa* ) extra; u8 strZeroMacAddress[ ETH_ALEN ] = { 0x00 }; u8 strIssueBssid[ ETH_ALEN ] = { 0x00 }; /* struct iw_pmksa { __u32 cmd; struct sockaddr bssid; __u8 pmkid[IW_PMKID_LEN]; //IW_PMKID_LEN=16 } There are the BSSID information in the bssid.sa_data array. If cmd is IW_PMKSA_FLUSH, it means the wpa_suppplicant wants to clear all the PMKID information. If cmd is IW_PMKSA_ADD, it means the wpa_supplicant wants to add a PMKID/BSSID to driver. If cmd is IW_PMKSA_REMOVE, it means the wpa_supplicant wants to remove a PMKID/BSSID from driver. */ _rtw_memcpy( strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN); if ( pPMK->cmd == IW_PMKSA_ADD ) { DBG_8192C( "[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n" ); if ( _rtw_memcmp( strIssueBssid, strZeroMacAddress, ETH_ALEN ) == _TRUE ) { return( intReturn ); } else { intReturn = _TRUE; } blInserted = _FALSE; //overwrite PMKID for(j=0 ; j<NUM_PMKID_CACHE; j++) { if( _rtw_memcmp( psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN) ==_TRUE ) { // BSSID is matched, the same AP => rewrite with new PMKID. 
DBG_8192C( "[rtw_wx_set_pmkid] BSSID exists in the PMKList.\n" ); _rtw_memcpy( psecuritypriv->PMKIDList[j].PMKID, pPMK->pmkid, IW_PMKID_LEN); psecuritypriv->PMKIDList[ j ].bUsed = _TRUE; psecuritypriv->PMKIDIndex = j+1; blInserted = _TRUE; break; } } if(!blInserted) { // Find a new entry DBG_8192C( "[rtw_wx_set_pmkid] Use the new entry index = %d for this PMKID.\n", psecuritypriv->PMKIDIndex ); _rtw_memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN); _rtw_memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN); psecuritypriv->PMKIDList[ psecuritypriv->PMKIDIndex ].bUsed = _TRUE; psecuritypriv->PMKIDIndex++ ; if(psecuritypriv->PMKIDIndex==16) { psecuritypriv->PMKIDIndex =0; } } } else if ( pPMK->cmd == IW_PMKSA_REMOVE ) { DBG_8192C( "[rtw_wx_set_pmkid] IW_PMKSA_REMOVE!\n" ); intReturn = _TRUE; for(j=0 ; j<NUM_PMKID_CACHE; j++) { if( _rtw_memcmp( psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN) ==_TRUE ) { // BSSID is matched, the same AP => Remove this PMKID information and reset it. 
_rtw_memset( psecuritypriv->PMKIDList[ j ].Bssid, 0x00, ETH_ALEN ); psecuritypriv->PMKIDList[ j ].bUsed = _FALSE; break; } } } else if ( pPMK->cmd == IW_PMKSA_FLUSH ) { DBG_8192C( "[rtw_wx_set_pmkid] IW_PMKSA_FLUSH!\n" ); _rtw_memset( &psecuritypriv->PMKIDList[ 0 ], 0x00, sizeof( RT_PMKID_LIST ) * NUM_PMKID_CACHE ); psecuritypriv->PMKIDIndex = 0; intReturn = _TRUE; } return( intReturn ); } static int rtw_wx_get_sens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { #ifdef CONFIG_PLATFORM_ROCKCHIPS _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); /* * 20110311 Commented by Jeff * For rockchip platform's wpa_driver_wext_get_rssi */ if(check_fwstate(pmlmepriv, _FW_LINKED) == _TRUE) { //wrqu->sens.value=-padapter->recvpriv.signal_strength; wrqu->sens.value=-padapter->recvpriv.rssi; //DBG_871X("%s: %d\n", __FUNCTION__, wrqu->sens.value); wrqu->sens.fixed = 0; /* no auto select */ } else #endif { wrqu->sens.value = 0; wrqu->sens.fixed = 0; /* no auto select */ wrqu->sens.disabled = 1; } return 0; } static int rtw_wx_get_range(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_range *range = (struct iw_range *)extra; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; u16 val; int i; _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("rtw_wx_get_range. cmd_code=%x\n", info->cmd)); wrqu->data.length = sizeof(*range); _rtw_memset(range, 0, sizeof(*range)); /* Let's try to keep this struct in the same order as in * linux/include/wireless.h */ /* TODO: See what values we can set, and remove the ones we can't * set, or fill them with some default data. */ /* ~5 Mb/s real (802.11b) */ range->throughput = 5 * 1000 * 1000; // TODO: Not used in 802.11b? // range->min_nwid; /* Minimal NWID we are able to set */ // TODO: Not used in 802.11b? 
// range->max_nwid; /* Maximal NWID we are able to set */ /* Old Frequency (backward compat - moved lower ) */ // range->old_num_channels; // range->old_num_frequency; // range->old_freq[6]; /* Filler to keep "version" at the same offset */ /* signal level threshold range */ //percent values between 0 and 100. range->max_qual.qual = 100; range->max_qual.level = 100; range->max_qual.noise = 100; range->max_qual.updated = 7; /* Updated all three */ range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */ /* TODO: Find real 'good' to 'bad' threshol value for RSSI */ range->avg_qual.level = 20 + -98; range->avg_qual.noise = 0; range->avg_qual.updated = 7; /* Updated all three */ range->num_bitrates = RATE_COUNT; for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) { range->bitrate[i] = rtw_rates[i]; } range->min_frag = MIN_FRAG_THRESHOLD; range->max_frag = MAX_FRAG_THRESHOLD; range->pm_capa = 0; range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 16; // range->retry_capa; /* What retry options are supported */ // range->retry_flags; /* How to decode max/min retry limit */ // range->r_time_flags; /* How to decode max/min retry life */ // range->min_retry; /* Minimal number of retries */ // range->max_retry; /* Maximal number of retries */ // range->min_r_time; /* Minimal retry lifetime */ // range->max_r_time; /* Maximal retry lifetime */ for (i = 0, val = 0; i < MAX_CHANNEL_NUM; i++) { // Include only legal frequencies for some countries if(pmlmeext->channel_set[i].ChannelNum != 0) { range->freq[val].i = pmlmeext->channel_set[i].ChannelNum; range->freq[val].m = rtw_ch2freq(pmlmeext->channel_set[i].ChannelNum) * 100000; range->freq[val].e = 1; val++; } if (val == IW_MAX_FREQUENCIES) break; } range->num_channels = val; range->num_frequency = val; // Commented by Albert 2009/10/13 // The following code will proivde the security capability to network manager. 
// If the driver doesn't provide this capability to network manager, // the WPA/WPA2 routers can't be choosen in the network manager. /* #define IW_SCAN_CAPA_NONE 0x00 #define IW_SCAN_CAPA_ESSID 0x01 #define IW_SCAN_CAPA_BSSID 0x02 #define IW_SCAN_CAPA_CHANNEL 0x04 #define IW_SCAN_CAPA_MODE 0x08 #define IW_SCAN_CAPA_RATE 0x10 #define IW_SCAN_CAPA_TYPE 0x20 #define IW_SCAN_CAPA_TIME 0x40 */ #if WIRELESS_EXT > 17 range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2| IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP; #endif #ifdef IW_SCAN_CAPA_ESSID //WIRELESS_EXT > 21 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE |IW_SCAN_CAPA_BSSID| IW_SCAN_CAPA_CHANNEL|IW_SCAN_CAPA_MODE|IW_SCAN_CAPA_RATE; #endif _func_exit_; return 0; } //set bssid flow //s1. rtw_set_802_11_infrastructure_mode() //s2. rtw_set_802_11_authentication_mode() //s3. set_802_11_encryption_mode() //s4. rtw_set_802_11_bssid() static int rtw_wx_set_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *awrq, char *extra) { _irqL irqL; uint ret = 0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct sockaddr *temp = (struct sockaddr *)awrq; struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); _list *phead; u8 *dst_bssid, *src_bssid; _queue *queue = &(pmlmepriv->scanned_queue); struct wlan_network *pnetwork = NULL; NDIS_802_11_AUTHENTICATION_MODE authmode; _func_enter_; if(_FAIL == rfpwrstate_check(padapter)) { ret= -1; goto exit; } if(!padapter->bup){ ret = -1; goto exit; } if (temp->sa_family != ARPHRD_ETHER){ ret = -EINVAL; goto exit; } authmode = padapter->securitypriv.ndisauthtype; _enter_critical_bh(&queue->lock, &irqL); phead = get_list_head(queue); pmlmepriv->pscanned = get_next(phead); while (1) { if ((rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) == _TRUE) { #if 0 ret = -EINVAL; goto exit; if(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) { rtw_set_802_11_bssid(padapter, temp->sa_data); goto exit; } else { ret = -EINVAL; goto exit; } #endif break; 
} pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list); pmlmepriv->pscanned = get_next(pmlmepriv->pscanned); dst_bssid = pnetwork->network.MacAddress; src_bssid = temp->sa_data; if ((_rtw_memcmp(dst_bssid, src_bssid, ETH_ALEN)) == _TRUE) { if(!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) { ret = -1; _exit_critical_bh(&queue->lock, &irqL); goto exit; } break; } } _exit_critical_bh(&queue->lock, &irqL); rtw_set_802_11_authentication_mode(padapter, authmode); //set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); if (rtw_set_802_11_bssid(padapter, temp->sa_data) == _FALSE) { ret = -1; goto exit; } exit: _func_exit_; return ret; } static int rtw_wx_get_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); WLAN_BSSID_EX *pcur_bss = &pmlmepriv->cur_network.network; wrqu->ap_addr.sa_family = ARPHRD_ETHER; _rtw_memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("rtw_wx_get_wap\n")); _func_enter_; if ( ((check_fwstate(pmlmepriv, _FW_LINKED)) == _TRUE) || ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == _TRUE) || ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) == _TRUE) ) { _rtw_memcpy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress, ETH_ALEN); } else { _rtw_memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); } _func_exit_; return 0; } static int rtw_wx_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { #if 0 /* SIOCSIWMLME data */ struct iw_mlme { __u16 cmd; /* IW_MLME_* */ __u16 reason_code; struct sockaddr addr; }; #endif int ret=0; u16 reason; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct iw_mlme *mlme = (struct iw_mlme *) extra; if(mlme==NULL) return -1; reason = cpu_to_le16(mlme->reason_code); switch (mlme->cmd) { case IW_MLME_DEAUTH: 
if(!rtw_set_802_11_disassociate(padapter)) ret = -1; break; case IW_MLME_DISASSOC: if(!rtw_set_802_11_disassociate(padapter)) ret = -1; break; default: return -EOPNOTSUPP; } return ret; } int rfpwrstate_check(_adapter *padapter) { struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; int ret = _SUCCESS; //block here for system suspend only if((pwrpriv->bInternalAutoSuspend == _FALSE) && (_TRUE == pwrpriv->bInSuspend )){ ret = _FAIL; goto exit; } if( pwrpriv->power_mgnt == PS_MODE_ACTIVE ) { goto exit; } if((pwrpriv->bInternalAutoSuspend == _TRUE) && (padapter->net_closed == _TRUE)) { ret = _FAIL; goto exit; } if (check_fwstate(pmlmepriv, _FW_LINKED) == _TRUE) { ret = _SUCCESS; goto exit; } if(rf_off == pwrpriv->rf_pwrstate ) { #ifdef CONFIG_USB_HCI #ifdef CONFIG_AUTOSUSPEND if(pwrpriv->brfoffbyhw==_TRUE) { DBG_8192C("hw still in rf_off state ...........\n"); ret = _FAIL; goto exit; } else if(padapter->registrypriv.usbss_enable) { DBG_8192C("\n %s call autoresume_enter....\n",__FUNCTION__); if(_FAIL == autoresume_enter(padapter)) { DBG_8192C("======> autoresume fail.............\n"); ret = _FAIL; goto exit; } } else #endif #endif { #ifdef CONFIG_IPS DBG_8192C("\n %s call ips_leave....\n",__FUNCTION__); if(_FAIL == ips_leave(padapter)) { DBG_8192C("======> ips_leave fail.............\n"); ret = _FAIL; goto exit; } #endif } }else { //Jeff: reset timer to avoid falling ips or selective suspend soon if(pwrpriv->bips_processing == _FALSE) rtw_set_pwr_state_check_timer(pwrpriv); } exit: return ret; } static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { u8 _status = _FALSE; int ret = 0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv= &padapter->mlmepriv; NDIS_802_11_SSID ssid[RTW_SSID_SCAN_AMOUNT]; _irqL irqL; #ifdef CONFIG_P2P struct wifidirect_info *pwdinfo= &(padapter->wdinfo); #endif //CONFIG_P2P 
RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("rtw_wx_set_scan\n")); _func_enter_; #ifdef DBG_IOCTL DBG_871X("DBG_IOCTL %s:%d\n",__FUNCTION__, __LINE__); #endif #ifdef CONFIG_MP_INCLUDED if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == _TRUE) { ret = -1; goto exit; } #endif if(_FAIL == rfpwrstate_check(padapter)) { ret= -1; goto exit; } if(padapter->bDriverStopped){ DBG_8192C("bDriverStopped=%d\n", padapter->bDriverStopped); ret= -1; goto exit; } if(!padapter->bup){ ret = -1; goto exit; } if (padapter->hw_init_completed==_FALSE){ ret = -1; goto exit; } // When Busy Traffic, driver do not site survey. So driver return success. // wpa_supplicant will not issue SIOCSIWSCAN cmd again after scan timeout. // modify by thomas 2011-02-22. if (pmlmepriv->LinkDetectInfo.bBusyTraffic == _TRUE) { indicate_wx_scan_complete_event(padapter); goto exit; } if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == _TRUE) { indicate_wx_scan_complete_event(padapter); goto exit; } // Mareded by Albert 20101103 // For the DMP WiFi Display project, the driver won't to scan because // the pmlmepriv->scan_interval is always equal to 3. // So, the wpa_supplicant won't find out the WPS SoftAP. 
/* if(pmlmepriv->scan_interval>10) pmlmepriv->scan_interval = 0; if(pmlmepriv->scan_interval > 0) { DBG_8192C("scan done\n"); ret = 0; goto exit; } */ #ifdef CONFIG_P2P if(!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) && !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE)) { rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH); pwdinfo->find_phase_state_exchange_cnt = 0; rtw_free_network_queue(padapter, _TRUE); } #endif //CONFIG_P2P _rtw_memset(ssid, 0, sizeof(NDIS_802_11_SSID)*RTW_SSID_SCAN_AMOUNT); #if WIRELESS_EXT >= 17 if (wrqu->data.length == sizeof(struct iw_scan_req)) { struct iw_scan_req *req = (struct iw_scan_req *)extra; if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { int len = min((int)req->essid_len, IW_ESSID_MAX_SIZE); _rtw_memcpy(ssid[0].Ssid, req->essid, len); ssid[0].SsidLength = len; DBG_8192C("IW_SCAN_THIS_ESSID, ssid=%s, len=%d\n", req->essid, req->essid_len); _enter_critical_bh(&pmlmepriv->lock, &irqL); _status = rtw_sitesurvey_cmd(padapter, ssid, 1); _exit_critical_bh(&pmlmepriv->lock, &irqL); } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { DBG_8192C("rtw_wx_set_scan, req->scan_type == IW_SCAN_TYPE_PASSIVE\n"); } } else #endif if( wrqu->data.length >= WEXT_CSCAN_HEADER_SIZE && _rtw_memcmp(extra, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE) == _TRUE ) { int len = wrqu->data.length -WEXT_CSCAN_HEADER_SIZE; char *pos = extra+WEXT_CSCAN_HEADER_SIZE; char section; char sec_len; int ssid_index = 0; //DBG_871X("%s COMBO_SCAN header is recognized\n", __FUNCTION__); while(len >= 1) { section = *(pos++); len-=1; switch(section) { case WEXT_CSCAN_SSID_SECTION: //DBG_871X("WEXT_CSCAN_SSID_SECTION\n"); if(len < 1) { len = 0; break; } sec_len = *(pos++); len-=1; if(sec_len>0 && sec_len<=len) { ssid[ssid_index].SsidLength = sec_len; _rtw_memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength); //DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __FUNCTION__ // , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); ssid_index++; } pos+=sec_len; 
len-=sec_len; break; case WEXT_CSCAN_CHANNEL_SECTION: //DBG_871X("WEXT_CSCAN_CHANNEL_SECTION\n"); pos+=1; len-=1; break; case WEXT_CSCAN_ACTV_DWELL_SECTION: //DBG_871X("WEXT_CSCAN_ACTV_DWELL_SECTION\n"); pos+=2; len-=2; break; case WEXT_CSCAN_PASV_DWELL_SECTION: //DBG_871X("WEXT_CSCAN_PASV_DWELL_SECTION\n"); pos+=2; len-=2; break; case WEXT_CSCAN_HOME_DWELL_SECTION: //DBG_871X("WEXT_CSCAN_HOME_DWELL_SECTION\n"); pos+=2; len-=2; break; case WEXT_CSCAN_TYPE_SECTION: //DBG_871X("WEXT_CSCAN_TYPE_SECTION\n"); pos+=1; len-=1; break; #if 0 case WEXT_CSCAN_NPROBE_SECTION: DBG_871X("WEXT_CSCAN_NPROBE_SECTION\n"); break; #endif default: //DBG_871X("Unknown CSCAN section %c\n", section); len = 0; // stop parsing } //DBG_871X("len:%d\n", len); } //jeff: it has still some scan paramater to parse, we only do this now... _enter_critical_bh(&pmlmepriv->lock, &irqL); _status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT); _exit_critical_bh(&pmlmepriv->lock, &irqL); } else { _status = rtw_set_802_11_bssid_list_scan(padapter); } if(_status == _FALSE) ret = -1; exit: #ifdef DBG_IOCTL DBG_871X("DBG_IOCTL %s:%d return %d\n",__FUNCTION__, __LINE__, ret); #endif _func_exit_; return ret; } static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { _irqL irqL; _list *plist, *phead; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); _queue *queue = &(pmlmepriv->scanned_queue); struct wlan_network *pnetwork = NULL; char *ev = extra; char *stop = ev + wrqu->data.length; u32 ret = 0; u32 cnt=0; u32 wait_for_surveydone; sint wait_status; #ifdef CONFIG_P2P struct wifidirect_info* pwdinfo = &padapter->wdinfo; #endif //CONFIG_P2P RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("rtw_wx_get_scan\n")); RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_info_, (" Start of Query SIOCGIWSCAN .\n")); _func_enter_; #ifdef DBG_IOCTL DBG_871X("DBG_IOCTL %s:%d\n",__FUNCTION__, __LINE__); #endif 
if(padapter->pwrctrlpriv.brfoffbyhw && padapter->bDriverStopped) { ret = -EINVAL; goto exit; } #ifdef CONFIG_P2P if(!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) { // P2P is enabled wait_for_surveydone = 200; } else { // P2P is disabled wait_for_surveydone = 100; } #else { wait_for_surveydone = 100; } #endif //CONFIG_P2P wait_status = _FW_UNDER_SURVEY #ifndef CONFIG_ANDROID |_FW_UNDER_LINKING #endif ; while(check_fwstate(pmlmepriv, wait_status) == _TRUE) { rtw_msleep_os(30); cnt++; if(cnt > wait_for_surveydone ) break; } _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL); phead = get_list_head(queue); plist = get_next(phead); while(1) { if (rtw_end_of_queue_search(phead,plist)== _TRUE) break; if((stop - ev) < SCAN_ITEM_SIZE) { ret = -E2BIG; break; } pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); //report network only if the current channel set contains the channel to which this network belongs if( _TRUE == rtw_is_channel_set_contains_channel(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) #ifdef CONFIG_VALIDATE_SSID && _TRUE == rtw_validate_ssid(&(pnetwork->network.Ssid)) #endif ) { ev=translate_scan(padapter, a, pnetwork, ev, stop); } plist = get_next(plist); } _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL); wrqu->data.length = ev-extra; wrqu->data.flags = 0; exit: _func_exit_; #ifdef DBG_IOCTL DBG_871X("DBG_IOCTL %s:%d return %d\n",__FUNCTION__, __LINE__, ret); #endif return ret ; } //set ssid flow //s1. rtw_set_802_11_infrastructure_mode() //s2. set_802_11_authenticaion_mode() //s3. set_802_11_encryption_mode() //s4. 
rtw_set_802_11_ssid() static int rtw_wx_set_essid(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { _irqL irqL; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; _queue *queue = &pmlmepriv->scanned_queue; struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv; _list *phead; s8 status = _TRUE; struct wlan_network *pnetwork = NULL; NDIS_802_11_AUTHENTICATION_MODE authmode; NDIS_802_11_SSID ndis_ssid; u8 *dst_ssid, *src_ssid; uint ret = 0, len; _func_enter_; #ifdef DBG_IOCTL DBG_871X("DBG_IOCTL %s:%d\n",__FUNCTION__, __LINE__); #endif RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("+rtw_wx_set_essid: fw_state=0x%08x\n", get_fwstate(pmlmepriv))); if(_FAIL == rfpwrstate_check(padapter)) { ret = -1; goto exit; } if(!padapter->bup){ ret = -1; goto exit; } #if WIRELESS_EXT <= 20 if ((wrqu->essid.length-1) > IW_ESSID_MAX_SIZE){ #else if (wrqu->essid.length > IW_ESSID_MAX_SIZE){ #endif ret= -E2BIG; goto exit; } if(check_fwstate(pmlmepriv, WIFI_AP_STATE)) { ret = -1; goto exit; } authmode = padapter->securitypriv.ndisauthtype; DBG_8192C("=>%s\n",__FUNCTION__); if (wrqu->essid.flags && wrqu->essid.length) { // Commented by Albert 20100519 // We got the codes in "set_info" function of iwconfig source code. // ========================================= // wrq.u.essid.length = strlen(essid) + 1; // if(we_kernel_version > 20) // wrq.u.essid.length--; // ========================================= // That means, if the WIRELESS_EXT less than or equal to 20, the correct ssid len should subtract 1. #if WIRELESS_EXT <= 20 len = ((wrqu->essid.length-1) < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length-1) : IW_ESSID_MAX_SIZE; #else len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ? 
wrqu->essid.length : IW_ESSID_MAX_SIZE; #endif DBG_8192C("ssid=%s, len=%d\n", extra, wrqu->essid.length); _rtw_memset(&ndis_ssid, 0, sizeof(NDIS_802_11_SSID)); ndis_ssid.SsidLength = len; _rtw_memcpy(ndis_ssid.Ssid, extra, len); src_ssid = ndis_ssid.Ssid; RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid=[%s]\n", src_ssid)); _enter_critical_bh(&queue->lock, &irqL); phead = get_list_head(queue); pmlmepriv->pscanned = get_next(phead); while (1) { if (rtw_end_of_queue_search(phead, pmlmepriv->pscanned) == _TRUE) { #if 0 if(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) { rtw_set_802_11_ssid(padapter, &ndis_ssid); goto exit; } else { RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_info_,("rtw_wx_set_ssid(): scanned_queue is empty\n")); ret = -EINVAL; goto exit; } #endif RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_warning_, ("rtw_wx_set_essid: scan_q is empty, set ssid to check if scanning again!\n")); break; } pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list); pmlmepriv->pscanned = get_next(pmlmepriv->pscanned); dst_ssid = pnetwork->network.Ssid.Ssid; RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: dst_ssid=%s\n", pnetwork->network.Ssid.Ssid)); if ((_rtw_memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength) == _TRUE) && (pnetwork->network.Ssid.SsidLength==ndis_ssid.SsidLength)) { RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: find match, set infra mode\n")); if(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) { if(pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode) continue; } if (rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode) == _FALSE) { ret = -1; _exit_critical_bh(&queue->lock, &irqL); goto exit; } break; } } _exit_critical_bh(&queue->lock, &irqL); RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("set ssid: set_802_11_auth. 
mode=%d\n", authmode)); rtw_set_802_11_authentication_mode(padapter, authmode); //set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); if (rtw_set_802_11_ssid(padapter, &ndis_ssid) == _FALSE) { ret = -1; goto exit; } } exit: DBG_8192C("<=%s, ret %d\n",__FUNCTION__, ret); #ifdef DBG_IOCTL DBG_871X("DBG_IOCTL %s:%d return %d\n",__FUNCTION__, __LINE__, ret); #endif _func_exit_; return ret; } static int rtw_wx_get_essid(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { u32 len,ret = 0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); WLAN_BSSID_EX *pcur_bss = &pmlmepriv->cur_network.network; RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,("rtw_wx_get_essid\n")); _func_enter_; if ( (check_fwstate(pmlmepriv, _FW_LINKED) == _TRUE) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) { len = pcur_bss->Ssid.SsidLength; wrqu->essid.length = len; _rtw_memcpy(extra, pcur_bss->Ssid.Ssid, len); wrqu->essid.flags = 1; } else { ret = -1; goto exit; } exit: _func_exit_; return ret; } static int rtw_wx_set_rate(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { int i, ret = 0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); u8 datarates[NumRates]; u32 target_rate = wrqu->bitrate.value; u32 fixed = wrqu->bitrate.fixed; u32 ratevalue = 0; u8 mpdatarate[NumRates]={11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff}; _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,(" rtw_wx_set_rate \n")); RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_info_,("target_rate = %d, fixed = %d\n",target_rate,fixed)); if(target_rate == -1){ ratevalue = 11; goto set_rate; } target_rate = target_rate/100000; switch(target_rate){ case 10: ratevalue = 0; break; case 20: ratevalue = 1; break; case 55: ratevalue = 2; break; case 60: ratevalue = 3; break; case 90: ratevalue = 4; break; case 110: ratevalue = 5; break; case 120: ratevalue = 6; 
break; case 180: ratevalue = 7; break; case 240: ratevalue = 8; break; case 360: ratevalue = 9; break; case 480: ratevalue = 10; break; case 540: ratevalue = 11; break; default: ratevalue = 11; break; } set_rate: for(i=0; i<NumRates; i++) { if(ratevalue==mpdatarate[i]) { datarates[i] = mpdatarate[i]; if(fixed == 0) break; } else{ datarates[i] = 0xff; } RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_info_,("datarate_inx=%d\n",datarates[i])); } if( rtw_setdatarate_cmd(padapter, datarates) !=_SUCCESS){ RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,("rtw_wx_set_rate Fail!!!\n")); ret = -1; } _func_exit_; return ret; } static int rtw_wx_get_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int i; u8 *p; u16 rate = 0, max_rate = 0, ht_cap=_FALSE; u32 ht_ielen = 0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; WLAN_BSSID_EX *pcur_bss = &pmlmepriv->cur_network.network; struct rtw_ieee80211_ht_cap *pht_capie; u8 bw_40MHz=0, short_GI=0; u16 mcs_rate=0; u8 rf_type = 0; struct registry_priv *pregpriv = &padapter->registrypriv; i=0; #ifdef CONFIG_MP_INCLUDED if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == _TRUE) return -1; #endif if((check_fwstate(pmlmepriv, _FW_LINKED) == _TRUE) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) { p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12); if(p && ht_ielen>0) { ht_cap = _TRUE; pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2); _rtw_memcpy(&mcs_rate , pht_capie->supp_mcs_set, 2); bw_40MHz = (pht_capie->cap_info&IEEE80211_HT_CAP_SUP_WIDTH) ? 1:0; short_GI = (pht_capie->cap_info&(IEEE80211_HT_CAP_SGI_20|IEEE80211_HT_CAP_SGI_40)) ? 
1:0; } while( (pcur_bss->SupportedRates[i]!=0) && (pcur_bss->SupportedRates[i]!=0xFF)) { rate = pcur_bss->SupportedRates[i]&0x7F; if(rate>max_rate) max_rate = rate; wrqu->bitrate.fixed = 0; /* no auto select */ //wrqu->bitrate.disabled = 1/; i++; } if(ht_cap == _TRUE) { #if 0 //have some issue,neet to debug - 20101008-georgia if(mcs_rate&0x8000)//MCS15 { max_rate = (bw_40MHz) ? ((short_GI)?300:270):((short_GI)?144:130); } else if(mcs_rate&0x0080)//MCS7 { max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65); } else//default MCS7 { //DBG_8192C("wx_get_rate, mcs_rate_bitmap=0x%x\n", mcs_rate); max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65); } #else padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type)); if(rf_type == RF_1T1R) max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65); else max_rate = (bw_40MHz) ? ((short_GI)?300:270):((short_GI)?144:130); #endif max_rate = max_rate*2;//Mbps/2 wrqu->bitrate.value = max_rate*500000; } else { wrqu->bitrate.value = max_rate*500000; } } else { return -1; } return 0; } static int rtw_wx_get_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_,_drv_info_,(" rtw_wx_get_rts \n")); wrqu->rts.value = padapter->registrypriv.rts_thresh; wrqu->rts.fixed = 0; /* no auto select */ //wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); _func_exit_; return 0; } static int rtw_wx_set_frag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); _func_enter_; if (wrqu->frag.disabled) padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD; else { if (wrqu->frag.value < MIN_FRAG_THRESHOLD || wrqu->frag.value > MAX_FRAG_THRESHOLD) return -EINVAL; padapter->xmitpriv.frag_len = wrqu->frag.value & ~0x1; } _func_exit_; return 0; } static int 
/* SIOCGIWFRAG handler: report the current fragmentation threshold. */
rtw_wx_get_frag(struct net_device *dev,
		struct iw_request_info *info,
		union iwreq_data *wrqu, char *extra)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);

_func_enter_;

	wrqu->frag.value = padapter->xmitpriv.frag_len;
	wrqu->frag.fixed = 0;	/* no auto select */
	//wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);

_func_exit_;

	return 0;
}

/* SIOCGIWRETRY handler: report a fixed retry value of 7, marked disabled. */
static int rtw_wx_get_retry(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	//_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);

	wrqu->retry.value = 7;
	wrqu->retry.fixed = 0;	/* no auto select */
	wrqu->retry.disabled = 1;

	return 0;
}

#if 0
#define IW_ENCODE_INDEX		0x00FF	/* Token index (if needed) */
#define IW_ENCODE_FLAGS		0xFF00	/* Flags defined below */
#define IW_ENCODE_MODE		0xF000	/* Modes defined below */
#define IW_ENCODE_DISABLED	0x8000	/* Encoding disabled */
#define IW_ENCODE_ENABLED	0x0000	/* Encoding enabled */
#define IW_ENCODE_RESTRICTED	0x4000	/* Refuse non-encoded packets */
#define IW_ENCODE_OPEN		0x2000	/* Accept non-encoded packets */
#define IW_ENCODE_NOKEY		0x0800	/* Key is write only, so not present */
#define IW_ENCODE_TEMP		0x0400	/* Temporary key */
/*
iwconfig wlan0 key on -> flags = 0x6001 -> maybe it means auto
iwconfig wlan0 key off -> flags = 0x8800
iwconfig wlan0 key open -> flags = 0x2800
iwconfig wlan0 key open 1234567890 -> flags = 0x2000
iwconfig wlan0 key restricted -> flags = 0x4800
iwconfig wlan0 key open [3] 1234567890 -> flags = 0x2003
iwconfig wlan0 key restricted [2] 1234567890 -> flags = 0x4002
iwconfig wlan0 key open [3] -> flags = 0x2803
iwconfig wlan0 key restricted [2] -> flags = 0x4802
*/
#endif

/*
 * SIOCSIWENCODE handler: configure legacy WEP.
 * Decodes the iw_point flags (key index, open/restricted/disabled mode),
 * updates securitypriv state accordingly and, when key material is given,
 * installs the WEP key via rtw_set_802_11_add_wep().
 */
static int rtw_wx_set_enc(struct net_device *dev,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *keybuf)
{
	u32 key, ret = 0;
	u32 keyindex_provided;
	NDIS_802_11_WEP	wep;
	NDIS_802_11_AUTHENTICATION_MODE authmode;

	struct iw_point *erq = &(wrqu->encoding);
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct
pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;

	DBG_8192C("+rtw_wx_set_enc, flags=0x%x\n", erq->flags);

	_rtw_memset(&wep, 0, sizeof(NDIS_802_11_WEP));

	key = erq->flags & IW_ENCODE_INDEX;

_func_enter_;

	if (erq->flags & IW_ENCODE_DISABLED)
	{
		// Encryption turned off: fall back to open-system auth, no privacy.
		DBG_8192C("EncryptionDisabled\n");
		padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
		padapter->securitypriv.dot11PrivacyAlgrthm=_NO_PRIVACY_;
		padapter->securitypriv.dot118021XGrpPrivacy=_NO_PRIVACY_;
		padapter->securitypriv.dot11AuthAlgrthm= dot11AuthAlgrthm_Open;	//open system
		authmode = Ndis802_11AuthModeOpen;
		padapter->securitypriv.ndisauthtype=authmode;

		goto exit;
	}

	if (key) {
		// Userspace supplies a 1-based key index; convert to 0-based.
		if (key > WEP_KEYS)
			return -EINVAL;
		key--;
		keyindex_provided = 1;
	}
	else
	{
		keyindex_provided = 0;
		key = padapter->securitypriv.dot11PrivacyKeyIndex;
		DBG_8192C("rtw_wx_set_enc, key=%d\n", key);
	}

	//set authentication mode
	if(erq->flags & IW_ENCODE_OPEN)
	{
		DBG_8192C("rtw_wx_set_enc():IW_ENCODE_OPEN\n");
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;//Ndis802_11EncryptionDisabled;

#ifdef CONFIG_PLATFORM_MT53XX
		padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
#else
		padapter->securitypriv.dot11AuthAlgrthm= dot11AuthAlgrthm_Open;
#endif

		padapter->securitypriv.dot11PrivacyAlgrthm=_NO_PRIVACY_;
		padapter->securitypriv.dot118021XGrpPrivacy=_NO_PRIVACY_;
		authmode = Ndis802_11AuthModeOpen;
		padapter->securitypriv.ndisauthtype=authmode;
	}
	else if(erq->flags & IW_ENCODE_RESTRICTED)
	{
		// "restricted" -> shared-key auth with WEP40 as the initial cipher.
		DBG_8192C("rtw_wx_set_enc():IW_ENCODE_RESTRICTED\n");
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;

#ifdef CONFIG_PLATFORM_MT53XX
		padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
#else
		padapter->securitypriv.dot11AuthAlgrthm= dot11AuthAlgrthm_Shared;
#endif

		padapter->securitypriv.dot11PrivacyAlgrthm=_WEP40_;
		padapter->securitypriv.dot118021XGrpPrivacy=_WEP40_;
		authmode = Ndis802_11AuthModeShared;
		padapter->securitypriv.ndisauthtype=authmode;
	}
	else
	{
DBG_8192C("rtw_wx_set_enc():erq->flags=0x%x\n", erq->flags);

		// Any other flag combination: treat as open system, no privacy yet.
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;//Ndis802_11EncryptionDisabled;
		padapter->securitypriv.dot11AuthAlgrthm= dot11AuthAlgrthm_Open;	//open system
		padapter->securitypriv.dot11PrivacyAlgrthm=_NO_PRIVACY_;
		padapter->securitypriv.dot118021XGrpPrivacy=_NO_PRIVACY_;
		authmode = Ndis802_11AuthModeOpen;
		padapter->securitypriv.ndisauthtype=authmode;
	}

	wep.KeyIndex = key;
	if (erq->length > 0)
	{
		// 5-byte keys are WEP-40; anything longer is handled as WEP-104 (13 bytes).
		wep.KeyLength = erq->length <= 5 ? 5 : 13;

		wep.Length = wep.KeyLength + FIELD_OFFSET(NDIS_802_11_WEP, KeyMaterial);
	}
	else
	{
		wep.KeyLength = 0 ;

		if(keyindex_provided == 1)// set key_id only, no given KeyMaterial(erq->length==0).
		{
			padapter->securitypriv.dot11PrivacyKeyIndex = key;

			DBG_8192C("(keyindex_provided == 1), keyid=%d, key_len=%d\n", key, padapter->securitypriv.dot11DefKeylen[key]);

			// Re-derive the privacy algorithm from the stored default key length.
			switch(padapter->securitypriv.dot11DefKeylen[key])
			{
				case 5:
					padapter->securitypriv.dot11PrivacyAlgrthm=_WEP40_;
					break;
				case 13:
					padapter->securitypriv.dot11PrivacyAlgrthm=_WEP104_;
					break;
				default:
					padapter->securitypriv.dot11PrivacyAlgrthm=_NO_PRIVACY_;
					break;
			}

			goto exit;
		}
	}

	wep.KeyIndex |= 0x80000000;	// flag this index as the transmit key

	_rtw_memcpy(wep.KeyMaterial, keybuf, wep.KeyLength);

	if (rtw_set_802_11_add_wep(padapter, &wep) == _FALSE) {
		if(rf_on == pwrpriv->rf_pwrstate )
			ret = -EOPNOTSUPP;
		goto exit;
	}

exit:

_func_exit_;

	return ret;
}

/*
 * SIOCGIWENCODE handler: report current encryption settings; for WEP the
 * default key material is copied back to userspace via keybuf.
 */
static int rtw_wx_get_enc(struct net_device *dev,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *keybuf)
{
	uint key, ret =0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *erq = &(wrqu->encoding);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);

_func_enter_;

	if(check_fwstate(pmlmepriv, _FW_LINKED) != _TRUE)
	{
		// Not linked and not an ad-hoc master: report encryption disabled.
		if(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) != _TRUE)
		{
			erq->length = 0;
			erq->flags |= IW_ENCODE_DISABLED;
			return 0;
		}
	}

	key = erq->flags & IW_ENCODE_INDEX;
	if (key) {
		if (key > WEP_KEYS)
			return -EINVAL;
		key--;
	} else
	{
		key =
padapter->securitypriv.dot11PrivacyKeyIndex;
	}

	erq->flags = key + 1;	// report the key index 1-based

	//if(padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeOpen)
	//{
	//	erq->flags |= IW_ENCODE_OPEN;
	//}

	switch(padapter->securitypriv.ndisencryptstatus)
	{
		case Ndis802_11EncryptionNotSupported:
		case Ndis802_11EncryptionDisabled:

			erq->length = 0;
			erq->flags |= IW_ENCODE_DISABLED;

			break;

		case Ndis802_11Encryption1Enabled:	// WEP

			erq->length = padapter->securitypriv.dot11DefKeylen[key];

			if(erq->length)
			{
				_rtw_memcpy(keybuf, padapter->securitypriv.dot11DefKey[key].skey, padapter->securitypriv.dot11DefKeylen[key]);

				erq->flags |= IW_ENCODE_ENABLED;

				if(padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeOpen)
				{
					erq->flags |= IW_ENCODE_OPEN;
				}
				else if(padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeShared)
				{
					erq->flags |= IW_ENCODE_RESTRICTED;
				}
			}
			else
			{
				erq->length = 0;
				erq->flags |= IW_ENCODE_DISABLED;
			}

			break;

		case Ndis802_11Encryption2Enabled:	// TKIP
		case Ndis802_11Encryption3Enabled:	// AES

			// Key material is write-only for WPA ciphers.
			erq->length = 16;
			erq->flags |= (IW_ENCODE_ENABLED | IW_ENCODE_OPEN | IW_ENCODE_NOKEY);

			break;

		default:
			erq->length = 0;
			erq->flags |= IW_ENCODE_DISABLED;

			break;
	}

_func_exit_;

	return ret;
}

/* SIOCGIWPOWER handler: power management is always reported as disabled. */
static int rtw_wx_get_power(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	//_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);

	wrqu->power.value = 0;
	wrqu->power.fixed = 0;	/* no auto select */
	wrqu->power.disabled = 1;

	return 0;
}

/* SIOCSIWGENIE handler: hand the supplied WPA/RSN IE to the driver core. */
static int rtw_wx_set_gen_ie(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	int ret;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);

	ret = rtw_set_wpa_ie(padapter, extra, wrqu->data.length);

	return ret;
}

/*
 * SIOCSIWAUTH handler: apply wpa_supplicant authentication parameters
 * (TKIP countermeasures, drop-unencrypted policy, auth algorithm, ...).
 */
static int rtw_wx_set_auth(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_param *param = (struct iw_param*)&(wrqu->param);
	int ret = 0;

	switch (param->flags & IW_AUTH_INDEX) {

	case
IW_AUTH_WPA_VERSION:
		break;
	case IW_AUTH_CIPHER_PAIRWISE:
		break;
	case IW_AUTH_CIPHER_GROUP:
		break;
	case IW_AUTH_KEY_MGMT:
		/*
		 *  ??? does not use these parameters
		 */
		break;

	case IW_AUTH_TKIP_COUNTERMEASURES:
	{
		if ( param->value )
		{	// wpa_supplicant is enabling the tkip countermeasure.
			padapter->securitypriv.btkip_countermeasure = _TRUE;
		}
		else
		{	// wpa_supplicant is disabling the tkip countermeasure.
			padapter->securitypriv.btkip_countermeasure = _FALSE;
		}
		break;
	}
	case IW_AUTH_DROP_UNENCRYPTED:
		{
			/* HACK:
			 *
			 * wpa_supplicant calls set_wpa_enabled when the driver
			 * is loaded and unloaded, regardless of if WPA is being
			 * used.  No other calls are made which can be used to
			 * determine if encryption will be used or not prior to
			 * association being expected.  If encryption is not being
			 * used, drop_unencrypted is set to false, else true -- we
			 * can use this to determine if the CAP_PRIVACY_ON bit should
			 * be set.
			 */

			if(padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption1Enabled)
			{
				break;//it means init value, or using wep, ndisencryptstatus = Ndis802_11Encryption1Enabled,
				      // then it needn't reset it;
			}

			if(param->value){
				padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
				padapter->securitypriv.dot11PrivacyAlgrthm=_NO_PRIVACY_;
				padapter->securitypriv.dot118021XGrpPrivacy=_NO_PRIVACY_;
				padapter->securitypriv.dot11AuthAlgrthm= dot11AuthAlgrthm_Open;	//open system
				padapter->securitypriv.ndisauthtype=Ndis802_11AuthModeOpen;
			}

			break;
		}

	case IW_AUTH_80211_AUTH_ALG:

#if defined(CONFIG_ANDROID) || 1
		/*
		 *  It's the starting point of a link layer connection using wpa_supplicant
		 */
		if(check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
			// Tear down any existing association before switching auth algorithm.
			rtw_disassoc_cmd(padapter);
			DBG_871X("%s...call rtw_indicate_disconnect\n ",__FUNCTION__);
			rtw_indicate_disconnect(padapter);
			rtw_free_assoc_resources(padapter, 1);
		}
#endif

		ret = wpa_set_auth_algs(dev, (u32)param->value);

		break;

	case IW_AUTH_WPA_ENABLED:

		//if(param->value)
		//	padapter->securitypriv.dot11AuthAlgrthm =
dot11AuthAlgrthm_8021X; //802.1x //else // padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;//open system //_disassociate(priv); break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: //ieee->ieee802_1x = param->value; break; case IW_AUTH_PRIVACY_INVOKED: //ieee->privacy_invoked = param->value; break; default: return -EOPNOTSUPP; } return ret; } static int rtw_wx_set_enc_ext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { char *alg_name; u32 param_len; struct ieee_param *param = NULL; struct iw_point *pencoding = &wrqu->encoding; struct iw_encode_ext *pext = (struct iw_encode_ext *)extra; int ret=0; param_len = sizeof(struct ieee_param) + pext->key_len; param = (struct ieee_param *)rtw_malloc(param_len); if (param == NULL) return -1; _rtw_memset(param, 0, param_len); param->cmd = IEEE_CMD_SET_ENCRYPTION; _rtw_memset(param->sta_addr, 0xff, ETH_ALEN); switch (pext->alg) { case IW_ENCODE_ALG_NONE: //todo: remove key //remove = 1; alg_name = "none"; break; case IW_ENCODE_ALG_WEP: alg_name = "WEP"; break; case IW_ENCODE_ALG_TKIP: alg_name = "TKIP"; break; case IW_ENCODE_ALG_CCMP: alg_name = "CCMP"; break; default: return -1; } strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN); if(pext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)//? { param->u.crypt.set_tx = 0; } if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)//? 
{
		param->u.crypt.set_tx = 1;
	}

	param->u.crypt.idx = (pencoding->flags&0x00FF) -1 ;	// 1-based -> 0-based key index

	if (pext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
	{
		_rtw_memcpy(param->u.crypt.seq, pext->rx_seq, 8);
	}

	if(pext->key_len)
	{
		param->u.crypt.key_len = pext->key_len;
		//_rtw_memcpy(param + 1, pext + 1, pext->key_len);
		// Key material immediately follows the iw_encode_ext header.
		_rtw_memcpy(param->u.crypt.key, pext + 1, pext->key_len);
	}

	if (pencoding->flags & IW_ENCODE_DISABLED)
	{
		//todo: remove key
		//remove = 1;
	}

	ret = wpa_set_encryption(dev, param, param_len);

	if(param)
	{
		rtw_mfree((u8*)param, param_len);
	}

	return ret;
}

/* SIOCGIWNICKN handler: return the fixed nickname "<WIFI@REALTEK>". */
static int rtw_wx_get_nick(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	//_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	//struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	//struct security_priv *psecuritypriv = &padapter->securitypriv;

	if(extra)
	{
		wrqu->data.length = 14;
		wrqu->data.flags = 1;
		_rtw_memcpy(extra, "<WIFI@REALTEK>", 14);
	}

	//rtw_signal_process(pid, SIGUSR1); //for test

	//dump debug info here
/*
	u32 dot11AuthAlgrthm;		// 802.11 auth, could be open, shared, and 8021x
	u32 dot11PrivacyAlgrthm;	// This specify the privacy for shared auth. algorithm.
	u32 dot118021XGrpPrivacy;	// This specify the privacy algthm.
	used for Grp key
	u32 ndisauthtype;
	u32 ndisencryptstatus;
*/

	//DBG_8192C("auth_alg=0x%x, enc_alg=0x%x, auth_type=0x%x, enc_type=0x%x\n",
	//	psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm,
	//	psecuritypriv->ndisauthtype, psecuritypriv->ndisencryptstatus);

	//DBG_8192C("enc_alg=0x%x\n", psecuritypriv->dot11PrivacyAlgrthm);
	//DBG_8192C("auth_type=0x%x\n", psecuritypriv->ndisauthtype);
	//DBG_8192C("enc_type=0x%x\n", psecuritypriv->ndisencryptstatus);

#if 0
	DBG_8192C("dbg(0x210)=0x%x\n", rtw_read32(padapter, 0x210));
	DBG_8192C("dbg(0x608)=0x%x\n", rtw_read32(padapter, 0x608));
	DBG_8192C("dbg(0x280)=0x%x\n", rtw_read32(padapter, 0x280));
	DBG_8192C("dbg(0x284)=0x%x\n", rtw_read32(padapter, 0x284));
	DBG_8192C("dbg(0x288)=0x%x\n", rtw_read32(padapter, 0x288));
	DBG_8192C("dbg(0x664)=0x%x\n", rtw_read32(padapter, 0x664));
	DBG_8192C("\n");
	DBG_8192C("dbg(0x430)=0x%x\n", rtw_read32(padapter, 0x430));
	DBG_8192C("dbg(0x438)=0x%x\n", rtw_read32(padapter, 0x438));
	DBG_8192C("dbg(0x440)=0x%x\n", rtw_read32(padapter, 0x440));
	DBG_8192C("dbg(0x458)=0x%x\n", rtw_read32(padapter, 0x458));
	DBG_8192C("dbg(0x484)=0x%x\n", rtw_read32(padapter, 0x484));
	DBG_8192C("dbg(0x488)=0x%x\n", rtw_read32(padapter, 0x488));
	DBG_8192C("dbg(0x444)=0x%x\n", rtw_read32(padapter, 0x444));
	DBG_8192C("dbg(0x448)=0x%x\n", rtw_read32(padapter, 0x448));
	DBG_8192C("dbg(0x44c)=0x%x\n", rtw_read32(padapter, 0x44c));
	DBG_8192C("dbg(0x450)=0x%x\n", rtw_read32(padapter, 0x450));
#endif

	return 0;
}

/* Private ioctl: read a 32-bit MAC register. The address is taken from
 * extra; the value is written back into extra as a hex string. */
static int rtw_wx_read32(struct net_device *dev,
			 struct iw_request_info *info,
			 union iwreq_data *wrqu, char *extra)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	u32 addr;
	u32 data32;

	addr = *(u32*)extra;
	data32 = rtw_read32(padapter, addr);
	sprintf(extra, "0x%08x", data32);

	return 0;
}

/* Private ioctl: write a 32-bit MAC register; extra holds {addr, value}. */
static int rtw_wx_write32(struct net_device *dev,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	u32 addr;
	u32 data32;

	addr =
*(u32*)extra;
	data32 = *((u32*)extra + 1);
	rtw_write32(padapter, addr, data32);

	return 0;
}

/* Private ioctl: read an RF register; extra holds {path, addr}, the value
 * is returned in extra as a hex string. */
static int rtw_wx_read_rf(struct net_device *dev,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	u32 path, addr, data32;

	path = *(u32*)extra;
	addr = *((u32*)extra + 1);
	data32 = padapter->HalFunc.read_rfreg(padapter, path, addr, 0xFFFFF);
//	DBG_8192C("%s: path=%d addr=0x%02x data=0x%05x\n", __func__, path, addr, data32);
	/*
	 * IMPORTANT!!
	 * Only when wireless private ioctl is at odd order,
	 * "extra" would be copied to user space.
	 */
	sprintf(extra, "0x%05x", data32);

	return 0;
}

/* Private ioctl: write an RF register; extra holds {path, addr, value}. */
static int rtw_wx_write_rf(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	u32 path, addr, data32;

	path = *(u32*)extra;
	addr = *((u32*)extra + 1);
	data32 = *((u32*)extra + 2);
//	DBG_8192C("%s: path=%d addr=0x%02x data=0x%05x\n", __func__, path, addr, data32);
	padapter->HalFunc.write_rfreg(padapter, path, addr, 0xFFFFF, data32);

	return 0;
}

/* Placeholder handler for unsupported private ioctls: always fails. */
static int rtw_wx_priv_null(struct net_device *dev, struct iw_request_info *a,
			    union iwreq_data *wrqu, char *b)
{
	return -1;
}

/* Dummy wext handler: rejects the request unconditionally. */
static int dummy(struct net_device *dev, struct iw_request_info *a,
		 union iwreq_data *wrqu, char *b)
{
	//_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	//struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);

	//DBG_8192C("cmd_code=%x, fwstate=0x%x\n", a->cmd, get_fwstate(pmlmepriv));

	return -1;
}

/* Private ioctl: switch the regulatory channel plan at runtime via
 * rtw_set_chplan_cmd(). */
static int rtw_wx_set_channel_plan(struct net_device *dev,
				   struct iw_request_info *info,
				   union iwreq_data *wrqu, char *extra)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct registry_priv *pregistrypriv = &padapter->registrypriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	extern int rtw_channel_plan;
	u8 channel_plan_req = (u8) (*((int *)wrqu));

#if 0
	rtw_channel_plan = (int)wrqu->data.pointer;
	pregistrypriv->channel_plan = rtw_channel_plan;
pmlmepriv->ChannelPlan = pregistrypriv->channel_plan;
#endif

	if( _SUCCESS == rtw_set_chplan_cmd(padapter, channel_plan_req, 1) )
	{
		DBG_871X("\n======== Set channel_plan = 0x%02X ========\n", pmlmepriv->ChannelPlan);
	}
	else
		return -EPERM;

	return 0;
}

/* MTK platform hook: WPS probe-request IE entry point (only traces on
 * MT53xx builds; a no-op elsewhere). */
static int rtw_wx_set_mtk_wps_probe_ie(struct net_device *dev, struct iw_request_info *a,
				       union iwreq_data *wrqu, char *b)
{
#ifdef CONFIG_PLATFORM_MT53XX
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_notice_,
		 ("WLAN IOCTL: cmd_code=%x, fwstate=0x%x\n",
		  a->cmd, get_fwstate(pmlmepriv)));
#endif
	return 0;
}

/* MTK platform hook: report RSSI (dBm) as the "sensitivity" value. */
static int rtw_wx_get_sensitivity(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *wrqu, char *buf)
{
#ifdef CONFIG_PLATFORM_MT53XX
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);

	//	Modified by Albert 20110914
	//	This is in dbm format for MTK platform.
	wrqu->qual.level = padapter->recvpriv.rssi;
	DBG_8192C(" level = %u\n",  wrqu->qual.level );
#endif
	return 0;
}

/* MTK platform hook: install a WPS IE supplied by userspace. */
static int rtw_wx_set_mtk_wps_ie(struct net_device *dev,
				 struct iw_request_info *info,
				 union iwreq_data *wrqu, char *extra)
{
#ifdef CONFIG_PLATFORM_MT53XX
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);

	return rtw_set_wpa_ie(padapter, wrqu->data.pointer, wrqu->data.length);
#else
	return 0;
#endif
}

/*
typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra);
*/

/*
 *	For all data larger than 16 octets, we need to use a
 *	pointer to memory allocated in user space.
*/
/* Driver-extension ioctl dispatcher (active only with CONFIG_DRVEXT_MODULE):
 * copies the oid parameter block from userspace, routes it to the matching
 * drvext handler, and copies query results back. Compiled to a no-op stub
 * when the module is absent. */
static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
#if 0
struct	iw_point
{
  void __user	*pointer;	/* Pointer to the data  (in user space) */
  __u16		length;		/* number of fields or size in bytes */
  __u16		flags;		/* Optional params */
};
#endif

#ifdef CONFIG_DRVEXT_MODULE
	u8 res;
	struct drvext_handler *phandler;
	struct drvext_oidparam *poidparam;
	int ret;
	u16 len;
	u8 *pparmbuf, bset;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *p = &wrqu->data;

	if( (!p->length) || (!p->pointer)){
		ret = -EINVAL;
		goto _rtw_drvext_hdl_exit;
	}

	bset = (u8)(p->flags&0xFFFF);	// non-zero flags low word -> "set" operation
	len = p->length;
	pparmbuf = (u8*)rtw_malloc(len);
	if (pparmbuf == NULL){
		ret = -ENOMEM;
		goto _rtw_drvext_hdl_exit;
	}

	if(bset)//set info
	{
		if (copy_from_user(pparmbuf, p->pointer,len)) {
			rtw_mfree(pparmbuf, len);
			ret = -EFAULT;
			goto _rtw_drvext_hdl_exit;
		}
	}
	else//query info
	{

	}

	//
	poidparam = (struct drvext_oidparam *)pparmbuf;

	RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_info_,("drvext set oid subcode [%d], len[%d], InformationBufferLength[%d]\r\n",
		 poidparam->subcode, poidparam->len, len));

	//check subcode
	if ( poidparam->subcode >= MAX_DRVEXT_HANDLERS)
	{
		RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,("no matching drvext handlers\r\n"));
		ret = -EINVAL;
		goto _rtw_drvext_hdl_exit;
	}

	if ( poidparam->subcode >= MAX_DRVEXT_OID_SUBCODES)
	{
		RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,("no matching drvext subcodes\r\n"));
		ret = -EINVAL;
		goto _rtw_drvext_hdl_exit;
	}

	phandler = drvextoidhandlers + poidparam->subcode;

	if (poidparam->len != phandler->parmsize)
	{
		RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,("no matching drvext param size %d vs %d\r\n",
			 poidparam->len , phandler->parmsize));
		ret = -EINVAL;
		goto _rtw_drvext_hdl_exit;
	}

	res = phandler->handler(&padapter->drvextpriv, bset, poidparam->data);

	if(res==0)
	{
		ret = 0;

		if (bset == 0x00) {//query info
			//_rtw_memcpy(p->pointer, pparmbuf, len);
			if (copy_to_user(p->pointer, pparmbuf,
len))
				ret = -EFAULT;
		}
	}
	else
		ret = -EFAULT;

_rtw_drvext_hdl_exit:

	return ret;

#endif

	return 0;
}

/* Debug-mode fallback for MP ioctls (used when CONFIG_MP_INCLUDED is off):
 * services register/RF read-write, GPIO trigger, BT-coexist and error-detect
 * subcodes directly against the hardware via the HAL function table. */
static void rtw_dbg_mode_hdl(_adapter *padapter, u32 id, u8 *pdata, u32 len)
{
	pRW_Reg	RegRWStruct;
	struct rf_reg_param *prfreg;
	u8 path;
	u8 offset;
	u32 value;

	DBG_8192C("%s\n", __FUNCTION__);

	switch(id)
	{
		case GEN_MP_IOCTL_SUBCODE(MP_START):
			DBG_8192C("871x_driver is only for normal mode, can't enter mp mode\n");
			break;
		case GEN_MP_IOCTL_SUBCODE(READ_REG):
			RegRWStruct = (pRW_Reg)pdata;
			// width selects an 8/16/32-bit MAC register access
			switch (RegRWStruct->width)
			{
				case 1:
					RegRWStruct->value = rtw_read8(padapter, RegRWStruct->offset);
					break;
				case 2:
					RegRWStruct->value = rtw_read16(padapter, RegRWStruct->offset);
					break;
				case 4:
					RegRWStruct->value = rtw_read32(padapter, RegRWStruct->offset);
					break;
				default:
					break;
			}

			break;
		case GEN_MP_IOCTL_SUBCODE(WRITE_REG):
			RegRWStruct = (pRW_Reg)pdata;
			switch (RegRWStruct->width)
			{
				case 1:
					rtw_write8(padapter, RegRWStruct->offset, (u8)RegRWStruct->value);
					break;
				case 2:
					rtw_write16(padapter, RegRWStruct->offset, (u16)RegRWStruct->value);
					break;
				case 4:
					rtw_write32(padapter, RegRWStruct->offset, (u32)RegRWStruct->value);
					break;
				default:
					break;
			}

			break;
		case GEN_MP_IOCTL_SUBCODE(READ_RF_REG):

			prfreg = (struct rf_reg_param *)pdata;

			path = (u8)prfreg->path;
			offset = (u8)prfreg->offset;

			value = padapter->HalFunc.read_rfreg(padapter, path, offset, 0xffffffff);

			prfreg->value = value;

			break;
		case GEN_MP_IOCTL_SUBCODE(WRITE_RF_REG):

			prfreg = (struct rf_reg_param *)pdata;

			path = (u8)prfreg->path;
			offset = (u8)prfreg->offset;
			value = prfreg->value;

			padapter->HalFunc.write_rfreg(padapter, path, offset, 0xffffffff, value);

			break;
		case GEN_MP_IOCTL_SUBCODE(TRIGGER_GPIO):
			DBG_8192C("==> trigger gpio 0\n");
			padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_TRIGGER_GPIO_0, 0);
			break;
#ifdef CONFIG_BT_COEXIST
		case GEN_MP_IOCTL_SUBCODE(SET_DM_BT):
			DBG_8192C("==> set dm_bt_coexist:%x\n",*(u8 *)pdata);
			padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_BT_SET_COEXIST, pdata);
			break;
		case
GEN_MP_IOCTL_SUBCODE(DEL_BA):
			DBG_8192C("==> delete ba:%x\n",*(u8 *)pdata);
			padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_BT_ISSUE_DELBA, pdata);
			break;
#endif
#ifdef DBG_CONFIG_ERROR_DETECT
		case GEN_MP_IOCTL_SUBCODE(GET_WIFI_STATUS):
			if(padapter->HalFunc.sreset_get_wifi_status)
				*pdata = padapter->HalFunc.sreset_get_wifi_status(padapter);
			break;
#endif
		default:
			break;
	}

}

/* MP (manufacturing/test) ioctl dispatcher: copies the mp_ioctl_param from
 * userspace, validates the subcode, invokes the matching OID handler (or
 * rtw_dbg_mode_hdl when MP support is compiled out) and copies query
 * results back to the caller. */
static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	u32 BytesRead, BytesWritten, BytesNeeded;
	struct oid_par_priv	oid_par;
	struct mp_ioctl_handler	*phandler;
	struct mp_ioctl_param	*poidparam;
	uint status=0;
	u16 len;
	u8 *pparmbuf = NULL, bset;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *p = &wrqu->data;

	//DBG_8192C("+rtw_mp_ioctl_hdl\n");

	//mutex_lock(&ioctl_mutex);

	if ((!p->length) || (!p->pointer)) {
		ret = -EINVAL;
		goto _rtw_mp_ioctl_hdl_exit;
	}

	pparmbuf = NULL;
	bset = (u8)(p->flags & 0xFFFF);	// non-zero low word of flags -> "set" operation
	len = p->length;
	pparmbuf = (u8*)rtw_malloc(len);
	if (pparmbuf == NULL){
		ret = -ENOMEM;
		goto _rtw_mp_ioctl_hdl_exit;
	}

	if (copy_from_user(pparmbuf, p->pointer, len)) {
		ret = -EFAULT;
		goto _rtw_mp_ioctl_hdl_exit;
	}

	poidparam = (struct mp_ioctl_param *)pparmbuf;

	RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
		 ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
		  poidparam->subcode, poidparam->len, len));

	if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
		RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
		ret = -EINVAL;
		goto _rtw_mp_ioctl_hdl_exit;
	}

	//DBG_8192C("%s: %d\n", __func__, poidparam->subcode);

#ifdef CONFIG_MP_INCLUDED
	phandler = mp_ioctl_hdl + poidparam->subcode;

	if ((phandler->paramsize != 0) && (poidparam->len < phandler->paramsize))
	{
		RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_,
			 ("no matching drvext param size %d vs %d\r\n",
			  poidparam->len, phandler->paramsize));
		ret = -EINVAL;
		goto _rtw_mp_ioctl_hdl_exit;
	}

	if (phandler->handler)
	{
oid_par.adapter_context = padapter;
		oid_par.oid = phandler->oid;
		oid_par.information_buf = poidparam->data;
		oid_par.information_buf_len = poidparam->len;
		oid_par.dbg = 0;

		BytesWritten = 0;
		BytesNeeded = 0;

		if (bset) {
			oid_par.bytes_rw = &BytesRead;
			oid_par.bytes_needed = &BytesNeeded;
			oid_par.type_of_oid = SET_OID;
		} else {
			oid_par.bytes_rw = &BytesWritten;
			oid_par.bytes_needed = &BytesNeeded;
			oid_par.type_of_oid = QUERY_OID;
		}

		status = phandler->handler(&oid_par);

		//todo:check status, BytesNeeded, etc.
	} else {
		DBG_8192C("rtw_mp_ioctl_hdl(): err!, subcode=%d, oid=%d, handler=%p\n",
			  poidparam->subcode, phandler->oid, phandler->handler);
		ret = -EFAULT;
		goto _rtw_mp_ioctl_hdl_exit;
	}
#else
	rtw_dbg_mode_hdl(padapter, poidparam->subcode, poidparam->data, poidparam->len);
#endif

	if (bset == 0x00) {//query info
		if (copy_to_user(p->pointer, pparmbuf, len))
			ret = -EFAULT;
	}

	if (status) {
		ret = -EFAULT;
		goto _rtw_mp_ioctl_hdl_exit;
	}

_rtw_mp_ioctl_hdl_exit:

	if (pparmbuf)
		rtw_mfree(pparmbuf, len);

	//mutex_unlock(&ioctl_mutex);

	return ret;
}

/* Private ioctl: given a BSSID (textual MAC in the first 32 bytes of the
 * user buffer), search the scan results and report whether that AP
 * advertises WPA (flags=1) or WPA2 (flags=2). The flag byte is copied back
 * at offset 32 of the user buffer. */
static int rtw_get_ap_info(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	int bssid_match, ret = 0;
	u32 cnt=0, wpa_ielen;
	_irqL	irqL;
	_list	*plist, *phead;
	unsigned char *pbuf;
	u8 bssid[ETH_ALEN];
	char data[32];
	struct wlan_network *pnetwork = NULL;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	_queue *queue = &(pmlmepriv->scanned_queue);
	struct iw_point *pdata = &wrqu->data;

	DBG_8192C("+rtw_get_aplist_info\n");

	if((padapter->bDriverStopped) || (pdata==NULL))
	{
		ret= -EINVAL;
		goto exit;
	}

	// Wait for an in-progress scan/join to finish (30 ms x 100 tries max).
	while((check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))) == _TRUE)
	{
		rtw_msleep_os(30);
		cnt++;
		if(cnt > 100)
			break;
	}

	//pdata->length = 0;//?
pdata->flags = 0;
	if(pdata->length>=32)
	{
		if(copy_from_user(data, pdata->pointer, 32))
		{
			ret= -EINVAL;
			goto exit;
		}
	}
	else
	{
		ret= -EINVAL;
		goto exit;
	}

	_enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	phead = get_list_head(queue);
	plist = get_next(phead);

	while(1)
	{
		if (rtw_end_of_queue_search(phead,plist)== _TRUE)
			break;

		pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);

		//if(hwaddr_aton_i(pdata->pointer, bssid))
		if(hwaddr_aton_i(data, bssid))
		{
			DBG_8192C("Invalid BSSID '%s'.\n", (u8*)data);
			_exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
			return -EINVAL;
		}

		if(_rtw_memcmp(bssid, pnetwork->network.MacAddress, ETH_ALEN) == _TRUE)//BSSID match, then check if supporting wpa/wpa2
		{
			DBG_8192C("BSSID:" MAC_FMT "\n", MAC_ARG(bssid));

			pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
			if(pbuf && (wpa_ielen>0))
			{
				pdata->flags = 1;
				break;
			}

			pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
			if(pbuf && (wpa_ielen>0))
			{
				pdata->flags = 2;
				break;
			}
		}

		plist = get_next(plist);
	}

	_exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	if(pdata->length>=34)
	{
		// Write the WPA/WPA2 flag byte back at offset 32 of the user buffer.
		if(copy_to_user((u8*)pdata->pointer+32, (u8*)&pdata->flags, 1))
		{
			ret= -EINVAL;
			goto exit;
		}
	}

exit:

	return ret;
}

/* Private ioctl: record a userspace pid in slot 0..2 so the driver can
 * signal that process later. Input: {selector, pid}. */
static int rtw_set_pid(struct net_device *dev,
		       struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = rtw_netdev_priv(dev);
	int *pdata = (int *)wrqu;
	int selector;

	if((padapter->bDriverStopped) || (pdata==NULL))
	{
		ret= -EINVAL;
		goto exit;
	}

	selector = *pdata;
	if(selector < 3 && selector >=0) {
		padapter->pid[selector] = *(pdata+1);
#ifdef CONFIG_GLOBAL_UI_PID
		ui_pid[selector] = *(pdata+1);
#endif
		DBG_871X("%s set pid[%d]=%d\n", __FUNCTION__, selector ,padapter->pid[selector]);
	}
	else
		DBG_871X("%s selector %d error\n", __FUNCTION__, selector);

exit:

	return ret;
}

/* Private ioctl: drive the WPS LED (1=start, 2=stop on success,
 * 3=stop on failure). */
static int rtw_wps_start(struct net_device *dev,
			 struct iw_request_info *info,
			 union
iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	u32 u32wps_start = 0;
	unsigned int uintRet = 0;

	// NOTE(review): copy_from_user runs before the bDriverStopped/pdata
	// check below and its return value is never examined — confirm intent.
	uintRet = copy_from_user( ( void* ) &u32wps_start, pdata->pointer, 4 );

	if((padapter->bDriverStopped) || (pdata==NULL))
	{
		ret= -EINVAL;
		goto exit;
	}

	if ( u32wps_start == 0 )
	{
		// Fall back to the in-kernel copy of the argument.
		u32wps_start = *extra;
	}

	DBG_8192C( "[%s] wps_start = %d\n", __FUNCTION__, u32wps_start );

	if ( u32wps_start == 1 ) // WPS Start
	{
		rtw_led_control(padapter, LED_CTL_START_WPS);
	}
	else if ( u32wps_start == 2 ) // WPS Stop because of wps success
	{
		rtw_led_control(padapter, LED_CTL_STOP_WPS);
	}
	else if ( u32wps_start == 3 ) // WPS Stop because of wps fail
	{
		rtw_led_control(padapter, LED_CTL_STOP_WPS_FAIL);
	}

exit:

	return ret;
}

#ifdef CONFIG_P2P
/* P2P private ioctl: enable/disable Wi-Fi Direct and select the initial
 * role ('0' disable, '1' device, '2' client, '3' GO), then program the
 * listen or operating channel to match. */
static int rtw_wext_p2p_enable(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo= &(padapter->wdinfo);
	struct mlme_ext_priv	*pmlmeext = &padapter->mlmeextpriv;
	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
	enum P2P_ROLE init_role = P2P_ROLE_DISABLE;

	if(*extra == '0' )
		init_role = P2P_ROLE_DISABLE;
	else if(*extra == '1')
		init_role = P2P_ROLE_DEVICE;
	else if(*extra == '2')
		init_role = P2P_ROLE_CLIENT;
	else if(*extra == '3')
		init_role = P2P_ROLE_GO;

	if(_FAIL == rtw_p2p_enable(padapter, init_role))
	{
		ret = -EFAULT;
		goto exit;
	}

	//set channel/bandwidth
	if(init_role != P2P_ROLE_DISABLE)
	{
		u8 channel, ch_offset;
		u16 bwmode;

		if(rtw_p2p_chk_state(pwdinfo, P2P_STATE_LISTEN))
		{
			//	Stay at the listen state and wait for discovery.
channel = pwdinfo->listen_channel;
			ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
			bwmode = HT_CHANNEL_WIDTH_20;
		}
		else
		{
			// Otherwise adopt the current MLME channel as the operating channel.
			pwdinfo->operating_channel = pmlmeext->cur_channel;

			channel = pwdinfo->operating_channel;
			ch_offset = pmlmeext->cur_ch_offset;
			bwmode = pmlmeext->cur_bwmode;
		}

		set_channel_bwmode(padapter, channel, ch_offset, bwmode);
	}

exit:
	return ret;
}

/* P2P private ioctl: remember the SSID to use for group-owner negotiation. */
static int rtw_p2p_set_go_nego_ssid(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo= &(padapter->wdinfo);

	DBG_8192C( "[%s] ssid = %s, len = %d\n", __FUNCTION__, extra, strlen( extra ) );
	_rtw_memcpy( pwdinfo->nego_ssid, extra, strlen( extra ) );
	pwdinfo->nego_ssidlen = strlen( extra );

	return ret;
}

/* P2P private ioctl: set the GO-negotiation intent (0..15, given as one or
 * two ASCII digits). */
static int rtw_p2p_set_intent(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info	*pwdinfo= &(padapter->wdinfo);
	u8	intent = pwdinfo->intent;

	switch( wrqu->data.length )
	{
		case 1:
		{
			intent = extra[ 0 ] - '0';
			break;
		}
		case 2:
		{
			intent = str_2char2num( extra[ 0 ], extra[ 1 ]);
			break;
		}
	}

	if ( intent <= 15 )
	{
		pwdinfo->intent= intent;
	}
	else
	{
		ret = -1;
	}

	DBG_8192C( "[%s] intent = %d\n", __FUNCTION__, intent);

	return ret;
}

/* P2P private ioctl: set the listen channel (1..13, one or two digits) and
 * retune the radio to it immediately. */
static int rtw_p2p_set_listen_ch(struct net_device *dev,
				 struct iw_request_info *info,
				 union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info	*pwdinfo= &(padapter->wdinfo);
	u8	listen_ch = pwdinfo->listen_channel;	//	Listen channel number

	switch( wrqu->data.length )
	{
		case 1:
		{
			listen_ch
= extra[ 0 ] - '0';
			break;
		}
		case 2:
		{
			listen_ch = str_2char2num( extra[ 0 ], extra[ 1 ]);
			break;
		}
	}

	if ( listen_ch > 0 && listen_ch <= 13 )
	{
		pwdinfo->listen_channel = listen_ch;
		set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
	}
	else
	{
		ret = -1;
	}

	DBG_8192C( "[%s] listen_ch = %d\n", __FUNCTION__, pwdinfo->listen_channel );

	return ret;
}

/* P2P private ioctl: set the operating channel (1..13) used if this device
 * becomes the group owner; unlike the listen channel, the radio is not
 * retuned here. */
static int rtw_p2p_set_op_ch(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	//	Commented by Albert 20110524
	//	This function is used to set the operating channel if the driver will become the group owner

	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info	*pwdinfo= &(padapter->wdinfo);
	u8	op_ch = pwdinfo->operating_channel;	//	Operating channel number

	switch( wrqu->data.length )
	{
		case 1:
		{
			op_ch = extra[ 0 ] - '0';
			break;
		}
		case 2:
		{
			op_ch = str_2char2num( extra[ 0 ], extra[ 1 ]);
			break;
		}
	}

	if ( op_ch > 0 && op_ch <= 13 )
	{
		pwdinfo->operating_channel = op_ch;
	}
	else
	{
		ret = -1;
	}

	DBG_8192C( "[%s] op_ch = %d\n", __FUNCTION__, pwdinfo->operating_channel );

	return ret;
}

/* P2P private ioctl: reset or append persistent-group profile records. */
static int rtw_p2p_profilefound(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info	*pwdinfo= &(padapter->wdinfo);

	//	Comment by Albert 2010/10/13
	//	Input data format:
	//	Ex:  0
	//	Ex:  1XX:XX:XX:XX:XX:XXYYSSID
	//	0 => Reflush the profile record list.
// 1 => Add the profile list // XX:XX:XX:XX:XX:XX => peer's MAC Address ( ex: 00:E0:4C:00:00:01 ) // YY => SSID Length // SSID => SSID for persistence group DBG_8192C( "[%s] In value = %s, len = %d \n", __FUNCTION__, extra, wrqu->data.length -1); // The upper application should pass the SSID to driver by using this rtw_p2p_profilefound function. if(!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) { if ( extra[ 0 ] == '0' ) { // Remove all the profile information of wifidirect_info structure. _rtw_memset( &pwdinfo->profileinfo[ 0 ], 0x00, sizeof( struct profile_info ) * P2P_MAX_PERSISTENT_GROUP_NUM ); pwdinfo->profileindex = 0; } else { if ( pwdinfo->profileindex >= P2P_MAX_PERSISTENT_GROUP_NUM ) { ret = -1; } else { int jj, kk; // Add this profile information into pwdinfo->profileinfo // Ex: 1XX:XX:XX:XX:XX:XXYYSSID for( jj = 0, kk = 1; jj < ETH_ALEN; jj++, kk += 3 ) { pwdinfo->profileinfo[ pwdinfo->profileindex ].peermac[ jj ] = key_2char2num(extra[ kk ], extra[ kk+ 1 ]); } pwdinfo->profileinfo[ pwdinfo->profileindex ].ssidlen = ( extra[18] - '0' ) * 10 + ( extra[ 19 ] - '0' ); _rtw_memcpy( pwdinfo->profileinfo[ pwdinfo->profileindex ].ssid, &extra[ 20 ], pwdinfo->profileinfo[ pwdinfo->profileindex ].ssidlen ); pwdinfo->profileindex++; } } } return ret; } static int rtw_p2p_setDN(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct iw_point *pdata = &wrqu->data; struct wifidirect_info *pwdinfo= &(padapter->wdinfo); DBG_8192C( "[%s] %s %d\n", __FUNCTION__, extra, wrqu->data.length -1 ); pwdinfo->device_name_len = wrqu->data.length - 1; _rtw_memset( pwdinfo->device_name, 0x00, WPS_MAX_DEVICE_NAME_LEN ); _rtw_memcpy( pwdinfo->device_name, extra, pwdinfo->device_name_len ); return ret; } static int rtw_p2p_get_status(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );

	DBG_8192C( "[%s] Role = %d, Status = %d, peer addr = %.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n", __FUNCTION__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
			pwdinfo->p2p_peer_interface_addr[ 0 ], pwdinfo->p2p_peer_interface_addr[ 1 ], pwdinfo->p2p_peer_interface_addr[ 2 ],
			pwdinfo->p2p_peer_interface_addr[ 3 ], pwdinfo->p2p_peer_interface_addr[ 4 ], pwdinfo->p2p_peer_interface_addr[ 5 ]);

	// Commented by Albert 2010/10/12
	// Because of the output size limitation, I had removed the "Role" information.
	// About the "Role" information, we will use the new private IOCTL to get the "Role" information.
	sprintf( extra, "\n\nStatus=%.2d\n", rtw_p2p_state(pwdinfo) );
	wrqu->data.length = strlen( extra );

	if(rtw_p2p_chk_state(pwdinfo, P2P_STATE_LISTEN))
	{
		// Stay at the listen state and wait for discovery.
		set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
	}

	return ret;
}

// Commented by Albert 20110520
// This function will return the config method description
// This config method description will show us which config method the remote P2P device is intented to use
// by sending the provisioning discovery request frame.
static int rtw_p2p_get_req_cm(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );

	// Copy out the textual config-method description recorded when the
	// last provisioning discovery request was received.
	sprintf( extra, "\n\nCM=%s\n", pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req );
	wrqu->data.length = strlen( extra );

	return ret;
}

// Report this device's current P2P role to user space.
static int rtw_p2p_get_role(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );

	DBG_8192C( "[%s] Role = %d, Status = %d, peer addr = %.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n", __FUNCTION__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
			pwdinfo->p2p_peer_interface_addr[ 0 ], pwdinfo->p2p_peer_interface_addr[ 1 ], pwdinfo->p2p_peer_interface_addr[ 2 ],
			pwdinfo->p2p_peer_interface_addr[ 3 ], pwdinfo->p2p_peer_interface_addr[ 4 ], pwdinfo->p2p_peer_interface_addr[ 5 ]);

	sprintf( extra, "\n\nRole=%.2d\n", rtw_p2p_role(pwdinfo) );
	wrqu->data.length = strlen( extra );

	return ret;
}

// Report the peer's P2P interface address; body continues in the next chunk.
static int rtw_p2p_get_peer_ifaddr(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );

	DBG_8192C( "[%s] Role = %d, Status = %d, peer addr = %.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n", __FUNCTION__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
			pwdinfo->p2p_peer_interface_addr[ 0 ], pwdinfo->p2p_peer_interface_addr[ 1 ], pwdinfo->p2p_peer_interface_addr[ 2 ],
			pwdinfo->p2p_peer_interface_addr[ 3 ], pwdinfo->p2p_peer_interface_addr[ 4 ], pwdinfo->p2p_peer_interface_addr[ 5 ]);

	sprintf( extra, "\nMAC %.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
			pwdinfo->p2p_peer_interface_addr[ 0 ],
			pwdinfo->p2p_peer_interface_addr[ 1 ], pwdinfo->p2p_peer_interface_addr[ 2 ],
			pwdinfo->p2p_peer_interface_addr[ 3 ], pwdinfo->p2p_peer_interface_addr[ 4 ],
			pwdinfo->p2p_peer_interface_addr[ 5 ]);
	wrqu->data.length = strlen( extra );

	return ret;
}

// Report the peer's P2P *device* address (recorded from the last received
// provisioning discovery request) to user space.
static int rtw_p2p_get_peer_devaddr(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );

	DBG_8192C( "[%s] Role = %d, Status = %d, peer addr = %.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n", __FUNCTION__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
			pwdinfo->rx_prov_disc_info.peerDevAddr[ 0 ], pwdinfo->rx_prov_disc_info.peerDevAddr[ 1 ],
			pwdinfo->rx_prov_disc_info.peerDevAddr[ 2 ], pwdinfo->rx_prov_disc_info.peerDevAddr[ 3 ],
			pwdinfo->rx_prov_disc_info.peerDevAddr[ 4 ], pwdinfo->rx_prov_disc_info.peerDevAddr[ 5 ]);
	sprintf( extra, "\n%.2X%.2X%.2X%.2X%.2X%.2X",
			pwdinfo->rx_prov_disc_info.peerDevAddr[ 0 ], pwdinfo->rx_prov_disc_info.peerDevAddr[ 1 ],
			pwdinfo->rx_prov_disc_info.peerDevAddr[ 2 ], pwdinfo->rx_prov_disc_info.peerDevAddr[ 3 ],
			pwdinfo->rx_prov_disc_info.peerDevAddr[ 4 ], pwdinfo->rx_prov_disc_info.peerDevAddr[ 5 ]);
	wrqu->data.length = strlen( extra );

	return ret;
}

// Report the group id ("<GO device address>-<SSID>") of the formed group.
static int rtw_p2p_get_groupid(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );

	sprintf( extra, "\n%.2X:%.2X:%.2X:%.2X:%.2X:%.2X-%s",
			pwdinfo->groupid_info.go_device_addr[ 0 ], pwdinfo->groupid_info.go_device_addr[ 1 ],
			pwdinfo->groupid_info.go_device_addr[ 2 ], pwdinfo->groupid_info.go_device_addr[ 3 ],
			pwdinfo->groupid_info.go_device_addr[ 4 ], pwdinfo->groupid_info.go_device_addr[ 5 ],
			pwdinfo->groupid_info.ssid);
	wrqu->data.length = strlen( extra );
	return
	ret;
}

// Look up the WPS config method of the peer whose MAC address user space
// passes after the "wpsCM=" prefix, by searching the scanned-network queue;
// body continues in the next chunk.
static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );
	u8 peerMAC[ ETH_ALEN ] = { 0x00 };
	int jj,kk;
	u8 peerMACStr[ 17 ] = { 0x00 };
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	_irqL irqL;
	_list *plist, *phead;
	_queue *queue = &(pmlmepriv->scanned_queue);
	struct wlan_network *pnetwork = NULL;
	u8 blnMatch = 0;
	u16 attr_content = 0;
	uint attr_contentlen = 0;
	//6 is the string "wpsCM=", 17 is the MAC addr, we have to clear it at wrqu->data.pointer
	u8 attr_content_str[ 6 + 17 ] = { 0x00 };

	// Commented by Albert 20110727
	// The input data is the MAC address which the application wants to know its WPS config method.
	// After knowing its WPS config method, the application can decide the config method for provisioning discovery.
	// Format: iwpriv wlanx p2p_get_wpsCM 00:E0:4C:00:00:05

	DBG_8192C( "[%s] data = %s\n", __FUNCTION__, ( char* ) extra );
	//_rtw_memcpy( peerMACStr , extra , 17 );
	// Fetch the 17-character "XX:XX:XX:XX:XX:XX" string that follows the
	// 6-byte "wpsCM=" prefix directly from the user buffer.
	if ( copy_from_user(peerMACStr, wrqu->data.pointer + 6 , 17) )
	{
		return -EFAULT;
	}

	for( jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3 )
	{
		peerMAC[ jj ] = key_2char2num( peerMACStr[kk], peerMACStr[kk+ 1] );
	}

	_enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	phead = get_list_head(queue);
	plist = get_next(phead);

	while(1)
	{
		if (rtw_end_of_queue_search(phead,plist)== _TRUE)
			break;

		pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
		if ( _rtw_memcmp( pnetwork->network.MacAddress, peerMAC, ETH_ALEN ) )
		{
			u8 *wpsie;
			uint wpsie_len = 0;

			// The mac address is matched.
			if ( (wpsie=rtw_get_wps_ie( &pnetwork->network.IEs[ 12 ], pnetwork->network.IELength - 12, NULL, &wpsie_len )) )
			{
				rtw_get_wps_attr_content( wpsie, wpsie_len, WPS_ATTR_CONF_METHOD, ( u8* ) &attr_content, &attr_contentlen);
				if ( attr_contentlen )
				{
					// The attribute value arrives big-endian on the wire.
					attr_content = be16_to_cpu( attr_content );
					sprintf( attr_content_str, "\n\nM=%.4d", attr_content );
					blnMatch = 1;
				}
			}
			break;
		}
		plist = get_next(plist);
	}

	_exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	if ( !blnMatch )
	{
		sprintf( attr_content_str, "\n\nM=0000" );
	}

	if ( copy_to_user(wrqu->data.pointer, attr_content_str, 6 + 17))
	{
		return -EFAULT;
	}
	return ret;
}

#ifdef CONFIG_WFD
// Report the peer's WFD RTSP control port learned during discovery.
static int rtw_p2p_get_peer_WFD_port(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );

	DBG_871X( "[%s] p2p_state = %d\n", __FUNCTION__, rtw_p2p_state(pwdinfo) );

	sprintf( extra, "\n\nPort=%d\n", pwdinfo->wfd_info.peer_rtsp_ctrlport );
	DBG_8192C( "[%s] remote port = %d\n", __FUNCTION__, pwdinfo->wfd_info.peer_rtsp_ctrlport );

	wrqu->data.length = strlen( extra );
	return ret;
}
#endif // CONFIG_WFD

// Look up the WPS device name of the peer whose MAC address user space
// passes after the "devN=" prefix, by searching the scanned-network queue.
static int rtw_p2p_get_device_name(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );
	u8 peerMAC[ ETH_ALEN ] = { 0x00 };
	int jj,kk;
	u8 peerMACStr[ 17 ] = { 0x00 };
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	_irqL irqL;
	_list *plist, *phead;
	_queue *queue = &(pmlmepriv->scanned_queue);
	struct wlan_network *pnetwork = NULL;
	u8 blnMatch = 0;
	u8 dev_name[ WPS_MAX_DEVICE_NAME_LEN ] = { 0x00 };
	uint dev_len = 0;
	u8 dev_name_str[ WPS_MAX_DEVICE_NAME_LEN + 5 ] = { 0x00 };	// +5 is for the str "devN=", we have to clear it at wrqu->data.pointer

	// Commented by Kurt 20110727
	// The input data is the MAC address which the application wants to know its device name.
	// Such user interface could show peer device's device name instead of ssid.
	// Format: iwpriv wlanx p2p_get_wpsCM 00:E0:4C:00:00:05

	DBG_8192C( "[%s] data = %s\n", __FUNCTION__, ( char* ) extra );
	//_rtw_memcpy( peerMACStr , extra , 17 );
	// Fetch the 17-character MAC string that follows the 5-byte "devN=" prefix.
	if ( copy_from_user(peerMACStr, wrqu->data.pointer + 5 , 17) )
	{
		return -EFAULT;
	}

	for( jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3 )
	{
		peerMAC[ jj ] = key_2char2num( peerMACStr[kk], peerMACStr[kk+ 1] );
	}

	_enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	phead = get_list_head(queue);
	plist = get_next(phead);

	while(1)
	{
		if (rtw_end_of_queue_search(phead,plist)== _TRUE)
			break;

		pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
		if ( _rtw_memcmp( pnetwork->network.MacAddress, peerMAC, ETH_ALEN ) )
		{
			u8 *wpsie;
			uint wpsie_len = 0;

			// The mac address is matched.
			if ( (wpsie=rtw_get_wps_ie( &pnetwork->network.IEs[ 12 ], pnetwork->network.IELength - 12, NULL, &wpsie_len )) )
			{
				rtw_get_wps_attr_content( wpsie, wpsie_len, WPS_ATTR_DEVICE_NAME, dev_name, &dev_len);
				if ( dev_len )
				{
					sprintf( dev_name_str, "\n\nN=%s", dev_name );
					blnMatch = 1;
				}
			}
			break;
		}
		plist = get_next(plist);
	}

	_exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	if ( !blnMatch )
	{
		sprintf( dev_name_str, "\n\nN=0000" );
	}

	if ( copy_to_user(wrqu->data.pointer, dev_name_str, 5+ (( dev_len > 17 )?
		dev_len : 17) ))
	{
		return -EFAULT;
	}
	return ret;
}

// Start GO negotiation with the peer whose MAC address is passed in
// 'extra'; the peer must already be present in the scanned-network queue.
// Body continues in the next chunk.
static int rtw_p2p_connect(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );
	u8 peerMAC[ ETH_ALEN ] = { 0x00 };
	int jj,kk;
	u8 peerMACStr[ ETH_ALEN * 2 ] = { 0x00 };
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	_irqL irqL;
	_list *plist, *phead;
	_queue *queue = &(pmlmepriv->scanned_queue);
	struct wlan_network *pnetwork = NULL;
	uint uintPeerChannel = 0;

	// Commented by Albert 20110304
	// The input data contains two informations.
	// 1. First information is the MAC address which wants to formate with
	// 2. Second information is the WPS PINCode or "pbc" string for push button method
	// Format: 00:E0:4C:00:00:05
	// Format: 00:E0:4C:00:00:05

	DBG_8192C( "[%s] data = %s\n", __FUNCTION__, extra );

	// Refuse to negotiate while P2P is not enabled.
	if(rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
	{
		DBG_8192C( "[%s] WiFi Direct is disable!\n", __FUNCTION__ );
		return ret;
	}

	// The UI must have supplied WPS info (PIN/PBC) beforehand.
	if ( pwdinfo->ui_got_wps_info == P2P_NO_WPSINFO )
	{
		return -1;
	}

	for( jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3 )
	{
		peerMAC[ jj ] = key_2char2num( extra[kk], extra[kk+ 1] );
	}

	// Search the scanned queue for the peer to learn its channel.
	_enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	phead = get_list_head(queue);
	plist = get_next(phead);

	while(1)
	{
		if (rtw_end_of_queue_search(phead,plist)== _TRUE)
			break;

		pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
		if ( _rtw_memcmp( pnetwork->network.MacAddress, peerMAC, ETH_ALEN ) )
		{
			uintPeerChannel = pnetwork->network.Configuration.DSConfig;
			break;
		}

		plist = get_next(plist);
	}

	_exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	if ( uintPeerChannel )
	{
		_rtw_memset( &pwdinfo->nego_req_info, 0x00, sizeof( struct tx_nego_req_info ) );
		_rtw_memset( &pwdinfo->groupid_info, 0x00, sizeof( struct group_id_info ) );
		pwdinfo->nego_req_info.peer_channel_num[ 0 ] = uintPeerChannel;
		_rtw_memcpy( pwdinfo->nego_req_info.peerDevAddr, pnetwork->network.MacAddress, ETH_ALEN );
		pwdinfo->nego_req_info.benable = _TRUE;

		rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
		rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_ING);

		DBG_8192C( "[%s] Start PreTx Procedure!\n", __FUNCTION__ );
		// Kick the pre-TX scan now and arm the timeout that restores the
		// previous P2P state if negotiation does not complete.
		_set_timer( &pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT );
		_set_timer( &pwdinfo->restore_p2p_state_timer, P2P_GO_NEGO_TIMEOUT );
	}
	else
	{
		DBG_8192C( "[%s] Not Found in Scanning Queue~\n", __FUNCTION__ );
		ret = -1;
	}
exit:
	return ret;
}

// Send a provisioning discovery request to the peer given as
// "XX:XX:XX:XX:XX:XX_<configmethod>"; body continues in the next chunks.
static int rtw_p2p_prov_disc(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );
	u8 peerMAC[ ETH_ALEN ] = { 0x00 };
	int jj,kk;
	u8 peerMACStr[ ETH_ALEN * 2 ] = { 0x00 };
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	_list *plist, *phead;
	_queue *queue = &(pmlmepriv->scanned_queue);
	struct wlan_network *pnetwork = NULL;
	uint uintPeerChannel = 0;
	u8 attr_content[50] = { 0x00 }, _status = 0;
	u8 *p2pie;
	uint p2pielen = 0, attr_contentlen = 0;
	_irqL irqL;

	// Commented by Albert 20110301
	// The input data contains two informations.
	// 1. First information is the MAC address which wants to issue the provisioning discovery request frame.
	// 2. Second information is the WPS configuration method which wants to discovery
	// Format: 00:E0:4C:00:00:05_display
	// Format: 00:E0:4C:00:00:05_keypad
	// Format: 00:E0:4C:00:00:05_pbc
	// Format: 00:E0:4C:00:00:05_label

	DBG_8192C( "[%s] data = %s\n", __FUNCTION__, extra );

	if(rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
	{
		DBG_8192C( "[%s] WiFi Direct is disable!\n", __FUNCTION__ );
		return ret;
	}
	else
	{
		// Reset the content of struct tx_provdisc_req_info excluded the wps_config_method_request.
_rtw_memset( pwdinfo->tx_prov_disc_info.peerDevAddr, 0x00, ETH_ALEN ); _rtw_memset( pwdinfo->tx_prov_disc_info.peerIFAddr, 0x00, ETH_ALEN ); _rtw_memset( &pwdinfo->tx_prov_disc_info.ssid, 0x00, sizeof( NDIS_802_11_SSID ) ); pwdinfo->tx_prov_disc_info.peer_channel_num[ 0 ] = 0; pwdinfo->tx_prov_disc_info.peer_channel_num[ 1 ] = 0; pwdinfo->tx_prov_disc_info.benable = _FALSE; } for( jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3 ) { peerMAC[ jj ] = key_2char2num( extra[kk], extra[kk+ 1] ); } if ( _rtw_memcmp( &extra[ 18 ], "display", 7 ) ) { pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_DISPLYA; } else if ( _rtw_memcmp( &extra[ 18 ], "keypad", 7 ) ) { pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_KEYPAD; } else if ( _rtw_memcmp( &extra[ 18 ], "pbc", 3 ) ) { pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_PUSH_BUTTON; } else if ( _rtw_memcmp( &extra[ 18 ], "label", 5 ) ) { pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_LABEL; } else { DBG_8192C( "[%s] Unknown WPS config methodn", __FUNCTION__ ); return( ret ); } _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL); phead = get_list_head(queue); plist = get_next(phead); while(1) { if (rtw_end_of_queue_search(phead,plist)== _TRUE) break; pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); // Commented by Albert 2011/05/18 // Match the device address located in the P2P IE // This is for the case that the P2P device address is not the same as the P2P interface address. if ( (p2pie=rtw_get_p2p_ie( &pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen)) ) { // The P2P Device ID attribute is included in the Beacon frame. // The P2P Device Info attribute is included in the probe response frame. 
			if ( rtw_get_p2p_attr_content( p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen) )
			{
				// Handle the P2P Device ID attribute of Beacon first
				if ( _rtw_memcmp( attr_content, peerMAC, ETH_ALEN ) )
				{
					uintPeerChannel = pnetwork->network.Configuration.DSConfig;
					break;
				}
			}
			else if ( rtw_get_p2p_attr_content( p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen) )
			{
				// Handle the P2P Device Info attribute of probe response
				if ( _rtw_memcmp( attr_content, peerMAC, ETH_ALEN ) )
				{
					uintPeerChannel = pnetwork->network.Configuration.DSConfig;
					break;
				}
			}
		}
		plist = get_next(plist);
	}

	_exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	if ( uintPeerChannel )
	{
		// Peer found: record its addresses and channel, switch to the
		// peer's channel, then arm the pre-TX scan and restore-state timers.
		_rtw_memcpy( pwdinfo->tx_prov_disc_info.peerIFAddr, pnetwork->network.MacAddress, ETH_ALEN );
		_rtw_memcpy( pwdinfo->tx_prov_disc_info.peerDevAddr, peerMAC, ETH_ALEN );
		pwdinfo->tx_prov_disc_info.peer_channel_num[0] = ( u16 ) uintPeerChannel;
		pwdinfo->tx_prov_disc_info.benable = _TRUE;
		rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
		rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ);

		if(rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT))
		{
			_rtw_memcpy( &pwdinfo->tx_prov_disc_info.ssid, &pnetwork->network.Ssid, sizeof( NDIS_802_11_SSID ) );
		}
		else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
		{
			_rtw_memcpy( pwdinfo->tx_prov_disc_info.ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN );
			pwdinfo->tx_prov_disc_info.ssid.SsidLength= P2P_WILDCARD_SSID_LEN;
		}

		set_channel_bwmode(padapter, uintPeerChannel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);

		_set_timer( &pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT );

		_set_timer( &pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT );
	}
	else
	{
		DBG_8192C( "[%s] NOT Found in the Scanning Queue!\n", __FUNCTION__ );
	}
exit:
	return ret;
}

// Added by Albert 20110328
// This function is used to inform the driver the user had specified the pin code value or pbc
// to application.
static int rtw_p2p_got_wpsinfo(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo = &( padapter->wdinfo );

	DBG_8192C( "[%s] data = %s\n", __FUNCTION__, extra );

	// Added by Albert 20110328
	// if the input data is P2P_NO_WPSINFO -> reset the wpsinfo
	// if the input data is P2P_GOT_WPSINFO_PEER_DISPLAY_PIN -> the utility just input the PIN code got from the peer P2P device.
	// if the input data is P2P_GOT_WPSINFO_SELF_DISPLAY_PIN -> the utility just got the PIN code from itself.
	// if the input data is P2P_GOT_WPSINFO_PBC -> the utility just determine to use the PBC

	if ( *extra == '0' )
	{
		pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
	}
	else if ( *extra == '1' )
	{
		pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_PEER_DISPLAY_PIN;
	}
	else if ( *extra == '2' )
	{
		pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_SELF_DISPLAY_PIN;
	}
	else if ( *extra == '3' )
	{
		pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_PBC;
	}
	else
	{
		// Any unrecognized value falls back to "no WPS info".
		pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
	}

	return ret;
}
#endif //CONFIG_P2P

// Top-level "p2p_set" private ioctl: dispatch on the "key=" prefix of the
// command string to the matching setter; body continues in the next chunk.
static int rtw_p2p_set(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;

#ifdef CONFIG_P2P

	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo= &(padapter->wdinfo);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	DBG_8192C( "[%s] extra = %s\n", __FUNCTION__, extra );

	if ( _rtw_memcmp( extra, "enable=", 7 ) )
	{
		rtw_wext_p2p_enable( dev, info, wrqu, &extra[7] );
	}
	else if ( _rtw_memcmp( extra, "setDN=", 6 ) )
	{
		wrqu->data.length -= 6;
		rtw_p2p_setDN( dev, info, wrqu, &extra[6] );
	}
	else if ( _rtw_memcmp( extra, "profilefound=", 13 ) )
	{
		wrqu->data.length -= 13;
		rtw_p2p_profilefound( dev, info, wrqu, &extra[13] );
	}
	else if (
		_rtw_memcmp( extra, "prov_disc=", 10 ) )
	{
		wrqu->data.length -= 10;
		rtw_p2p_prov_disc( dev, info, wrqu, &extra[10] );
	}
	else if ( _rtw_memcmp( extra, "nego=", 5 ) )
	{
		wrqu->data.length -= 5;
		rtw_p2p_connect( dev, info, wrqu, &extra[5] );
	}
	else if ( _rtw_memcmp( extra, "intent=", 7 ) )
	{
		// Commented by Albert 2011/03/23
		// The wrqu->data.length will include the null character
		// So, we will decrease 7 + 1
		wrqu->data.length -= 8;
		rtw_p2p_set_intent( dev, info, wrqu, &extra[7] );
	}
	else if ( _rtw_memcmp( extra, "ssid=", 5 ) )
	{
		wrqu->data.length -= 5;
		rtw_p2p_set_go_nego_ssid( dev, info, wrqu, &extra[5] );
	}
	else if ( _rtw_memcmp( extra, "got_wpsinfo=", 12 ) )
	{
		wrqu->data.length -= 12;
		rtw_p2p_got_wpsinfo( dev, info, wrqu, &extra[12] );
	}
	else if ( _rtw_memcmp( extra, "listen_ch=", 10 ) )
	{
		// Commented by Albert 2011/05/24
		// The wrqu->data.length will include the null character
		// So, we will decrease (10 + 1)
		wrqu->data.length -= 11;
		rtw_p2p_set_listen_ch( dev, info, wrqu, &extra[10] );
	}
	else if ( _rtw_memcmp( extra, "op_ch=", 6 ) )
	{
		// Commented by Albert 2011/05/24
		// The wrqu->data.length will include the null character
		// So, we will decrease (6 + 1)
		wrqu->data.length -= 7;
		rtw_p2p_set_op_ch( dev, info, wrqu, &extra[6] );
	}

#endif //CONFIG_P2P

	return ret;
}

// Top-level "p2p_get" private ioctl: dispatch on the command keyword in
// the user buffer to the matching getter; body continues in the next chunk.
static int rtw_p2p_get(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;

#ifdef CONFIG_P2P

	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo= &(padapter->wdinfo);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	DBG_8192C( "[%s] extra = %s\n", __FUNCTION__, (char*) wrqu->data.pointer );

	if ( _rtw_memcmp( wrqu->data.pointer, "status", 6 ) )
	{
		rtw_p2p_get_status( dev, info, wrqu, extra );
	}
	else if ( _rtw_memcmp( wrqu->data.pointer, "role", 4 ) )
	{
		rtw_p2p_get_role( dev, info, wrqu, extra);
	}
	else if ( _rtw_memcmp(
			wrqu->data.pointer, "peer_ifa", 8 ) )
	{
		rtw_p2p_get_peer_ifaddr( dev, info, wrqu, extra);
	}
	else if ( _rtw_memcmp( wrqu->data.pointer, "req_cm", 6 ) )
	{
		rtw_p2p_get_req_cm( dev, info, wrqu, extra);
	}
	else if ( _rtw_memcmp( wrqu->data.pointer, "peer_deva", 9 ) )
	{
		rtw_p2p_get_peer_devaddr( dev, info, wrqu, extra);
	}
	else if ( _rtw_memcmp( wrqu->data.pointer, "group_id", 8 ) )
	{
		rtw_p2p_get_groupid( dev, info, wrqu, extra);
	}
#ifdef CONFIG_WFD
	else if ( _rtw_memcmp( wrqu->data.pointer, "peer_port", 9 ) )
	{
		rtw_p2p_get_peer_WFD_port( dev, info, wrqu, extra );
	}
#endif // CONFIG_WFD

#endif //CONFIG_P2P

	return ret;
}

// Second "p2p_get" ioctl, for queries that carry an argument after the
// keyword ("wpsCM=<mac>", "devN=<mac>").
static int rtw_p2p_get2(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;

#ifdef CONFIG_P2P

	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct iw_point *pdata = &wrqu->data;
	struct wifidirect_info *pwdinfo= &(padapter->wdinfo);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	DBG_8192C( "[%s] extra = %s\n", __FUNCTION__, (char*) wrqu->data.pointer );

	if ( _rtw_memcmp( extra, "wpsCM=", 6 ) )
	{
		wrqu->data.length -= 6;
		rtw_p2p_get_wps_configmethod( dev, info, wrqu, &extra[6]);
	}
	else if ( _rtw_memcmp( extra, "devN=", 5 ) )
	{
		wrqu->data.length -= 5;
		rtw_p2p_get_device_name( dev, info, wrqu, &extra[5] );
	}

#endif //CONFIG_P2P

	return ret;
}

extern char *ifname;
extern int rtw_change_ifname(_adapter *padapter, const char *ifname);

// Rename the network interface at runtime ("rereg_nd_name" private ioctl);
// "disable%d" is a magic name used to power the device down. Body continues
// in the next chunk.
static int rtw_rereg_nd_name(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
	_adapter *padapter = rtw_netdev_priv(dev);
	struct rereg_nd_name_data *rereg_priv = &padapter->rereg_nd_name_priv;
	char new_ifname[IFNAMSIZ];

	if(rereg_priv->old_ifname[0] == 0)
	{
		strncpy(rereg_priv->old_ifname, ifname, IFNAMSIZ);
		rereg_priv->old_ifname[IFNAMSIZ-1] = 0;
	}

	//DBG_871X("%s wrqu->data.length:%d\n", __FUNCTION__, wrqu->data.length);
	if(wrqu->data.length > IFNAMSIZ)
return -EFAULT; if ( copy_from_user(new_ifname, wrqu->data.pointer, IFNAMSIZ) ) { return -EFAULT; } if( 0 == strcmp(rereg_priv->old_ifname, new_ifname) ) { return ret; } DBG_871X("%s new_ifname:%s\n", __FUNCTION__, new_ifname); if( 0 != (ret = rtw_change_ifname(padapter, new_ifname)) ) { goto exit; } if(_rtw_memcmp(rereg_priv->old_ifname, "disable%d", 9) == _TRUE) { padapter->ledpriv.bRegUseLed= rereg_priv->old_bRegUseLed; rtw_sw_led_init(padapter); rtw_ips_mode_req(&padapter->pwrctrlpriv, rereg_priv->old_ips_mode); } strncpy(rereg_priv->old_ifname, new_ifname, IFNAMSIZ); rereg_priv->old_ifname[IFNAMSIZ-1] = 0; if(_rtw_memcmp(new_ifname, "disable%d", 9) == _TRUE) { DBG_871X("%s disable\n", __FUNCTION__); // free network queue for Android's timming issue rtw_free_network_queue(padapter, _TRUE); // close led rtw_led_control(padapter, LED_CTL_POWER_OFF); rereg_priv->old_bRegUseLed = padapter->ledpriv.bRegUseLed; padapter->ledpriv.bRegUseLed= _FALSE; rtw_sw_led_deinit(padapter); // the interface is being "disabled", we can do deeper IPS rereg_priv->old_ips_mode = rtw_get_ips_mode_req(&padapter->pwrctrlpriv); rtw_ips_mode_req(&padapter->pwrctrlpriv, IPS_NORMAL); } exit: return ret; } #if 0 void mac_reg_dump(_adapter *padapter) { int i,j=1; DBG_8192C("\n======= MAC REG =======\n"); for(i=0x0;i<0x300;i+=4) { if(j%4==1) DBG_8192C("0x%02x",i); DBG_8192C(" 0x%08x ",rtw_read32(padapter,i)); if((j++)%4 == 0) DBG_8192C("\n"); } for(i=0x400;i<0x800;i+=4) { if(j%4==1) DBG_8192C("0x%02x",i); DBG_8192C(" 0x%08x ",rtw_read32(padapter,i)); if((j++)%4 == 0) DBG_8192C("\n"); } } void bb_reg_dump(_adapter *padapter) { int i,j=1; DBG_8192C("\n======= BB REG =======\n"); for(i=0x800;i<0x1000;i+=4) { if(j%4==1) DBG_8192C("0x%02x",i); DBG_8192C(" 0x%08x ",rtw_read32(padapter,i)); if((j++)%4 == 0) DBG_8192C("\n"); } } void rf_reg_dump(_adapter *padapter) { int i,j=1,path; u32 value; DBG_8192C("\n======= RF REG =======\n"); for(path=0;path<2;path++) { DBG_8192C("\nRF_Path(%x)\n",path); 
		for(i=0;i<0x100;i++)
		{
			value = PHY_QueryRFReg(padapter, (RF90_RADIO_PATH_E)path,i, bMaskDWord);
			if(j%4==1)	DBG_8192C("0x%02x ",i);
			DBG_8192C(" 0x%08x ",value);
			if((j++)%4==0)	DBG_8192C("\n");
		}
	}
}
#endif

// Dump MAC registers 0x000-0x2fc and 0x400-0x7fc, four values per row.
void mac_reg_dump(_adapter *padapter)
{
	int i,j=1;
	DBG_8192C("\n======= MAC REG =======\n");
	for(i=0x0;i<0x300;i+=4)
	{
		if(j%4==1)	DBG_8192C("0x%02x",i);
		DBG_8192C(" 0x%08x ",rtw_read32(padapter,i));
		if((j++)%4 == 0)	DBG_8192C("\n");
	}
	for(i=0x400;i<0x800;i+=4)
	{
		if(j%4==1)	DBG_8192C("0x%02x",i);
		DBG_8192C(" 0x%08x ",rtw_read32(padapter,i));
		if((j++)%4 == 0)	DBG_8192C("\n");
	}
}

// Dump baseband registers 0x800-0xffc, four values per row.
void bb_reg_dump(_adapter *padapter)
{
	int i,j=1;
	DBG_8192C("\n======= BB REG =======\n");
	for(i=0x800;i<0x1000;i+=4)
	{
		if(j%4==1)	DBG_8192C("0x%02x",i);
		DBG_8192C(" 0x%08x ",rtw_read32(padapter,i));
		if((j++)%4 == 0)	DBG_8192C("\n");
	}
}

// Dump RF registers 0x00-0xff for each RF path actually present:
// one path for 1T1R/1T2R chips, two otherwise.
void rf_reg_dump(_adapter *padapter)
{
	int i,j=1,path;
	u32 value;
	u8 rf_type,path_nums = 0;
	padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));

	DBG_8192C("\n======= RF REG =======\n");
	if((RF_1T2R == rf_type) ||(RF_1T1R ==rf_type ))
		path_nums = 1;
	else
		path_nums = 2;

	for(path=0;path<path_nums;path++)
	{
		DBG_8192C("\nRF_Path(%x)\n",path);
		for(i=0;i<0x100;i++)
		{
			//value = PHY_QueryRFReg(padapter, (RF90_RADIO_PATH_E)path,i, bMaskDWord);
			value =padapter->HalFunc.read_rfreg(padapter, path, i, 0xffffffff);
			if(j%4==1)	DBG_8192C("0x%02x ",i);
			DBG_8192C(" 0x%08x ",value);
			if((j++)%4==0)	DBG_8192C("\n");
		}
	}
}

#ifdef CONFIG_IOL
#include <rtw_iol.h>
#endif

// Debug back-door ioctl. The first 32-bit word of wrqu->data packs the
// major command (bits 31..24), minor command (bits 23..16) and a 16-bit
// argument (bits 15..0); the second word is an extra argument. The
// function continues past this chunk.
static int rtw_dbg_port(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
	_irqL irqL;
	int ret = 0;
	u8 major_cmd, minor_cmd;
	u16 arg;
	u32 extra_arg, *pdata, val32;
	struct sta_info *psta;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct security_priv *psecuritypriv =
&padapter->securitypriv; struct wlan_network *cur_network = &(pmlmepriv->cur_network); struct sta_priv *pstapriv = &padapter->stapriv; pdata = (u32*)&wrqu->data; val32 = *pdata; arg = (u16)(val32&0x0000ffff); major_cmd = (u8)(val32>>24); minor_cmd = (u8)((val32>>16)&0x00ff); extra_arg = *(pdata+1); switch(major_cmd) { case 0x70://read_reg switch(minor_cmd) { case 1: DBG_8192C("rtw_read8(0x%x)=0x%02x\n", arg, rtw_read8(padapter, arg)); break; case 2: DBG_8192C("rtw_read16(0x%x)=0x%04x\n", arg, rtw_read16(padapter, arg)); break; case 4: DBG_8192C("rtw_read32(0x%x)=0x%08x\n", arg, rtw_read32(padapter, arg)); break; } break; case 0x71://write_reg switch(minor_cmd) { case 1: rtw_write8(padapter, arg, extra_arg); DBG_8192C("rtw_write8(0x%x)=0x%02x\n", arg, rtw_read8(padapter, arg)); break; case 2: rtw_write16(padapter, arg, extra_arg); DBG_8192C("rtw_write16(0x%x)=0x%04x\n", arg, rtw_read16(padapter, arg)); break; case 4: rtw_write32(padapter, arg, extra_arg); DBG_8192C("rtw_write32(0x%x)=0x%08x\n", arg, rtw_read32(padapter, arg)); break; } break; case 0x72://read_bb DBG_8192C("read_bbreg(0x%x)=0x%x\n", arg, padapter->HalFunc.read_bbreg(padapter, arg, 0xffffffff)); break; case 0x73://write_bb padapter->HalFunc.write_bbreg(padapter, arg, 0xffffffff, extra_arg); DBG_8192C("write_bbreg(0x%x)=0x%x\n", arg, padapter->HalFunc.read_bbreg(padapter, arg, 0xffffffff)); break; case 0x74://read_rf DBG_8192C("read RF_reg path(0x%02x),offset(0x%x),value(0x%08x)\n",minor_cmd,arg,padapter->HalFunc.read_rfreg(padapter, minor_cmd, arg, 0xffffffff)); break; case 0x75://write_rf padapter->HalFunc.write_rfreg(padapter, minor_cmd, arg, 0xffffffff, extra_arg); DBG_8192C("write RF_reg path(0x%02x),offset(0x%x),value(0x%08x)\n",minor_cmd,arg, padapter->HalFunc.read_rfreg(padapter, minor_cmd, arg, 0xffffffff)); break; case 0x76: switch(minor_cmd) { case 0x00: //normal mode, padapter->recvpriv.is_signal_dbg = 0; break; case 0x01: //dbg mode padapter->recvpriv.is_signal_dbg = 1; extra_arg = 
extra_arg>100?100:extra_arg; extra_arg = extra_arg<0?0:extra_arg; padapter->recvpriv.signal_strength_dbg=extra_arg; break; } break; case 0x78: //IOL test switch(minor_cmd) { #ifdef CONFIG_IOL case 0x04: //LLT table initialization test { u8 page_boundary = 0xf9; { struct xmit_frame *xmit_frame; if((xmit_frame=rtw_IOL_accquire_xmit_frame(padapter)) == NULL) { ret = -ENOMEM; break; } rtw_IOL_append_LLT_cmd(xmit_frame, page_boundary); if(_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 500) ) ret = -EPERM; } } break; case 0x05: //blink LED test { u16 reg = 0x4c; u32 blink_num = 50; u32 blink_delay_ms = 200; int i; { struct xmit_frame *xmit_frame; if((xmit_frame=rtw_IOL_accquire_xmit_frame(padapter)) == NULL) { ret = -ENOMEM; break; } for(i=0;i<blink_num;i++){ rtw_IOL_append_WB_cmd(xmit_frame, reg, 0x00); rtw_IOL_append_DELAY_MS_cmd(xmit_frame, blink_delay_ms); rtw_IOL_append_WB_cmd(xmit_frame, reg, 0x08); rtw_IOL_append_DELAY_MS_cmd(xmit_frame, blink_delay_ms); } if(_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, (blink_delay_ms*blink_num*2)+200) ) ret = -EPERM; } } break; case 0x06: //continuous wirte byte test { u16 reg = arg; u16 start_value = 0; u32 write_num = extra_arg; int i; u8 final; { struct xmit_frame *xmit_frame; if((xmit_frame=rtw_IOL_accquire_xmit_frame(padapter)) == NULL) { ret = -ENOMEM; break; } for(i=0;i<write_num;i++){ rtw_IOL_append_WB_cmd(xmit_frame, reg, i+start_value); } if(_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000)) ret = -EPERM; } if(start_value+write_num-1 == (final=rtw_read8(padapter, reg)) ) { DBG_871X("continuous IOL_CMD_WB_REG to 0x%x %u times Success, start:%u, final:%u\n", reg, write_num, start_value, final); } else { DBG_871X("continuous IOL_CMD_WB_REG to 0x%x %u times Fail, start:%u, final:%u\n", reg, write_num, start_value, final); } } break; case 0x07: //continuous wirte word test { u16 reg = arg; u16 start_value = 200; u32 write_num = extra_arg; int i; u16 final; { struct xmit_frame *xmit_frame; 
if((xmit_frame=rtw_IOL_accquire_xmit_frame(padapter)) == NULL) { ret = -ENOMEM; break; } for(i=0;i<write_num;i++){ rtw_IOL_append_WW_cmd(xmit_frame, reg, i+start_value); } if(_SUCCESS !=rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000)) ret = -EPERM; } if(start_value+write_num-1 == (final=rtw_read16(padapter, reg)) ) { DBG_871X("continuous IOL_CMD_WW_REG to 0x%x %u times Success, start:%u, final:%u\n", reg, write_num, start_value, final); } else { DBG_871X("continuous IOL_CMD_WW_REG to 0x%x %u times Fail, start:%u, final:%u\n", reg, write_num, start_value, final); } } break; case 0x08: //continuous wirte dword test { u16 reg = arg; u32 start_value = 0x110000c7; u32 write_num = extra_arg; int i; u32 final; { struct xmit_frame *xmit_frame; if((xmit_frame=rtw_IOL_accquire_xmit_frame(padapter)) == NULL) { ret = -ENOMEM; break; } for(i=0;i<write_num;i++){ rtw_IOL_append_WD_cmd(xmit_frame, reg, i+start_value); } if(_SUCCESS !=rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000)) ret = -EPERM; } if(start_value+write_num-1 == (final=rtw_read32(padapter, reg)) ) { DBG_871X("continuous IOL_CMD_WD_REG to 0x%x %u times Success, start:%u, final:%u\n", reg, write_num, start_value, final); } else { DBG_871X("continuous IOL_CMD_WD_REG to 0x%x %u times Fail, start:%u, final:%u\n", reg, write_num, start_value, final); } } break; #endif //CONFIG_IOL } break; case 0x7F: switch(minor_cmd) { case 0x0: DBG_8192C("fwstate=0x%x\n", get_fwstate(pmlmepriv)); break; case 0x01: DBG_8192C("auth_alg=0x%x, enc_alg=0x%x, auth_type=0x%x, enc_type=0x%x\n", psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm, psecuritypriv->ndisauthtype, psecuritypriv->ndisencryptstatus); break; case 0x02: DBG_8192C("pmlmeinfo->state=0x%x\n", pmlmeinfo->state); break; case 0x03: DBG_8192C("qos_option=%d\n", pmlmepriv->qospriv.qos_option); DBG_8192C("ht_option=%d\n", pmlmepriv->htpriv.ht_option); break; case 0x04: DBG_8192C("cur_ch=%d\n", pmlmeext->cur_channel); DBG_8192C("cur_bw=%d\n", 
pmlmeext->cur_bwmode); DBG_8192C("cur_ch_off=%d\n", pmlmeext->cur_ch_offset); break; case 0x05: psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress); if(psta) { int i; struct recv_reorder_ctrl *preorder_ctrl; DBG_8192C("sta's macaddr:" MAC_FMT "\n", MAC_ARG(psta->hwaddr)); DBG_8192C("rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self); DBG_8192C("qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate); DBG_8192C("state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid); DBG_8192C("bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi); DBG_8192C("ampdu_enable = %d\n", psta->htpriv.ampdu_enable); DBG_8192C("agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap); for(i=0;i<16;i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; if(preorder_ctrl->enable) { DBG_8192C("tid=%d, indicate_seq=%d\n", i, preorder_ctrl->indicate_seq); } } } else { DBG_8192C("can't get sta's macaddr, cur_network's macaddr:" MAC_FMT "\n", MAC_ARG(cur_network->network.MacAddress)); } break; case 0x06: { u8 DMFlag; padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_DM_FLAG, (u8 *)(&DMFlag)); DBG_8192C("(B)DMFlag=0x%x, arg=0x%x\n", DMFlag, arg); DMFlag = (u8)(0x0f&arg); DBG_8192C("(A)DMFlag=0x%x\n", DMFlag); padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_DM_FLAG, (u8 *)(&DMFlag)); } break; case 0x07: DBG_8192C("bSurpriseRemoved=%d, bDriverStopped=%d\n", padapter->bSurpriseRemoved, padapter->bDriverStopped); break; case 0x08: { struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct recv_priv *precvpriv = &padapter->recvpriv; DBG_8192C("free_xmitbuf_cnt=%d, free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitbuf_cnt, pxmitpriv->free_xmitframe_cnt); #ifdef CONFIG_USB_HCI DBG_8192C("rx_urb_pending_cn=%d\n", precvpriv->rx_pending_cnt); #endif } break; case 0x09: { int i, j; _list *plist, *phead; struct 
recv_reorder_ctrl *preorder_ctrl; #ifdef CONFIG_AP_MODE DBG_8192C("sta_dz_bitmap=0x%x, tim_bitmap=0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap); #endif _enter_critical_bh(&pstapriv->sta_hash_lock, &irqL); for(i=0; i< NUM_STA; i++) { phead = &(pstapriv->sta_hash[i]); plist = get_next(phead); while ((rtw_end_of_queue_search(phead, plist)) == _FALSE) { psta = LIST_CONTAINOR(plist, struct sta_info, hash_list); plist = get_next(plist); if(extra_arg == psta->aid) { DBG_8192C("sta's macaddr:" MAC_FMT "\n", MAC_ARG(psta->hwaddr)); DBG_8192C("rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self); DBG_8192C("qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate); DBG_8192C("state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid); DBG_8192C("bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi); DBG_8192C("ampdu_enable = %d\n", psta->htpriv.ampdu_enable); DBG_8192C("agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap); DBG_8192C("capability=0x%x\n", psta->capability); DBG_8192C("flags=0x%x\n", psta->flags); DBG_8192C("wpa_psk=0x%x\n", psta->wpa_psk); DBG_8192C("wpa2_group_cipher=0x%x\n", psta->wpa2_group_cipher); DBG_8192C("wpa2_pairwise_cipher=0x%x\n", psta->wpa2_pairwise_cipher); DBG_8192C("qos_info=0x%x\n", psta->qos_info); DBG_8192C("dot118021XPrivacy=0x%x\n", psta->dot118021XPrivacy); for(j=0;j<16;j++) { preorder_ctrl = &psta->recvreorder_ctrl[j]; if(preorder_ctrl->enable) { DBG_8192C("tid=%d, indicate_seq=%d\n", j, preorder_ctrl->indicate_seq); } } } } } _exit_critical_bh(&pstapriv->sta_hash_lock, &irqL); } break; case 0x0c://dump rx packet { DBG_8192C("dump rx packet (%d)\n",extra_arg); //pHalData->bDumpRxPkt =extra_arg; padapter->HalFunc.SetHalDefVarHandler(padapter, HAL_DEF_DBG_DUMP_RXPKT, &(extra_arg)); } break; #if 0 case 0x0d://dump cam { //u8 entry = (u8) extra_arg; u8 
entry=0; //dump cam for(entry=0;entry<32;entry++) read_cam(padapter,entry); } break; #endif #ifdef DBG_CONFIG_ERROR_DETECT case 0x0f: { if(extra_arg == 0){ DBG_8192C("###### silent reset test.......#####\n"); if(padapter->HalFunc.silentreset) padapter->HalFunc.silentreset(padapter); } } break; case 0x12: { struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv; DBG_8192C("==>silent resete cnts:%d\n",pwrpriv->ips_enter_cnts); } break; #endif case 0x10:// driver version display DBG_8192C("rtw driver version=%s\n", DRIVERVERSION); break; case 0x11: { DBG_8192C("turn %s Rx RSSI display function\n",(extra_arg==1)?"on":"off"); padapter->bRxRSSIDisplay = extra_arg ; } break; #if 1 case 0xdd://registers dump , 0 for mac reg,1 for bb reg, 2 for rf reg { if(extra_arg==0){ mac_reg_dump(padapter); } else if(extra_arg==1){ bb_reg_dump(padapter); } else if(extra_arg==2){ rf_reg_dump(padapter); } } break; #endif case 0xee://turn on/off dynamic funcs { u8 dm_flag; if(0xf==extra_arg){ padapter->HalFunc.GetHalDefVarHandler(padapter, HAL_DEF_DBG_DM_FUNC,&dm_flag); DBG_8192C(" === DMFlag(0x%02x) === \n",dm_flag); DBG_8192C("extra_arg = 0 - disable all dynamic func \n"); DBG_8192C("extra_arg = 1 - disable DIG- BIT(0)\n"); DBG_8192C("extra_arg = 2 - disable High power - BIT(1)\n"); DBG_8192C("extra_arg = 3 - disable tx power tracking - BIT(2)\n"); DBG_8192C("extra_arg = 4 - disable BT coexistence - BIT(3)\n"); DBG_8192C("extra_arg = 5 - disable antenna diversity - BIT(4)\n"); DBG_8192C("extra_arg = 6 - enable all dynamic func \n"); } else{ /* extra_arg = 0 - disable all dynamic func extra_arg = 1 - disable DIG extra_arg = 2 - disable tx power tracking extra_arg = 3 - turn on all dynamic func */ padapter->HalFunc.SetHalDefVarHandler(padapter, HAL_DEF_DBG_DM_FUNC, &(extra_arg)); padapter->HalFunc.GetHalDefVarHandler(padapter, HAL_DEF_DBG_DM_FUNC,&dm_flag); DBG_8192C(" === DMFlag(0x%02x) === \n",dm_flag); } } break; case 0xfd: rtw_write8(padapter, 0xc50, arg); DBG_8192C("wr(0xc50)=0x%x\n", 
rtw_read8(padapter, 0xc50)); rtw_write8(padapter, 0xc58, arg); DBG_8192C("wr(0xc58)=0x%x\n", rtw_read8(padapter, 0xc58)); break; case 0xfe: DBG_8192C("rd(0xc50)=0x%x\n", rtw_read8(padapter, 0xc50)); DBG_8192C("rd(0xc58)=0x%x\n", rtw_read8(padapter, 0xc58)); break; case 0xff: { DBG_8192C("dbg(0x210)=0x%x\n", rtw_read32(padapter, 0x210)); DBG_8192C("dbg(0x608)=0x%x\n", rtw_read32(padapter, 0x608)); DBG_8192C("dbg(0x280)=0x%x\n", rtw_read32(padapter, 0x280)); DBG_8192C("dbg(0x284)=0x%x\n", rtw_read32(padapter, 0x284)); DBG_8192C("dbg(0x288)=0x%x\n", rtw_read32(padapter, 0x288)); DBG_8192C("dbg(0x664)=0x%x\n", rtw_read32(padapter, 0x664)); DBG_8192C("\n"); DBG_8192C("dbg(0x430)=0x%x\n", rtw_read32(padapter, 0x430)); DBG_8192C("dbg(0x438)=0x%x\n", rtw_read32(padapter, 0x438)); DBG_8192C("dbg(0x440)=0x%x\n", rtw_read32(padapter, 0x440)); DBG_8192C("dbg(0x458)=0x%x\n", rtw_read32(padapter, 0x458)); DBG_8192C("dbg(0x484)=0x%x\n", rtw_read32(padapter, 0x484)); DBG_8192C("dbg(0x488)=0x%x\n", rtw_read32(padapter, 0x488)); DBG_8192C("dbg(0x444)=0x%x\n", rtw_read32(padapter, 0x444)); DBG_8192C("dbg(0x448)=0x%x\n", rtw_read32(padapter, 0x448)); DBG_8192C("dbg(0x44c)=0x%x\n", rtw_read32(padapter, 0x44c)); DBG_8192C("dbg(0x450)=0x%x\n", rtw_read32(padapter, 0x450)); } break; } break; default: DBG_8192C("error dbg cmd!\n"); break; } return ret; } static int wpa_set_param(struct net_device *dev, u8 name, u32 value) { uint ret=0; u32 flags; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); switch (name){ case IEEE_PARAM_WPA_ENABLED: padapter->securitypriv.dot11AuthAlgrthm= dot11AuthAlgrthm_8021X; //802.1x //ret = ieee80211_wpa_enable(ieee, value); switch((value)&0xff) { case 1 : //WPA padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; //WPA_PSK padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled; break; case 2: //WPA2 padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK; //WPA2_PSK padapter->securitypriv.ndisencryptstatus = 
Ndis802_11Encryption3Enabled;
			break;
		}

		RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_info_,("wpa_set_param:padapter->securitypriv.ndisauthtype=%d\n", padapter->securitypriv.ndisauthtype));

		break;

	case IEEE_PARAM_TKIP_COUNTERMEASURES:
		//ieee->tkip_countermeasures=value;
		break;

	case IEEE_PARAM_DROP_UNENCRYPTED:
	{
		/* HACK:
		 *
		 * wpa_supplicant calls set_wpa_enabled when the driver
		 * is loaded and unloaded, regardless of if WPA is being
		 * used. No other calls are made which can be used to
		 * determine if encryption will be used or not prior to
		 * association being expected. If encryption is not being
		 * used, drop_unencrypted is set to false, else true -- we
		 * can use this to determine if the CAP_PRIVACY_ON bit should
		 * be set.
		 */

#if 0
		struct ieee80211_security sec = {
			.flags = SEC_ENABLED,
			.enabled = value,
		};
		ieee->drop_unencrypted = value;
		/* We only change SEC_LEVEL for open mode. Others
		 * are set by ipw_wpa_set_encryption.
		 */
		if (!value) {
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_0;
		}
		else {
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_1;
		}

		if (ieee->set_security)
			ieee->set_security(ieee->dev, &sec);
#endif
		break;
	}
	case IEEE_PARAM_PRIVACY_INVOKED:
		//ieee->privacy_invoked=value;
		break;

	case IEEE_PARAM_AUTH_ALGS:
		ret = wpa_set_auth_algs(dev, value);
		break;

	case IEEE_PARAM_IEEE_802_1X:
		//ieee->ieee802_1x=value;
		break;

	case IEEE_PARAM_WPAX_SELECT:
		// added for WPA2 mixed mode
		//DBG_8192C(KERN_WARNING "------------------------>wpax value = %x\n", value);
		/*
		spin_lock_irqsave(&ieee->wpax_suitlist_lock,flags);
		ieee->wpax_type_set = 1;
		ieee->wpax_type_notify = value;
		spin_unlock_irqrestore(&ieee->wpax_suitlist_lock,flags);
		*/
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Handle IEEE_CMD_MLME requests from wpa_supplicant. Both deauth and
 * disassoc map onto rtw_set_802_11_disassociate(); ret is -1 when
 * that request could not be issued. */
static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
{
	int ret = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);

	switch (command)
	{
		case IEEE_MLME_STA_DEAUTH:
			if(!rtw_set_802_11_disassociate(padapter))
				ret = -1;
			break;

		case IEEE_MLME_STA_DISASSOC:
			if(!rtw_set_802_11_disassociate(padapter))
				ret = -1;
			break;

		default:
			ret = -EOPNOTSUPP;
			break;
	}

	return ret;
}

/* wpa_supplicant private ioctl entry point: copies a struct
 * ieee_param of p->length bytes from user space (length is validated
 * against sizeof(struct ieee_param) first), dispatches on param->cmd,
 * and copies the parameter block back to user space on success. */
static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
{
	struct ieee_param *param;
	uint ret=0;

	//down(&ieee->wx_sem);

	if (p->length < sizeof(struct ieee_param) || !p->pointer){
		ret = -EINVAL;
		goto out;
	}

	param = (struct ieee_param *)rtw_malloc(p->length);
	if (param == NULL)
	{
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(param, p->pointer, p->length))
	{
		rtw_mfree((u8*)param, p->length);
		ret = -EFAULT;
		goto out;
	}

	switch (param->cmd)
	{
	case IEEE_CMD_SET_WPA_PARAM:
		ret = wpa_set_param(dev, param->u.wpa_param.name, param->u.wpa_param.value);
		break;

	case IEEE_CMD_SET_WPA_IE:
		//ret = wpa_set_wpa_ie(dev, param, p->length);
		ret = rtw_set_wpa_ie((_adapter *)rtw_netdev_priv(dev), (char*)param->u.wpa_ie.data, (u16)param->u.wpa_ie.len);
		break;

	case IEEE_CMD_SET_ENCRYPTION:
		ret = wpa_set_encryption(dev, param, p->length);
		break;

	case IEEE_CMD_MLME:
		ret = wpa_mlme(dev, param->u.mlme.command, param->u.mlme.reason_code);
		break;

	default:
		DBG_8192C("Unknown WPA supplicant request: %d\n", param->cmd);
		ret = -EOPNOTSUPP;
		break;
	}

	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
		ret = -EFAULT;

	rtw_mfree((u8 *)param, p->length);

out:

	//up(&ieee->wx_sem);

	return ret;
}

#ifdef CONFIG_AP_MODE

/* Queue a _SetStaKey_CMD_ to the command thread to install psta's
 * pairwise (unicast) key, taking the algorithm from
 * psta->dot118021XPrivacy. Returns _SUCCESS, or _FAIL on allocation
 * failure. Ownership of both allocations passes to the command
 * handler via rtw_enqueue_cmd(). */
static u8 set_pairwise_key(_adapter *padapter, struct sta_info *psta)
{
	struct cmd_obj* ph2c;
	struct set_stakey_parm	*psetstakey_para;
	struct cmd_priv 	*pcmdpriv=&padapter->cmdpriv;
	u8	res=_SUCCESS;

	ph2c = (struct cmd_obj*)rtw_zmalloc(sizeof(struct cmd_obj));
	if ( ph2c == NULL){
		res= _FAIL;
		goto exit;
	}

	psetstakey_para = (struct set_stakey_parm*)rtw_zmalloc(sizeof(struct set_stakey_parm));
	if(psetstakey_para==NULL){
		rtw_mfree((u8 *) ph2c, sizeof(struct cmd_obj));
		res=_FAIL;
		goto exit;
	}

	init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);

	psetstakey_para->algorithm = (u8)psta->dot118021XPrivacy;

	_rtw_memcpy(psetstakey_para->addr, psta->hwaddr,
ETH_ALEN); _rtw_memcpy(psetstakey_para->key, &psta->dot118021x_UncstKey, 16); res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } static int set_group_key(_adapter *padapter, u8 *key, u8 alg, int keyid) { u8 keylen; struct cmd_obj* pcmd; struct setkey_parm *psetkeyparm; struct cmd_priv *pcmdpriv=&(padapter->cmdpriv); int res=_SUCCESS; DBG_8192C("%s\n", __FUNCTION__); pcmd = (struct cmd_obj*)rtw_zmalloc(sizeof(struct cmd_obj)); if(pcmd==NULL){ res= _FAIL; goto exit; } psetkeyparm=(struct setkey_parm*)rtw_zmalloc(sizeof(struct setkey_parm)); if(psetkeyparm==NULL){ rtw_mfree((unsigned char *)pcmd, sizeof(struct cmd_obj)); res= _FAIL; goto exit; } _rtw_memset(psetkeyparm, 0, sizeof(struct setkey_parm)); psetkeyparm->keyid=(u8)keyid; psetkeyparm->algorithm = alg; psetkeyparm->set_tx = 1; switch(alg) { case _WEP40_: keylen = 5; break; case _WEP104_: keylen = 13; break; case _TKIP_: case _TKIP_WTMIC_: case _AES_: keylen = 16; default: keylen = 16; } _rtw_memcpy(&(psetkeyparm->key[0]), key, keylen); pcmd->cmdcode = _SetKey_CMD_; pcmd->parmbuf = (u8 *)psetkeyparm; pcmd->cmdsz = (sizeof(struct setkey_parm)); pcmd->rsp = NULL; pcmd->rspsz = 0; _rtw_init_listhead(&pcmd->list); res = rtw_enqueue_cmd(pcmdpriv, pcmd); exit: return res; } static int set_wep_key(_adapter *padapter, u8 *key, u8 keylen, int keyid) { u8 alg; switch(keylen) { case 5: alg =_WEP40_; break; case 13: alg =_WEP104_; break; default: alg =_NO_PRIVACY_; } return set_group_key(padapter, key, alg, keyid); } static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len) { int ret = 0; u32 wep_key_idx, wep_key_len,wep_total_len; NDIS_802_11_WEP *pwep = NULL; struct sta_info *psta = NULL, *pbcmc_sta = NULL; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv* psecuritypriv=&(padapter->securitypriv); struct sta_priv *pstapriv = &padapter->stapriv; DBG_8192C("%s\n", __FUNCTION__); param->u.crypt.err = 0; 
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0'; //sizeof(struct ieee_param) = 64 bytes; //if (param_len != (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len) if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len) { ret = -EINVAL; goto exit; } if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { if (param->u.crypt.idx >= WEP_KEYS) { ret = -EINVAL; goto exit; } } else { psta = rtw_get_stainfo(pstapriv, param->sta_addr); if(!psta) { //ret = -EINVAL; DBG_8192C("rtw_set_encryption(), sta has already been removed or never been added\n"); goto exit; } } if (strcmp(param->u.crypt.alg, "none") == 0 && (psta==NULL)) { //todo:clear default encryption keys DBG_8192C("clear default encryption keys, keyid=%d\n", param->u.crypt.idx); goto exit; } if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta==NULL)) { DBG_8192C("r871x_set_encryption, crypt.alg = WEP\n"); wep_key_idx = param->u.crypt.idx; wep_key_len = param->u.crypt.key_len; DBG_8192C("r871x_set_encryption, wep_key_idx=%d, len=%d\n", wep_key_idx, wep_key_len); if((wep_key_idx >= WEP_KEYS) || (wep_key_len<=0)) { ret = -EINVAL; goto exit; } if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 
5 : 13; wep_total_len = wep_key_len + FIELD_OFFSET(NDIS_802_11_WEP, KeyMaterial); pwep =(NDIS_802_11_WEP *)rtw_malloc(wep_total_len); if(pwep == NULL){ DBG_8192C(" r871x_set_encryption: pwep allocate fail !!!\n"); goto exit; } _rtw_memset(pwep, 0, wep_total_len); pwep->KeyLength = wep_key_len; pwep->Length = wep_total_len; } pwep->KeyIndex = wep_key_idx; _rtw_memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength); if(param->u.crypt.set_tx) { DBG_8192C("wep, set_tx=1\n"); psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled; psecuritypriv->dot11PrivacyAlgrthm=_WEP40_; psecuritypriv->dot118021XGrpPrivacy=_WEP40_; if(pwep->KeyLength==13) { psecuritypriv->dot11PrivacyAlgrthm=_WEP104_; psecuritypriv->dot118021XGrpPrivacy=_WEP104_; } psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx; _rtw_memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength); psecuritypriv->dot11DefKeylen[wep_key_idx]=pwep->KeyLength; set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx); } else { DBG_8192C("wep, set_tx=0\n"); //don't update "psecuritypriv->dot11PrivacyAlgrthm" and //"psecuritypriv->dot11PrivacyKeyIndex=keyid", but can rtw_set_key to cam _rtw_memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength); psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength; set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx); } goto exit; } if(!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) // //group key { if(param->u.crypt.set_tx ==1) { if(strcmp(param->u.crypt.alg, "WEP") == 0) { DBG_8192C("%s, set group_key, WEP\n", __FUNCTION__); _rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); psecuritypriv->dot118021XGrpPrivacy = _WEP40_; if(param->u.crypt.key_len==13) { psecuritypriv->dot118021XGrpPrivacy = _WEP104_; } } else if(strcmp(param->u.crypt.alg, "TKIP") == 0) { 
DBG_8192C("%s, set group_key, TKIP\n", __FUNCTION__); psecuritypriv->dot118021XGrpPrivacy = _TKIP_; _rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); //DEBUG_ERR("set key length :param->u.crypt.key_len=%d\n", param->u.crypt.key_len); //set mic key _rtw_memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8); _rtw_memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8); psecuritypriv->busetkipkey = _TRUE; } else if(strcmp(param->u.crypt.alg, "CCMP") == 0) { DBG_8192C("%s, set group_key, CCMP\n", __FUNCTION__); psecuritypriv->dot118021XGrpPrivacy = _AES_; _rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); } else { DBG_8192C("%s, set group_key, none\n", __FUNCTION__); psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_; } psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx; psecuritypriv->binstallGrpkey = _TRUE; psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;//!!! 
set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx); pbcmc_sta=rtw_get_bcmc_stainfo(padapter); if(pbcmc_sta) { pbcmc_sta->ieee8021x_blocked = _FALSE; pbcmc_sta->dot118021XPrivacy= psecuritypriv->dot118021XGrpPrivacy;//rx will use bmc_sta's dot118021XPrivacy } } goto exit; } if(psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) // psk/802_1x { if(check_fwstate(pmlmepriv, WIFI_AP_STATE)) { if(param->u.crypt.set_tx ==1) { _rtw_memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); if(strcmp(param->u.crypt.alg, "WEP") == 0) { DBG_8192C("%s, set pairwise key, WEP\n", __FUNCTION__); psta->dot118021XPrivacy = _WEP40_; if(param->u.crypt.key_len==13) { psta->dot118021XPrivacy = _WEP104_; } } else if(strcmp(param->u.crypt.alg, "TKIP") == 0) { DBG_8192C("%s, set pairwise key, TKIP\n", __FUNCTION__); psta->dot118021XPrivacy = _TKIP_; //DEBUG_ERR("set key length :param->u.crypt.key_len=%d\n", param->u.crypt.key_len); //set mic key _rtw_memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8); _rtw_memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8); psecuritypriv->busetkipkey = _TRUE; } else if(strcmp(param->u.crypt.alg, "CCMP") == 0) { DBG_8192C("%s, set pairwise key, CCMP\n", __FUNCTION__); psta->dot118021XPrivacy = _AES_; } else { DBG_8192C("%s, set pairwise key, none\n", __FUNCTION__); psta->dot118021XPrivacy = _NO_PRIVACY_; } set_pairwise_key(padapter, psta); psta->ieee8021x_blocked = _FALSE; } else//group key??? 
{ if(strcmp(param->u.crypt.alg, "WEP") == 0) { _rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); psecuritypriv->dot118021XGrpPrivacy = _WEP40_; if(param->u.crypt.key_len==13) { psecuritypriv->dot118021XGrpPrivacy = _WEP104_; } } else if(strcmp(param->u.crypt.alg, "TKIP") == 0) { psecuritypriv->dot118021XGrpPrivacy = _TKIP_; _rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); //DEBUG_ERR("set key length :param->u.crypt.key_len=%d\n", param->u.crypt.key_len); //set mic key _rtw_memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8); _rtw_memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8); psecuritypriv->busetkipkey = _TRUE; } else if(strcmp(param->u.crypt.alg, "CCMP") == 0) { psecuritypriv->dot118021XGrpPrivacy = _AES_; _rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len)); } else { psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_; } psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx; psecuritypriv->binstallGrpkey = _TRUE; psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;//!!! 
set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx); pbcmc_sta=rtw_get_bcmc_stainfo(padapter); if(pbcmc_sta) { pbcmc_sta->ieee8021x_blocked = _FALSE; pbcmc_sta->dot118021XPrivacy= psecuritypriv->dot118021XGrpPrivacy;//rx will use bmc_sta's dot118021XPrivacy } } } } exit: if(pwep) { rtw_mfree((u8 *)pwep, wep_total_len); } return ret; } static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int len) { int ret=0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct sta_priv *pstapriv = &padapter->stapriv; unsigned char *pbuf = param->u.bcn_ie.buf; DBG_8192C("%s, len=%d\n", __FUNCTION__, len); if(check_fwstate(pmlmepriv, WIFI_AP_STATE) != _TRUE) return -EINVAL; _rtw_memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2); #ifdef SUPPORT_64_STA pstapriv->max_num_sta = NUM_STA; #else //SUPPORT_64_STA if((pstapriv->max_num_sta>NUM_STA) || (pstapriv->max_num_sta<=0)) pstapriv->max_num_sta = NUM_STA; #endif//SUPPORT_64_STA if(rtw_check_beacon_data(padapter, pbuf, (len-12-2)) == _SUCCESS)// 12 = param header, 2:no packed ret = 0; else ret = -EINVAL; return ret; } static int rtw_hostapd_sta_flush(struct net_device *dev) { //_irqL irqL; //_list *phead, *plist; int ret=0; //struct sta_info *psta = NULL; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); //struct sta_priv *pstapriv = &padapter->stapriv; DBG_8192C("%s\n", __FUNCTION__); flush_all_cam_entry(padapter); //clear CAM #if 0 phead = &pstapriv->asoc_list; plist = get_next(phead); //free sta asoc_queue while ((rtw_end_of_queue_search(phead, plist)) == _FALSE) { psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list); plist = get_next(plist); rtw_list_delete(&psta->asoc_list); //tear down Rx AMPDU send_delba(padapter, 0, psta->hwaddr);// recipient //tear down TX AMPDU send_delba(padapter, 1, psta->hwaddr);// // originator psta->htpriv.agg_enable_bitmap = 0x0;//reset 
psta->htpriv.candidate_tid_bitmap = 0x0;//reset issue_deauth(padapter, psta->hwaddr, WLAN_REASON_DEAUTH_LEAVING); _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL); rtw_free_stainfo(padapter, psta); _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL); } #endif ret = rtw_sta_flush(padapter); return ret; } static int rtw_add_sta(struct net_device *dev, struct ieee_param *param) { _irqL irqL; int ret=0; struct sta_info *psta = NULL; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct sta_priv *pstapriv = &padapter->stapriv; DBG_8192C("rtw_add_sta(aid=%d)=" MAC_FMT "\n", param->u.add_sta.aid, MAC_ARG(param->sta_addr)); if(check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != _TRUE) { return -EINVAL; } if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { return -EINVAL; } /* psta = rtw_get_stainfo(pstapriv, param->sta_addr); if(psta) { DBG_8192C("rtw_add_sta(), free has been added psta=%p\n", psta); _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL); rtw_free_stainfo(padapter, psta); _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL); psta = NULL; } */ //psta = rtw_alloc_stainfo(pstapriv, param->sta_addr); psta = rtw_get_stainfo(pstapriv, param->sta_addr); if(psta) { int flags = param->u.add_sta.flags; //DBG_8192C("rtw_add_sta(), init sta's variables, psta=%p\n", psta); psta->aid = param->u.add_sta.aid;//aid=1~2007 _rtw_memcpy(psta->bssrateset, param->u.add_sta.tx_supp_rates, 16); //check wmm cap. if(WLAN_STA_WME&flags) psta->qos_option = 1; else psta->qos_option = 0; if(pmlmepriv->qospriv.qos_option == 0) psta->qos_option = 0; #ifdef CONFIG_80211N_HT //chec 802.11n ht cap. 
if(WLAN_STA_HT&flags) { psta->htpriv.ht_option = _TRUE; psta->qos_option = 1; _rtw_memcpy((void*)&psta->htpriv.ht_cap, (void*)&param->u.add_sta.ht_cap, sizeof(struct rtw_ieee80211_ht_cap)); } else { psta->htpriv.ht_option = _FALSE; } if(pmlmepriv->htpriv.ht_option == _FALSE) psta->htpriv.ht_option = _FALSE; #endif update_sta_info_apmode(padapter, psta); } else { ret = -ENOMEM; } return ret; } static int rtw_del_sta(struct net_device *dev, struct ieee_param *param) { _irqL irqL; int ret=0; struct sta_info *psta = NULL; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct sta_priv *pstapriv = &padapter->stapriv; DBG_8192C("rtw_del_sta=" MAC_FMT "\n", MAC_ARG(param->sta_addr)); if(check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != _TRUE) { return -EINVAL; } if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { return -EINVAL; } psta = rtw_get_stainfo(pstapriv, param->sta_addr); if(psta) { //DBG_8192C("free psta=%p, aid=%d\n", psta, psta->aid); #if 0 //tear down Rx AMPDU send_delba(padapter, 0, psta->hwaddr);// recipient //tear down TX AMPDU send_delba(padapter, 1, psta->hwaddr);// // originator psta->htpriv.agg_enable_bitmap = 0x0;//reset psta->htpriv.candidate_tid_bitmap = 0x0;//reset issue_deauth(padapter, psta->hwaddr, WLAN_REASON_DEAUTH_LEAVING); _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL); rtw_free_stainfo(padapter, psta); _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL); pstapriv->sta_dz_bitmap &=~BIT(psta->aid); pstapriv->tim_bitmap &=~BIT(psta->aid); #endif _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL); if(rtw_is_list_empty(&psta->asoc_list)==_FALSE) { rtw_list_delete(&psta->asoc_list); ap_free_sta(padapter, psta); } _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL); psta = NULL; } else { DBG_8192C("rtw_del_sta(), sta has already been removed 
or never been added\n"); //ret = -1; } return ret; } static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param) { int ret=0; struct sta_info *psta = NULL; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct sta_priv *pstapriv = &padapter->stapriv; DBG_8192C("rtw_get_sta_wpaie, sta_addr: " MAC_FMT "\n", MAC_ARG(param->sta_addr)); if(check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != _TRUE) { return -EINVAL; } if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { return -EINVAL; } psta = rtw_get_stainfo(pstapriv, param->sta_addr); if(psta) { if((psta->wpa_ie[0] == WLAN_EID_RSN) || (psta->wpa_ie[0] == WLAN_EID_GENERIC)) { int wpa_ie_len; int copy_len; wpa_ie_len = psta->wpa_ie[1]; copy_len = ((wpa_ie_len+2) > sizeof(psta->wpa_ie)) ? (sizeof(psta->wpa_ie)):(wpa_ie_len+2); param->u.wpa_ie.len = copy_len; _rtw_memcpy(param->u.wpa_ie.reserved, psta->wpa_ie, copy_len); } else { //ret = -1; DBG_8192C("sta's wpa_ie is NONE\n"); } } else { ret = -1; } return ret; } static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len) { int ret=0; unsigned char wps_oui[4]={0x0,0x50,0xf2,0x04}; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); int ie_len; DBG_8192C("%s, len=%d\n", __FUNCTION__, len); if(check_fwstate(pmlmepriv, WIFI_AP_STATE) != _TRUE) return -EINVAL; ie_len = len-12-2;// 12 = param header, 2:no packed if(pmlmepriv->wps_beacon_ie) { rtw_mfree(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len); pmlmepriv->wps_beacon_ie = NULL; } if(ie_len>0) { pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len); pmlmepriv->wps_beacon_ie_len = ie_len; if ( 
pmlmepriv->wps_beacon_ie == NULL) { DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__); return -EINVAL; } _rtw_memcpy(pmlmepriv->wps_beacon_ie, param->u.bcn_ie.buf, ie_len); update_beacon(padapter, _VENDOR_SPECIFIC_IE_, wps_oui, _TRUE); pmlmeext->bstart_bss = _TRUE; } return ret; } static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len) { int ret=0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); int ie_len; DBG_8192C("%s, len=%d\n", __FUNCTION__, len); if(check_fwstate(pmlmepriv, WIFI_AP_STATE) != _TRUE) return -EINVAL; ie_len = len-12-2;// 12 = param header, 2:no packed if(pmlmepriv->wps_probe_resp_ie) { rtw_mfree(pmlmepriv->wps_probe_resp_ie, pmlmepriv->wps_probe_resp_ie_len); pmlmepriv->wps_probe_resp_ie = NULL; } if(ie_len>0) { pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len); pmlmepriv->wps_probe_resp_ie_len = ie_len; if ( pmlmepriv->wps_probe_resp_ie == NULL) { DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__); return -EINVAL; } _rtw_memcpy(pmlmepriv->wps_probe_resp_ie, param->u.bcn_ie.buf, ie_len); } return ret; } static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len) { int ret=0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); int ie_len; DBG_8192C("%s, len=%d\n", __FUNCTION__, len); if(check_fwstate(pmlmepriv, WIFI_AP_STATE) != _TRUE) return -EINVAL; ie_len = len-12-2;// 12 = param header, 2:no packed if(pmlmepriv->wps_assoc_resp_ie) { rtw_mfree(pmlmepriv->wps_assoc_resp_ie, pmlmepriv->wps_assoc_resp_ie_len); pmlmepriv->wps_assoc_resp_ie = NULL; } if(ie_len>0) { pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len); pmlmepriv->wps_assoc_resp_ie_len = ie_len; if ( pmlmepriv->wps_assoc_resp_ie == NULL) { DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__); return -EINVAL; } _rtw_memcpy(pmlmepriv->wps_assoc_resp_ie, 
param->u.bcn_ie.buf, ie_len); } return ret; } static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p) { struct ieee_param *param; int ret=0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); //DBG_8192C("%s\n", __FUNCTION__); /* * this function is expect to call in master mode, which allows no power saving * so, we just check hw_init_completed instead of call rfpwrstate_check() */ if (padapter->hw_init_completed==_FALSE){ ret = -EPERM; goto out; } //if (p->length < sizeof(struct ieee_param) || !p->pointer){ if(!p->pointer){ ret = -EINVAL; goto out; } param = (struct ieee_param *)rtw_malloc(p->length); if (param == NULL) { ret = -ENOMEM; goto out; } if (copy_from_user(param, p->pointer, p->length)) { rtw_mfree((u8*)param, p->length); ret = -EFAULT; goto out; } //DBG_8192C("%s, cmd=%d\n", __FUNCTION__, param->cmd); switch (param->cmd) { case RTL871X_HOSTAPD_FLUSH: ret = rtw_hostapd_sta_flush(dev); break; case RTL871X_HOSTAPD_ADD_STA: ret = rtw_add_sta(dev, param); break; case RTL871X_HOSTAPD_REMOVE_STA: ret = rtw_del_sta(dev, param); break; case RTL871X_HOSTAPD_SET_BEACON: ret = rtw_set_beacon(dev, param, p->length); break; case RTL871X_SET_ENCRYPTION: ret = rtw_set_encryption(dev, param, p->length); break; case RTL871X_HOSTAPD_GET_WPAIE_STA: ret = rtw_get_sta_wpaie(dev, param); break; case RTL871X_HOSTAPD_SET_WPS_BEACON: ret = rtw_set_wps_beacon(dev, param, p->length); break; case RTL871X_HOSTAPD_SET_WPS_PROBE_RESP: ret = rtw_set_wps_probe_resp(dev, param, p->length); break; case RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP: ret = rtw_set_wps_assoc_resp(dev, param, p->length); break; default: DBG_8192C("Unknown hostapd request: %d\n", param->cmd); ret = -EOPNOTSUPP; break; } if (ret == 0 && copy_to_user(p->pointer, param, p->length)) ret = -EFAULT; rtw_mfree((u8 *)param, p->length); out: return ret; } #endif #include <rtw_android.h> static int rtw_wx_set_priv(struct net_device *dev, struct iw_request_info *info, union iwreq_data *awrq, char *extra) { 
#ifdef CONFIG_DEBUG_RTW_WX_SET_PRIV char *ext_dbg; #endif int ret = 0; int len = 0; char *ext; int i; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct iw_point *dwrq = (struct iw_point*)awrq; //RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_notice_, ("+rtw_wx_set_priv\n")); len = dwrq->length; if (!(ext = rtw_vmalloc(len))) return -ENOMEM; if (copy_from_user(ext, dwrq->pointer, len)) { rtw_vmfree(ext, len); return -EFAULT; } //RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_notice_, // ("rtw_wx_set_priv: %s req=%s\n", // dev->name, ext)); #ifdef CONFIG_DEBUG_RTW_WX_SET_PRIV if (!(ext_dbg = rtw_vmalloc(len))) { rtw_vmfree(ext, len); return -ENOMEM; } _rtw_memcpy(ext_dbg, ext, len); #endif //added for wps2.0 @20110524 if(dwrq->flags == 0x8766 && len > 8) { u32 cp_sz; struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); u8 *probereq_wpsie = ext; int probereq_wpsie_len = len; u8 wps_oui[4]={0x0,0x50,0xf2,0x04}; if((_VENDOR_SPECIFIC_IE_ == probereq_wpsie[0]) && (_rtw_memcmp(&probereq_wpsie[2], wps_oui, 4) ==_TRUE)) { cp_sz = probereq_wpsie_len>MAX_WPS_IE_LEN ? 
MAX_WPS_IE_LEN:probereq_wpsie_len; _rtw_memcpy(pmlmepriv->probereq_wpsie, probereq_wpsie, cp_sz); pmlmepriv->probereq_wpsie_len = cp_sz; } goto FREE_EXT; } if( len >= WEXT_CSCAN_HEADER_SIZE && _rtw_memcmp(ext, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE) == _TRUE ){ ret = rtw_wx_set_scan(dev, info, awrq, ext); goto FREE_EXT; } #ifdef CONFIG_ANDROID //DBG_871X("rtw_wx_set_priv: %s req=%s\n", dev->name, ext); i = rtw_android_cmdstr_to_num(ext); switch(i) { case ANDROID_WIFI_CMD_START : indicate_wx_custom_event(padapter, "START"); break; case ANDROID_WIFI_CMD_STOP : indicate_wx_custom_event(padapter, "STOP"); break; case ANDROID_WIFI_CMD_RSSI : { struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct wlan_network *pcur_network = &pmlmepriv->cur_network; if(check_fwstate(pmlmepriv, _FW_LINKED) == _TRUE) { sprintf(ext, "%s rssi %d", pcur_network->network.Ssid.Ssid, padapter->recvpriv.rssi); } else { sprintf(ext, "OK"); } } break; case ANDROID_WIFI_CMD_LINKSPEED : { union iwreq_data wrqd; int ret_inner; int mbps; if( 0!=(ret_inner=rtw_wx_get_rate(dev, info, &wrqd, extra)) ){ mbps=0; } else { mbps=wrqd.bitrate.value / 1000000; } sprintf(ext, "LINKSPEED %d", mbps); } break; case ANDROID_WIFI_CMD_MACADDR : sprintf(ext, "MACADDR = " MAC_FMT, MAC_ARG(dev->dev_addr)); break; case ANDROID_WIFI_CMD_SCAN_ACTIVE : { struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); pmlmepriv->scan_mode=SCAN_ACTIVE; sprintf(ext, "OK"); } break; case ANDROID_WIFI_CMD_SCAN_PASSIVE : { struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); pmlmepriv->scan_mode=SCAN_PASSIVE; sprintf(ext, "OK"); } break; case ANDROID_WIFI_CMD_COUNTRY : { char country_code[10]; int channel_plan = RT_CHANNEL_DOMAIN_FCC; union iwreq_data wrqd; int ret_inner; sscanf(ext,"%*s %s",country_code); if(0 == strcmp(country_code, "US")) channel_plan = RT_CHANNEL_DOMAIN_FCC; else if(0 == strcmp(country_code, "EU")) channel_plan = RT_CHANNEL_DOMAIN_ETSI; else if(0 == strcmp(country_code, "JP")) channel_plan = 
RT_CHANNEL_DOMAIN_MKK; else if(0 == strcmp(country_code, "CN")) channel_plan = RT_CHANNEL_DOMAIN_CHINA; else DBG_871X("%s unknown country_code:%s, set to RT_CHANNEL_DOMAIN_FCC\n", __FUNCTION__, country_code); _rtw_memcpy(&wrqd, &channel_plan, sizeof(int)); if( 0!=(ret_inner=rtw_wx_set_channel_plan(dev, info, &wrqd, extra)) ){ DBG_871X("%s rtw_wx_set_channel_plan error\n", __FUNCTION__); } sprintf(ext, "OK"); } break; default : #ifdef CONFIG_DEBUG_RTW_WX_SET_PRIV DBG_871X("%s: %s unknowned req=%s\n", __FUNCTION__, dev->name, ext_dbg); #endif sprintf(ext, "OK"); } if (copy_to_user(dwrq->pointer, ext, min(dwrq->length, (u16)(strlen(ext)+1)) ) ) ret = -EFAULT; #ifdef CONFIG_DEBUG_RTW_WX_SET_PRIV DBG_871X("%s: %s req=%s rep=%s dwrq->length=%d, strlen(ext)+1=%d\n", __FUNCTION__, dev->name, ext_dbg ,ext, dwrq->length, (u16)(strlen(ext)+1)); #endif #endif //end of CONFIG_ANDROID FREE_EXT: rtw_vmfree(ext, len); #ifdef CONFIG_DEBUG_RTW_WX_SET_PRIV rtw_vmfree(ext_dbg, len); #endif //DBG_871X("rtw_wx_set_priv: (SIOCSIWPRIV) %s ret=%d\n", // dev->name, ret); return ret; } static int rtw_mp_efuse_get(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wdata, char *extra) { struct iw_point *wrqu = (struct iw_point *)wdata; PADAPTER padapter = rtw_netdev_priv(dev); struct mp_priv *pmp_priv; int i,j =0; u8 data[EFUSE_MAP_SIZE]; u8 rawdata[EFUSE_MAX_SIZE]; u16 mapLen=0; char *pch, *ptmp, *token, *tmp[3]={0x00,0x00,0x00}; u16 addr = 0, cnts = 0, max_available_size = 0,raw_cursize = 0 ,raw_maxsize = 0; _rtw_memset(data, '\0', sizeof(data)); _rtw_memset(rawdata, '\0', sizeof(rawdata)); if (copy_from_user(extra, wrqu->pointer, wrqu->length)) return -EFAULT; pch = extra; DBG_8192C("%s: in=%s\n", __func__, extra); i=0; //mac 16 "00e04c871200" rmap,00,2 while ( (token = strsep (&pch,",") )!=NULL ) { if(i>2) break; tmp[i] = token; i++; } if ( strcmp(tmp[0],"realmap") == 0 ) { DBG_8192C("strcmp OK = %s \n" ,tmp[0]); mapLen = EFUSE_MAP_SIZE; if 
(rtw_efuse_map_read(padapter, 0, mapLen, data) == _SUCCESS){ DBG_8192C("\t rtw_efuse_map_read \n"); }else { DBG_8192C("\t rtw_efuse_map_read : Fail \n"); return -EFAULT; } _rtw_memset(extra, '\0', sizeof(extra)); DBG_8192C("\tOFFSET\tVALUE(hex)\n"); sprintf(extra, "%s \n", extra); for ( i = 0; i < EFUSE_MAP_SIZE; i += 16 ) { DBG_8192C("\t0x%02x\t", i); sprintf(extra, "%s \t0x%02x\t", extra,i); for (j = 0; j < 8; j++) { DBG_8192C("%02X ", data[i+j]); sprintf(extra, "%s %02X", extra, data[i+j]); } DBG_8192C("\t"); sprintf(extra,"%s\t",extra); for (; j < 16; j++){ DBG_8192C("%02X ", data[i+j]); sprintf(extra, "%s %02X", extra, data[i+j]); } DBG_8192C("\n"); sprintf(extra,"%s\n",extra); } DBG_8192C("\n"); wrqu->length = strlen(extra); return 0; } else if ( strcmp(tmp[0],"rmap") == 0 ) { if ( tmp[1]==NULL || tmp[2]==NULL ) return -EINVAL; // rmap addr cnts addr = simple_strtoul(tmp[1], &ptmp, 16); DBG_8192C("addr = %x \n" ,addr); cnts=simple_strtoul(tmp[2], &ptmp,10); if(cnts==0) return -EINVAL; DBG_8192C("cnts = %d \n" ,cnts); //_rtw_memset(extra, '\0', wrqu->data.length); EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (PVOID)&max_available_size, _FALSE); if ((addr + cnts) > max_available_size) { DBG_8192C("(addr + cnts parameter error \n"); return -EFAULT; } if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) { DBG_8192C("rtw_efuse_access error \n"); } else{ DBG_8192C("rtw_efuse_access ok \n"); } _rtw_memset(extra, '\0', sizeof(extra)); for ( i = 0; i < cnts; i ++) { DBG_8192C("0x%02x", data[i]); sprintf(extra, "%s 0x%02X", extra, data[i]); DBG_8192C(" "); sprintf(extra,"%s ",extra); } wrqu->length = strlen(extra)+1; DBG_8192C("extra = %s ", extra); return 0; } else if ( strcmp(tmp[0],"realraw") == 0 ) { addr=0; mapLen = EFUSE_MAX_SIZE; if (rtw_efuse_access(padapter, _FALSE, addr, mapLen, rawdata) == _FAIL) { DBG_8192C("\t rtw_efuse_map_read : Fail \n"); return -EFAULT; } else { DBG_8192C("\t rtw_efuse_access raw ok \n"); } 
_rtw_memset(extra, '\0', sizeof(extra)); for ( i=0; i<mapLen; i++ ) { DBG_8192C(" %02x", rawdata[i]); sprintf(extra, "%s %02x", extra, rawdata[i] ); if ((i & 0xF) == 0xF){ DBG_8192C("\n\t"); sprintf(extra, "%s\n\t", extra); } else if ((i & 0x7) == 0x7){ DBG_8192C("\t"); sprintf(extra, "%s\t", extra); } } wrqu->length = strlen(extra); return 0; } else if ( strcmp(tmp[0],"mac") == 0 ) { if ( tmp[1]==NULL || tmp[2]==NULL ) return -EINVAL; #ifdef CONFIG_RTL8192C addr = 0x16; cnts = 6; #endif #ifdef CONFIG_RTL8192D addr = 0x19; cnts = 6; #endif EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (PVOID)&max_available_size, _FALSE); if ((addr + mapLen) > max_available_size) { DBG_8192C("(addr + cnts parameter error \n"); return -EFAULT; } if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) { DBG_8192C("rtw_efuse_access error \n"); } else{ DBG_8192C("rtw_efuse_access ok \n"); } _rtw_memset(extra, '\0', sizeof(extra)); for ( i = 0; i < cnts; i ++) { DBG_8192C("0x%02x", data[i]); sprintf(extra, "%s 0x%02X", extra, data[i+j]); DBG_8192C(" "); sprintf(extra,"%s ",extra); } wrqu->length = strlen(extra); return 0; } else if ( strcmp(tmp[0],"vidpid") == 0 ) { if ( tmp[1]==NULL || tmp[2]==NULL ) return -EINVAL; #ifdef CONFIG_RTL8192C addr=0x0a; #endif #ifdef CONFIG_RTL8192D addr = 0x0c; #endif cnts = 4; EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (PVOID)&max_available_size, _FALSE); if ((addr + mapLen) > max_available_size) { DBG_8192C("(addr + cnts parameter error \n"); return -EFAULT; } if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) { DBG_8192C("rtw_efuse_access error \n"); } else{ DBG_8192C("rtw_efuse_access ok \n"); } _rtw_memset(extra, '\0', sizeof(extra)); for ( i = 0; i < cnts; i ++) { DBG_8192C("0x%02x", data[i]); sprintf(extra, "%s 0x%02X", extra, data[i+j]); DBG_8192C(" "); sprintf(extra,"%s ",extra); } wrqu->length = strlen(extra); return 0; } else if ( strcmp(tmp[0],"ableraw") == 
0 ) { efuse_GetCurrentSize(padapter,&raw_cursize); raw_maxsize = efuse_GetMaxSize(padapter); sprintf(extra, "%s : [ available raw size] = %d",extra,raw_maxsize-raw_cursize); wrqu->length = strlen(extra); return 0; }else { sprintf(extra, "%s : Command not found\n",extra); wrqu->length = strlen(extra); return 0; } return 0; } static int rtw_mp_efuse_set(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wdata, char *extra) { struct iw_point *wrqu = (struct iw_point *)wdata; PADAPTER padapter = rtw_netdev_priv(dev); u8 buffer[40]; u32 i,jj,kk; u8 setdata[EFUSE_MAP_SIZE]; u8 setrawdata[EFUSE_MAX_SIZE]; char *pch, *ptmp, *token, *edata,*tmp[3]={0x00,0x00,0x00}; u16 addr = 0, max_available_size = 0; u32 cnts = 0; pch = extra; DBG_8192C("%s: in=%s\n", __func__, extra); i=0; while ( (token = strsep (&pch,",") )!=NULL ) { if(i>2) break; tmp[i] = token; i++; } // tmp[0],[1],[2] // wmap,addr,00e04c871200 if ( strcmp(tmp[0],"wmap") == 0 ) { if ( tmp[1]==NULL || tmp[2]==NULL ) return -EINVAL; if ( ! strlen( tmp[2] )/2 > 1 ) return -EFAULT; addr = simple_strtoul( tmp[1], &ptmp, 16 ); addr = addr & 0xFF; DBG_8192C("addr = %x \n" ,addr); cnts = strlen( tmp[2] )/2; if ( cnts == 0) return -EFAULT; DBG_8192C("cnts = %d \n" ,cnts); DBG_8192C("target data = %s \n" ,tmp[2]); for( jj = 0, kk = 0; jj < cnts; jj++, kk += 2 ) { setdata[jj] = key_2char2num( tmp[2][kk], tmp[2][kk+ 1] ); } EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (PVOID)&max_available_size, _FALSE); if ((addr + cnts) > max_available_size) { DBG_8192C("parameter error \n"); return -EFAULT; } if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) { DBG_8192C("rtw_efuse_map_write error \n"); return -EFAULT; } else DBG_8192C("rtw_efuse_map_write ok \n"); return 0; } else if ( strcmp(tmp[0],"wraw") == 0 ) { if ( tmp[1]==NULL || tmp[2]==NULL ) return -EINVAL; if ( ! 
strlen( tmp[2] )/2 > 1 ) return -EFAULT; addr = simple_strtoul( tmp[1], &ptmp, 16 ); addr = addr & 0xFF; DBG_8192C("addr = %x \n" ,addr); cnts=strlen( tmp[2] )/2; if ( cnts == 0) return -EFAULT; DBG_8192C(" cnts = %d \n" ,cnts ); DBG_8192C("target data = %s \n" ,tmp[2] ); for( jj = 0, kk = 0; jj < cnts; jj++, kk += 2 ) { setrawdata[jj] = key_2char2num( tmp[2][kk], tmp[2][kk+ 1] ); } if ( rtw_efuse_access( padapter, _TRUE, addr, cnts, setrawdata ) == _FAIL ){ DBG_8192C("\t rtw_efuse_map_read : Fail \n"); return -EFAULT; } else DBG_8192C("\t rtw_efuse_access raw ok \n"); return 0; } else if ( strcmp(tmp[0],"mac") == 0 ) { if ( tmp[1]==NULL || tmp[2]==NULL ) return -EINVAL; //mac,00e04c871200 #ifdef CONFIG_RTL8192C addr = 0x16; #endif #ifdef CONFIG_RTL8192D addr = 0x19; #endif cnts = strlen( tmp[1] )/2; if ( cnts == 0) return -EFAULT; if ( cnts > 6 ){ DBG_8192C("error data for mac addr = %s \n" ,tmp[1]); return -EFAULT; } DBG_8192C("target data = %s \n" ,tmp[1]); for( jj = 0, kk = 0; jj < cnts; jj++, kk += 2 ) { setdata[jj] = key_2char2num(tmp[1][kk], tmp[1][kk+ 1]); } EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (PVOID)&max_available_size, _FALSE); if ((addr + cnts) > max_available_size) { DBG_8192C("parameter error \n"); return -EFAULT; } if ( rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL ) { DBG_8192C("rtw_efuse_map_write error \n"); return -EFAULT; } else DBG_8192C("rtw_efuse_map_write ok \n"); return 0; } else if ( strcmp(tmp[0],"vidpid") == 0 ) { if ( tmp[1]==NULL || tmp[2]==NULL ) return -EINVAL; // pidvid,da0b7881 #ifdef CONFIG_RTL8192C addr=0x0a; #endif #ifdef CONFIG_RTL8192D addr = 0x0c; #endif cnts=strlen( tmp[1] )/2; if ( cnts == 0) return -EFAULT; DBG_8192C("target data = %s \n" ,tmp[1]); for( jj = 0, kk = 0; jj < cnts; jj++, kk += 2 ) { setdata[jj] = key_2char2num(tmp[1][kk], tmp[1][kk+ 1]); } EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (PVOID)&max_available_size, 
_FALSE); if ((addr + cnts) > max_available_size) { DBG_8192C("parameter error \n"); return -EFAULT; } if ( rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL ) { DBG_8192C("rtw_efuse_map_write error \n"); return -EFAULT; } else DBG_8192C("rtw_efuse_map_write ok \n"); return 0; } else{ DBG_8192C("Command not found\n"); return 0; } return 0; } #if defined(CONFIG_MP_INCLUDED) && defined(CONFIG_MP_IWPRIV_SUPPORT) /* * Input Format: %s,%d,%d * %s is width, could be * "b" for 1 byte * "w" for WORD (2 bytes) * "dw" for DWORD (4 bytes) * 1st %d is address(offset) * 2st %d is data to write */ static int rtw_mp_write_reg(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { char *pch, *pnext, *ptmp; char *width_str; char width; u32 addr, data; int ret; PADAPTER padapter = rtw_netdev_priv(dev); pch = extra; pnext = strpbrk(pch, " ,.-"); if (pnext == NULL) return -EINVAL; *pnext = 0; width_str = pch; pch = pnext + 1; pnext = strpbrk(pch, " ,.-"); if (pnext == NULL) return -EINVAL; *pnext = 0; addr = simple_strtoul(pch, &ptmp, 16); if (addr > 0x3FFF) return -EINVAL; pch = pnext + 1; if ((pch - extra) >= wrqu->length) return -EINVAL; data = simple_strtoul(pch, &ptmp, 16); ret = 0; width = width_str[0]; switch (width) { case 'b': // 1 byte if (data > 0xFF) { ret = -EINVAL; break; } rtw_write8(padapter, addr, data); break; case 'w': // 2 bytes if (data > 0xFFFF) { ret = -EINVAL; break; } rtw_write16(padapter, addr, data); break; case 'd': // 4 bytes rtw_write32(padapter, addr, data); break; default: ret = -EINVAL; break; } return ret; } /* * Input Format: %s,%d * %s is width, could be * "b" for 1 byte * "w" for WORD (2 bytes) * "dw" for DWORD (4 bytes) * %d is address(offset) * * Return: * %d for data readed */ static int rtw_mp_read_reg(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { char input[wrqu->length]; char *pch, *pnext, *ptmp; char *width_str; char width; char data[20],tmp[20]; u32 
addr; //u32 *data = (u32*)extra; u32 ret, i=0, j=0, strtout=0; PADAPTER padapter = rtw_netdev_priv(dev); if (wrqu->length > 128) return -EFAULT; if (copy_from_user(input, wrqu->pointer, wrqu->length)) return -EFAULT; pch = input; pnext = strpbrk(pch, " ,.-"); if (pnext == NULL) return -EINVAL; *pnext = 0; width_str = pch; pch = pnext + 1; if ((pch - input) >= wrqu->length) return -EINVAL; addr = simple_strtoul(pch, &ptmp, 16); if (addr > 0x3FFF) return -EINVAL; ret = 0; width = width_str[0]; switch (width) { case 'b': // 1 byte // *(u8*)data = rtw_read8(padapter, addr); sprintf(extra, "%d\n", rtw_read8(padapter, addr)); wrqu->length = 4; break; case 'w': // 2 bytes //*(u16*)data = rtw_read16(padapter, addr); sprintf(data, "%04d\n", rtw_read16(padapter, addr)); for( i=0 ; i <= strlen(data) ; i++) { if( i%2==0 ) { tmp[j]=' '; j++; } if ( data[i] != '\0' ) tmp[j] = data[i]; j++; } pch = tmp; DBG_8192C("pch=%s",pch); while( *pch != '\0' ) { pnext = strpbrk(pch, " "); pnext++; if ( *pnext != '\0' ) { strtout = simple_strtoul (pnext , &ptmp, 16); sprintf( extra, "%s %d" ,extra ,strtout ); } else{ break; } pch = pnext; } wrqu->length = 8; break; case 'd': // 4 bytes //*data = rtw_read32(padapter, addr); sprintf(data, "%08x", rtw_read32(padapter, addr)); //add read data format blank for( i=0 ; i <= strlen(data) ; i++) { if( i%2==0 ) { tmp[j]=' '; j++; } tmp[j] = data[i]; j++; } pch = tmp; DBG_8192C("pch=%s",pch); while( *pch != '\0' ) { pnext = strpbrk(pch, " "); pnext++; if ( *pnext != '\0' ) { strtout = simple_strtoul (pnext , &ptmp, 16); sprintf( extra, "%s %d" ,extra ,strtout ); } else{ break; } pch = pnext; } wrqu->length = 20; break; default: wrqu->length = 0; ret = -EINVAL; break; } return ret; } /* * Input Format: %d,%x,%x * %d is RF path, should be smaller than MAX_RF_PATH_NUMS * 1st %x is address(offset) * 2st %x is data to write */ static int rtw_mp_write_rf(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { /*static int 
rtw_mp_write_rf(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) */ u32 path, addr, data; int ret; PADAPTER padapter = rtw_netdev_priv(dev); ret = sscanf(extra, "%d,%x,%x", &path, &addr, &data); if (ret < 3) return -EINVAL; if (path >= MAX_RF_PATH_NUMS) return -EINVAL; if (addr > 0xFF) return -EINVAL; if (data > 0xFFFFF) return -EINVAL; write_rfreg(padapter, path, addr, data); return 0; } /* * Input Format: %d,%x * %d is RF path, should be smaller than MAX_RF_PATH_NUMS * %x is address(offset) * * Return: * %d for data readed */ static int rtw_mp_read_rf(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { char input[wrqu->length]; char *pch, *pnext, *ptmp; char data[20],tmp[20]; //u32 *data = (u32*)extra; u32 path, addr; u32 ret,i=0 ,j=0,strtou=0; PADAPTER padapter = rtw_netdev_priv(dev); if (wrqu->length > 128) return -EFAULT; if (copy_from_user(input, wrqu->pointer, wrqu->length)) return -EFAULT; ret = sscanf(extra, "%d,%x", &path, &addr); if (ret < 2) return -EINVAL; if (path >= MAX_RF_PATH_NUMS) return -EINVAL; if (addr > 0xFF) return -EINVAL; //*data = read_rfreg(padapter, path, addr); sprintf(data, "%08x", read_rfreg(padapter, path, addr)); //add read data format blank for( i=0 ; i <= strlen(data) ; i++) { if( i%2==0 ) { tmp[j]=' '; j++; } tmp[j] = data[i]; j++; } pch = tmp; DBG_8192C("pch=%s",pch); while( *pch != '\0' ) { pnext = strpbrk(pch, " "); pnext++; if ( *pnext != '\0' ) { strtou = simple_strtoul (pnext , &ptmp, 16); sprintf( extra, "%s %d" ,extra ,strtou ); } else{ break; } pch = pnext; } wrqu->length = 10; return 0; } static int rtw_mp_start(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u8 val8; PADAPTER padapter = rtw_netdev_priv(dev); if (padapter->registrypriv.mp_mode == 0) return -EPERM; if (padapter->mppriv.mode == MP_OFF) { if (mp_start_test(padapter) == _FAIL) return -EPERM; padapter->mppriv.mode = MP_ON; } return 0; 
} static int rtw_mp_stop(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { PADAPTER padapter = rtw_netdev_priv(dev); if (padapter->mppriv.mode != MP_OFF) { mp_stop_test(padapter); padapter->mppriv.mode = MP_OFF; } return 0; } extern int wifirate2_ratetbl_inx(unsigned char rate); static int rtw_mp_rate(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u32 rate = MPT_RATE_1M; u8 input[wrqu->length]; PADAPTER padapter = rtw_netdev_priv(dev); if (copy_from_user(input, wrqu->pointer, wrqu->length)) return -EFAULT; rate = rtw_atoi(input); sprintf( extra, "Set data rate to %d" , rate ); if(rate <= 0x7f) rate = wifirate2_ratetbl_inx( (u8)rate); else rate =(rate-0x80+MPT_RATE_MCS0); //DBG_8192C("%s: rate=%d\n", __func__, rate); if (rate >= MPT_RATE_LAST ) return -EINVAL; padapter->mppriv.rateidx = rate; Hal_SetDataRate(padapter); wrqu->length = strlen(extra) + 1; return 0; } static int rtw_mp_channel(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { PADAPTER padapter = rtw_netdev_priv(dev); u8 input[wrqu->length]; u32 channel = 1; if (copy_from_user(input, wrqu->pointer, wrqu->length)) return -EFAULT; channel = rtw_atoi(input); //DBG_8192C("%s: channel=%d\n", __func__, channel); sprintf( extra, "Change channel %d to channel %d", padapter->mppriv.channel , channel ); padapter->mppriv.channel = channel; Hal_SetChannel(padapter); wrqu->length = strlen(extra) + 1; return 0; } static int rtw_mp_bandwidth(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u32 bandwidth=0, sg=0; //u8 buffer[40]; PADAPTER padapter = rtw_netdev_priv(dev); //if (copy_from_user(buffer, (void*)wrqu->data.pointer, wrqu->data.length)) // return -EFAULT; //DBG_8192C("%s:iwpriv in=%s\n", __func__, extra); sscanf(extra, "40M=%d,shortGI=%d", &bandwidth, &sg); if (bandwidth != HT_CHANNEL_WIDTH_40) bandwidth = HT_CHANNEL_WIDTH_20; 
//DBG_8192C("%s: bw=%d sg=%d \n", __func__, bandwidth , sg); padapter->mppriv.bandwidth = (u8)bandwidth; padapter->mppriv.preamble = sg; SetBandwidth(padapter); return 0; } static int rtw_mp_txpower(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u32 idx_a=0,idx_b=0; u8 input[wrqu->length]; PADAPTER padapter = rtw_netdev_priv(dev); if (copy_from_user(input, wrqu->pointer, wrqu->length)) return -EFAULT; sscanf(input,"patha=%d,pathb=%d",&idx_a,&idx_b); //DBG_8192C("%s: tx_pwr_idx_a=%x b=%x\n", __func__, idx_a, idx_b); sprintf( extra, "Set power level path_A:%d path_B:%d", idx_a , idx_b ); padapter->mppriv.txpoweridx = (u8)idx_a; padapter->mppriv.txpoweridx_b = (u8)idx_b; Hal_SetAntennaPathPower(padapter); wrqu->length = strlen(extra) + 1; return 0; } static int rtw_mp_ant_tx(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u8 i; u8 input[wrqu->length]; u16 antenna = 0; PADAPTER padapter = rtw_netdev_priv(dev); if (copy_from_user(input, wrqu->pointer, wrqu->length)) return -EFAULT; DBG_8192C("%s: input=%s\n", __func__, input); sprintf( extra, "switch Tx antenna to %s", input ); for (i=0; i < strlen(input); i++) { switch(input[i]) { case 'a' : antenna|=ANTENNA_A; break; case 'b': antenna|=ANTENNA_B; break; } } //antenna |= BIT(extra[i]-'a'); DBG_8192C("%s: antenna=0x%x\n", __func__, antenna); padapter->mppriv.antenna_tx = antenna; DBG_8192C("%s:mppriv.antenna_rx=%d\n", __func__, padapter->mppriv.antenna_tx); Hal_SetAntenna(padapter); wrqu->length = strlen(extra) + 1; return 0; } static int rtw_mp_ant_rx(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u8 i; u16 antenna = 0; u8 input[wrqu->length]; PADAPTER padapter = rtw_netdev_priv(dev); if (copy_from_user(input, wrqu->pointer, wrqu->length)) return -EFAULT; //DBG_8192C("%s: input=%s\n", __func__, input); for (i=0; i < strlen(input); i++) { switch( input[i] ) { case 'a' : 
antenna|=ANTENNA_A; break; case 'b': antenna|=ANTENNA_B; break; } } //DBG_8192C("%s: antenna=0x%x\n", __func__, antenna); padapter->mppriv.antenna_rx = antenna; //DBG_8192C("%s:mppriv.antenna_rx=%d\n", __func__, padapter->mppriv.antenna_rx); Hal_SetAntenna(padapter); wrqu->length = strlen(extra) + 1; return 0; } static int rtw_mp_ctx(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u32 pkTx = 1, countPkTx = 1, cotuTx = 1, CarrSprTx = 1, scTx = 1, sgleTx = 1, stop = 1; u32 bStartTest = 1; u32 count = 0; struct mp_priv *pmp_priv; struct pkt_attrib *pattrib; PADAPTER padapter = rtw_netdev_priv(dev); pmp_priv = &padapter->mppriv; if (copy_from_user(extra, wrqu->pointer, wrqu->length)) return -EFAULT; DBG_8192C("%s: in=%s\n", __func__, extra); countPkTx = strncmp(extra, "count=", 5); // strncmp TRUE is 0 cotuTx = strncmp(extra, "background", 20); CarrSprTx = strncmp(extra, "background,cs", 20); scTx = strncmp(extra, "background,sc", 20); sgleTx = strncmp(extra, "background,stone", 20); pkTx = strncmp(extra, "background,pkt", 20); stop = strncmp(extra, "stop", 5); sscanf(extra, "count=%d,pkt", &count); //DBG_8192C("%s: count=%d countPkTx=%d cotuTx=%d CarrSprTx=%d scTx=%d sgleTx=%d pkTx=%d stop=%d\n", __func__, count, countPkTx, cotuTx, CarrSprTx, pkTx, sgleTx, scTx, stop); _rtw_memset(extra, '\0', sizeof(extra)); if (stop == 0) { bStartTest = 0; // To set Stop pmp_priv->tx.stop = 1; sprintf( extra, "Stop continuous Tx"); } else { bStartTest = 1; if (pmp_priv->mode != MP_ON) { if (pmp_priv->tx.stop != 1) { DBG_8192C("%s: MP_MODE != ON %d\n", __func__, pmp_priv->mode); return -EFAULT; } } } if (pkTx == 0 || countPkTx == 0) pmp_priv->mode = MP_PACKET_TX; if (sgleTx == 0) pmp_priv->mode = MP_SINGLE_TONE_TX; if (cotuTx == 0) pmp_priv->mode = MP_CONTINUOUS_TX; if (CarrSprTx == 0) pmp_priv->mode = MP_CARRIER_SUPPRISSION_TX; if (scTx == 0) pmp_priv->mode = MP_SINGLE_CARRIER_TX; switch (pmp_priv->mode) { case MP_PACKET_TX: 
//DBG_8192C("%s:pkTx %d\n", __func__,bStartTest); if (bStartTest == 0) { pmp_priv->tx.stop = 1; pmp_priv->mode = MP_ON; sprintf( extra, "Stop continuous Tx"); } else if (pmp_priv->tx.stop == 1) { sprintf( extra, "Start continuous DA=ffffffffffff len=1500 count=%u,\n",count); //DBG_8192C("%s:countPkTx %d\n", __func__,count); pmp_priv->tx.stop = 0; pmp_priv->tx.count = count; pmp_priv->tx.payload = 2; pattrib = &pmp_priv->tx.attrib; pattrib->pktlen = 1460; _rtw_memset(pattrib->dst, 0xFF, ETH_ALEN); SetPacketTx(padapter); } else { //DBG_8192C("%s: pkTx not stop\n", __func__); return -EFAULT; } wrqu->length = strlen(extra); return 0; case MP_SINGLE_TONE_TX: //DBG_8192C("%s: sgleTx %d \n", __func__, bStartTest); if (bStartTest != 0){ sprintf( extra, "Start continuous DA=ffffffffffff len=1500 \n infinite=yes."); Hal_SetSingleToneTx(padapter, (u8)bStartTest); } break; case MP_CONTINUOUS_TX: //DBG_8192C("%s: cotuTx %d\n", __func__, bStartTest); if (bStartTest != 0){ sprintf( extra, "Start continuous DA=ffffffffffff len=1500 \n infinite=yes."); Hal_SetContinuousTx(padapter, (u8)bStartTest); } break; case MP_CARRIER_SUPPRISSION_TX: //DBG_8192C("%s: CarrSprTx %d\n", __func__, bStartTest); if (bStartTest != 0){ if( pmp_priv->rateidx <= MPT_RATE_11M ) { sprintf( extra, "Start continuous DA=ffffffffffff len=1500 \n infinite=yes."); Hal_SetCarrierSuppressionTx(padapter, (u8)bStartTest); }else sprintf( extra, "Specify carrier suppression but not CCK rate"); } break; case MP_SINGLE_CARRIER_TX: //DBG_8192C("%s: scTx %d\n", __func__, bStartTest); if (bStartTest != 0){ sprintf( extra, "Start continuous DA=ffffffffffff len=1500 \n infinite=yes."); Hal_SetSingleCarrierTx(padapter, (u8)bStartTest); } break; default: //DBG_8192C("%s:No Match MP_MODE\n", __func__); sprintf( extra, "Error! 
Continuous-Tx is not on-going."); return -EFAULT; } if (bStartTest) { struct mp_priv *pmp_priv = &padapter->mppriv; if (pmp_priv->tx.stop == 0) { pmp_priv->tx.stop = 1; //DBG_8192C("%s: pkt tx is running...\n", __func__); rtw_msleep_os(5); } pmp_priv->tx.stop = 0; pmp_priv->tx.count = 1; SetPacketTx(padapter); } else { pmp_priv->mode = MP_ON; } wrqu->length = strlen(extra); return 0; } static int rtw_mp_arx(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u8 bStartRx=0,bStopRx=0; PADAPTER padapter = rtw_netdev_priv(dev); u8 input[wrqu->length]; if (copy_from_user(input, wrqu->pointer, wrqu->length)) return -EFAULT; DBG_8192C("%s: %s\n", __func__, input); bStartRx = (strncmp(input, "start", 5)==0)?1:0; // strncmp TRUE is 0 bStopRx = (strncmp(input, "stop", 5)==0)?1:0; // strncmp TRUE is 0 SetPacketRx(padapter, bStartRx); if(bStartRx) { sprintf( extra, "start"); wrqu->length = strlen(extra) + 1; } else if(bStopRx) { sprintf( extra, "Received packet OK:%d CRC error:%d",padapter->mppriv.rx_pktcount, padapter->mppriv.rx_crcerrpktcount); wrqu->length = strlen(extra) + 1; } return 0; } static int rtw_mp_trx_query(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u32 txok,txfail,rxok,rxfail; PADAPTER padapter = rtw_netdev_priv(dev); //if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) // return -EFAULT; txok=padapter->mppriv.tx.sended; txfail=0; rxok = padapter->mppriv.rx_pktcount; rxfail = padapter->mppriv.rx_crcerrpktcount; _rtw_memset(extra, '\0', 128); sprintf(extra, "Tx OK:%d, Tx Fail:%d, Rx OK:%d, CRC error:%d ", txok, txfail,rxok,rxfail); wrqu->length=strlen(extra)+1; return 0; } static int rtw_mp_pwrtrk(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u8 enable; u32 thermal; s32 ret; PADAPTER padapter = rtw_netdev_priv(dev); enable = 1; if (wrqu->length > 1) { // not empty string if (strncmp(extra, "stop", 4) == 0) enable = 
0; else { if (sscanf(extra, "ther=%d", &thermal)) { ret = Hal_SetThermalMeter(padapter, (u8)thermal); if (ret == _FAIL) return -EPERM; } else return -EINVAL; } } ret = Hal_SetPowerTracking(padapter, enable); if (ret == _FAIL) return -EPERM; return 0; } static int rtw_mp_psd(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { PADAPTER padapter = rtw_netdev_priv(dev); if (copy_from_user(extra, wrqu->pointer, wrqu->length)) return -EFAULT; wrqu->length = mp_query_psd(padapter, extra); return 0; } static int rtw_mp_thermal(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { u8 val; u16 bwrite=1; #ifdef CONFIG_RTL8192C u16 addr=0x78; #endif #ifdef CONFIG_RTL8192D u16 addr=0xc3; #endif u16 cnt=1; u16 max_available_size=0; PADAPTER padapter = rtw_netdev_priv(dev); if (copy_from_user(extra, wrqu->pointer, wrqu->length)) return -EFAULT; //DBG_8192C("print extra %s \n",extra); bwrite = strncmp(extra, "write", 6); // strncmp TRUE is 0 Hal_GetThermalMeter(padapter, &val); if( bwrite == 0 ) { //DBG_8192C("to write val:%d",val); EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (PVOID)&max_available_size, _FALSE); if( 2 > max_available_size ) { DBG_8192C("no available efuse!\n"); return -EFAULT; } if ( rtw_efuse_map_write(padapter, addr, cnt, &val) == _FAIL ) { DBG_8192C("rtw_efuse_map_write error \n"); return -EFAULT; } else { sprintf(extra, " efuse write ok :%d", val); } } else { sprintf(extra, "%d", val); } wrqu->length = strlen(extra); return 0; } static int rtw_mp_reset_stats(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { struct mp_priv *pmp_priv; struct pkt_attrib *pattrib; PADAPTER padapter = rtw_netdev_priv(dev); pmp_priv = &padapter->mppriv; pmp_priv->tx.sended = 0; padapter->mppriv.rx_pktcount = 0; padapter->mppriv.rx_crcerrpktcount = 0; return 0; } static int rtw_mp_dump(struct net_device *dev, struct 
iw_request_info *info, struct iw_point *wrqu, char *extra) { struct mp_priv *pmp_priv; struct pkt_attrib *pattrib; u32 value; u8 rf_type,path_nums = 0; u32 i,j=1,path; PADAPTER padapter = rtw_netdev_priv(dev); pmp_priv = &padapter->mppriv; //if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) // return -EFAULT; if ( strncmp(extra, "all", 4)==0 ) { DBG_8192C("\n======= MAC REG =======\n"); for ( i=0x0;i<0x300;i+=4 ) { if(j%4==1) DBG_8192C("0x%02x",i); DBG_8192C(" 0x%08x ",rtw_read32(padapter,i)); if((j++)%4 == 0) DBG_8192C("\n"); } for( i=0x400;i<0x800;i+=4 ) { if(j%4==1) DBG_8192C("0x%02x",i); DBG_8192C(" 0x%08x ",rtw_read32(padapter,i)); if((j++)%4 == 0) DBG_8192C("\n"); } i,j=1; padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type)); DBG_8192C("\n======= RF REG =======\n"); if(( RF_1T2R == rf_type ) ||( RF_1T1R ==rf_type )) path_nums = 1; else path_nums = 2; for(path=0;path<path_nums;path++) { #ifdef CONFIG_RTL8192D for (i = 0; i < 0x50; i++) #else for (i = 0; i < 0x34; i++) #endif { //value = PHY_QueryRFReg(padapter, (RF90_RADIO_PATH_E)path,i, bMaskDWord); value =padapter->HalFunc.read_rfreg(padapter, path, i, 0xffffffff); if(j%4==1) DBG_8192C("0x%02x ",i); DBG_8192C(" 0x%08x ",value); if((j++)%4==0) DBG_8192C("\n"); } } } return 0; } static int rtw_mp_phypara(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char *extra) { PADAPTER padapter = rtw_netdev_priv(dev); char input[wrqu->length]; u32 valxcap; if (copy_from_user(input, wrqu->pointer, wrqu->length)) return -EFAULT; DBG_8192C("%s:iwpriv in=%s\n", __func__, input); sscanf(input, "xcap=%d", &valxcap); if (!IS_HARDWARE_TYPE_8192D(padapter)) return 0; #ifdef CONFIG_RTL8192D Hal_ProSetCrystalCap( padapter , valxcap ); #endif sprintf( extra, "Set xcap=%d",valxcap ); wrqu->length = strlen(extra) + 1; return 0; } /* update Tx AGC offset */ static int rtw_mp_antBdiff(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrqu, char 
*extra)
{
	// MPT_ProSetTxAGCOffset
	/* Stub: Tx AGC offset update not implemented yet. */
	return 0;
}

/*
 * iwpriv "set"-direction MP dispatcher: wrqu->flags carries the sub-ioctl
 * id, which is routed to the matching rtw_mp_* handler. Always returns 0;
 * individual handler errors are not propagated to userspace here.
 */
static int rtw_mp_set(struct net_device *dev,
			struct iw_request_info *info,
			union iwreq_data *wdata, char *extra)
{
	struct iw_point *wrqu = (struct iw_point *)wdata;
	u32 subcmd = wrqu->flags;	/* sub-ioctl id from iw_priv_args */
	PADAPTER padapter = rtw_netdev_priv(dev);

	if (padapter == NULL) {
		return -ENETDOWN;
	}

	//_rtw_memset(extra, 0x00, IW_PRIV_SIZE_MASK);

	if (extra == NULL) {
		wrqu->length = 0;
		return -EIO;
	}

	switch (subcmd) {
	case WRITE_REG :
		rtw_mp_write_reg(dev, info, wrqu, extra);
		break;
	case WRITE_RF:
		rtw_mp_write_rf(dev, info, wrqu, extra);
		break;
	case MP_START:
		DBG_8192C("set case mp_start \n");
		rtw_mp_start(dev, info, wrqu, extra);
		break;
	case MP_STOP:
		DBG_8192C("set case mp_stop \n");
		rtw_mp_stop(dev, info, wrqu, extra);
		break;
	case MP_BANDWIDTH:
		DBG_8192C("set case mp_bandwidth \n");
		rtw_mp_bandwidth(dev, info, wrqu, extra);
		break;
	case MP_PWRTRK:
		DBG_8192C("set case MP_PWRTRK \n");
		rtw_mp_pwrtrk(dev, info, wrqu, extra);
		break;
	case MP_RESET_STATS:
		DBG_8192C("set case MP_RESET_STATS \n");
		rtw_mp_reset_stats(dev, info, wrqu, extra);
		break;
	case EFUSE_SET:
		DBG_8192C("efuse set \n");
		/* efuse handler takes the raw union, not the iw_point view */
		rtw_mp_efuse_set(dev, info, wdata, extra);
		break;
	}

	return 0;
}

/*
 * iwpriv "get"-direction MP dispatcher: routes the sub-ioctl id in
 * wrqu->flags to the matching rtw_mp_* query handler, which fills 'extra'.
 */
static int rtw_mp_get(struct net_device *dev,
			struct iw_request_info *info,
			union iwreq_data *wdata, char *extra)
{
	struct iw_point *wrqu = (struct iw_point *)wdata;
	u32 subcmd = wrqu->flags;	/* sub-ioctl id from iw_priv_args */
	PADAPTER padapter = rtw_netdev_priv(dev);

	DBG_8192C("in mp_get extra= %s \n", extra);
	if (padapter == NULL) {
		return -ENETDOWN;
	}
	if (extra == NULL) {
		wrqu->length = 0;
		return -EIO;
	}

	switch (subcmd) {
	case MP_PHYPARA:
		DBG_8192C("mp_get MP_PHYPARA \n");
		rtw_mp_phypara(dev, info, wrqu, extra);
		break;
	case MP_CHANNEL:
		DBG_8192C("set case mp_channel \n");
		rtw_mp_channel(dev, info, wrqu, extra);
		break;
	case READ_REG:
		DBG_8192C("mp_get READ_REG \n");
		rtw_mp_read_reg(dev, info, wrqu, extra);
		break;
	case READ_RF:
		DBG_8192C("mp_get READ_RF \n");
		rtw_mp_read_rf(dev, info, wrqu, extra);
		break;
	case MP_RATE:
		DBG_8192C("set case mp_rate \n");
		rtw_mp_rate(dev, info, wrqu, extra);
		break;
	case MP_TXPOWER:
		DBG_8192C("set case MP_TXPOWER \n");
		rtw_mp_txpower(dev, info, wrqu, extra);
		break;
	case MP_ANT_TX:
		DBG_8192C("set case MP_ANT_TX \n");
		rtw_mp_ant_tx(dev, info, wrqu, extra);
		break;
	case MP_ANT_RX:
		DBG_8192C("set case MP_ANT_RX \n");
		rtw_mp_ant_rx(dev, info, wrqu, extra);
		break;
	case MP_QUERY:
		DBG_8192C("mp_get mp_query MP_QUERY \n");
		rtw_mp_trx_query(dev, info, wrqu, extra);
		break;
	case MP_CTX:
		DBG_8192C("set case MP_CTX \n");
		rtw_mp_ctx(dev, info, wrqu, extra);
		break;
	case MP_ARX:
		DBG_8192C("set case MP_ARX \n");
		rtw_mp_arx(dev, info, wrqu, extra);
		break;
	case EFUSE_GET:
		DBG_8192C("efuse get EFUSE_GET \n");
		rtw_mp_efuse_get(dev, info, wdata, extra);
		break;
	case MP_DUMP:
		DBG_8192C("set case MP_DUMP \n");
		rtw_mp_dump(dev, info, wrqu, extra);
		break;
	case MP_PSD:
		DBG_8192C("set case MP_PSD \n");
		rtw_mp_psd(dev, info, wrqu, extra);
		break;
	case MP_THER:
		DBG_8192C("set case MP_THER \n");
		rtw_mp_thermal(dev, info, wrqu, extra);
		break;
	}

	return 0;
}
#endif //#if defined(CONFIG_MP_INCLUDED) && defined(CONFIG_MP_IWPRIV_SUPPORT)

/*
 * Parse a "xx:xx:xx:xx:xx:xx" MAC address from 'extra' and send a TDLS
 * setup request to that peer.
 */
static int rtw_tdls_setup(struct net_device *dev,
			struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
#ifdef CONFIG_TDLS
	u8 i, j;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	u8 mac_addr[ETH_ALEN];

	DBG_8192C( "[%s] %s %d\n", __FUNCTION__, extra, wrqu->data.length -1 );

	/* convert the colon-separated hex string into raw bytes */
	for( i=0, j=0 ; i < ETH_ALEN; i++, j+=3 ){
		mac_addr[i]=key_2char2num(*(extra+j), *(extra+j+1));
	}

	issue_tdls_setup_req(padapter, mac_addr);
#endif
	return ret;
}

/*
 * Parse a peer MAC address from 'extra' and tear down the TDLS link to
 * that peer (if a matching station entry exists).
 */
static int rtw_tdls_teardown(struct net_device *dev,
			struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	int ret = 0;
#ifdef CONFIG_TDLS
	u8 i, j;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct sta_info *ptdls_sta = NULL;
	u8 mac_addr[ETH_ALEN];

	DBG_8192C( "[%s] %s %d\n", __FUNCTION__, extra, wrqu->data.length -1 );

	for( i=0, j=0 ; i < ETH_ALEN; i++, j+=3 ){
		mac_addr[i]=key_2char2num(*(extra+j), *(extra+j+1));
	}

	ptdls_sta =
rtw_get_stainfo( &(padapter->stapriv), mac_addr); if(ptdls_sta != NULL) { ptdls_sta->stat_code = _RSON_TDLS_TEAR_UN_RSN_; issue_tdls_teardown(padapter, mac_addr); } #endif return ret; } static int rtw_tdls_discovery(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; #ifdef CONFIG_TDLS _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); DBG_8192C( "[%s] %s %d\n", __FUNCTION__, extra, wrqu->data.length -1 ); issue_tdls_dis_req(padapter, NULL); #endif return ret; } static int rtw_tdls_ch_switch(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; #ifdef CONFIG_TDLS _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct tdls_info *ptdlsinfo = &padapter->tdlsinfo; u8 i, j, mac_addr[ETH_ALEN]; struct sta_info *ptdls_sta = NULL; DBG_8192C( "[%s] %s %d\n", __FUNCTION__, extra, wrqu->data.length -1 ); for( i=0, j=0 ; i < ETH_ALEN; i++, j+=3 ){ mac_addr[i]=key_2char2num(*(extra+j), *(extra+j+1)); } ptdls_sta = rtw_get_stainfo(&padapter->stapriv, mac_addr); if( ptdls_sta == NULL ) return ret; ptdls_sta->option=4; ptdlsinfo->ch_sensing=1; rtw_tdls_cmd(padapter, ptdls_sta->hwaddr, TDLS_INIT_CH_SEN); #endif return ret; } static int rtw_tdls_pson(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; #ifdef CONFIG_TDLS _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); u8 i, j, mac_addr[ETH_ALEN]; struct sta_info *ptdls_sta = NULL; DBG_8192C( "[%s] %s %d\n", __FUNCTION__, extra, wrqu->data.length -1 ); for( i=0, j=0 ; i < ETH_ALEN; i++, j+=3 ){ mac_addr[i]=key_2char2num(*(extra+j), *(extra+j+1)); } ptdls_sta = rtw_get_stainfo(&padapter->stapriv, mac_addr); 
issue_nulldata_to_TDLS_peer_STA(padapter, ptdls_sta, 1); #endif return ret; } static int rtw_tdls_psoff(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; #ifdef CONFIG_TDLS _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); u8 i, j, mac_addr[ETH_ALEN]; struct sta_info *ptdls_sta = NULL; DBG_8192C( "[%s] %s %d\n", __FUNCTION__, extra, wrqu->data.length -1 ); for( i=0, j=0 ; i < ETH_ALEN; i++, j+=3 ){ mac_addr[i]=key_2char2num(*(extra+j), *(extra+j+1)); } ptdls_sta = rtw_get_stainfo(&padapter->stapriv, mac_addr); issue_nulldata_to_TDLS_peer_STA(padapter, ptdls_sta, 0); #endif return ret; } static int rtw_tdls_ch_switch_off(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; #ifdef CONFIG_TDLS _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); u8 i, j, mac_addr[ETH_ALEN]; struct sta_info *ptdls_sta = NULL; DBG_8192C( "[%s] %s %d\n", __FUNCTION__, extra, wrqu->data.length -1 ); for( i=0, j=0 ; i < ETH_ALEN; i++, j+=3 ){ mac_addr[i]=key_2char2num(*(extra+j), *(extra+j+1)); } ptdls_sta = rtw_get_stainfo(&padapter->stapriv, mac_addr); ptdls_sta->tdls_sta_state |= TDLS_SW_OFF_STATE; /* if((ptdls_sta->tdls_sta_state & TDLS_AT_OFF_CH_STATE) && (ptdls_sta->tdls_sta_state & TDLS_PEER_AT_OFF_STATE)){ pmlmeinfo->tdls_candidate_ch= pmlmeext->cur_channel; issue_tdls_ch_switch_req(padapter, mac_addr); DBG_8192C("issue tdls ch switch req back to base channel\n"); } */ #endif return ret; } static int rtw_tdls(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; #ifdef CONFIG_TDLS DBG_8192C( "[%s] extra = %s\n", __FUNCTION__, extra ); if ( _rtw_memcmp( extra, "setup=", 6 ) ) { wrqu->data.length -=6; rtw_tdls_setup( dev, info, wrqu, &extra[6] ); } else if (_rtw_memcmp( extra, "tear=", 5 ) ) { 
wrqu->data.length -= 5; rtw_tdls_teardown( dev, info, wrqu, &extra[5] ); } else if (_rtw_memcmp( extra, "dis=", 4 ) ) { wrqu->data.length -= 4; rtw_tdls_discovery( dev, info, wrqu, &extra[4] ); } else if (_rtw_memcmp( extra, "sw=", 3 ) ) { wrqu->data.length -= 3; rtw_tdls_ch_switch( dev, info, wrqu, &extra[3] ); } else if (_rtw_memcmp( extra, "swoff=", 6 ) ) { wrqu->data.length -= 6; rtw_tdls_ch_switch_off( dev, info, wrqu, &extra[6] ); } else if (_rtw_memcmp( extra, "pson=", 5 ) ) { wrqu->data.length -= 5; rtw_tdls_pson( dev, info, wrqu, &extra[5] ); } else if (_rtw_memcmp( extra, "psoff=", 6 ) ) { wrqu->data.length -= 6; rtw_tdls_psoff( dev, info, wrqu, &extra[6] ); } #endif return ret; } static int rtw_pm_set_lps(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; u8 mode = 0; switch( wrqu->data.length -1 ) { case 1: { mode = extra[ 0 ] - '0'; break; } case 2: { mode = str_2char2num( extra[ 0 ], extra[ 1 ]); break; } } if ( mode < PS_MODE_NUM ) { if(pwrctrlpriv->power_mgnt !=mode) { if(PS_MODE_ACTIVE == mode) { LeaveAllPowerSaveMode(padapter); } else { pwrctrlpriv->LpsIdleCount = 2; } pwrctrlpriv->power_mgnt = mode; pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt)?_TRUE:_FALSE; } } else { ret = -1; } return ret; } static int rtw_pm_set_ips(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { _adapter *padapter = rtw_netdev_priv(dev); struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; unsigned mode = 0; sscanf(extra, "%u", &mode); if( mode == IPS_NORMAL || mode == IPS_LEVEL_2 ) { rtw_ips_mode_req(pwrctrlpriv, mode); pwrctrlpriv->power_mgnt = PS_MODE_MIN; rtw_set_pwr_state_check_timer(pwrctrlpriv); DBG_871X("%s %s\n", __FUNCTION__, mode == IPS_NORMAL?"IPS_NORMAL":"IPS_LEVEL_2"); return 0; } else if(mode ==IPS_NONE){ if(_FAIL == 
rfpwrstate_check(padapter)) { return -EFAULT; } pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE; } else { return -EFAULT; } return 0; } static int rtw_pm_set(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); DBG_8192C( "[%s] extra = %s\n", __FUNCTION__, extra ); if ( _rtw_memcmp( extra, "lps=", 4 ) ) { wrqu->data.length -= 4; rtw_pm_set_lps( dev, info, wrqu, &extra[4] ); } if ( _rtw_memcmp( extra, "ips=", 4 ) ) { wrqu->data.length -= 4; rtw_pm_set_ips(dev, info, wrqu, &extra[4]); } return ret; } static int rtw_wowlan_ctrl(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; struct oid_par_priv oid_par; struct wowlan_ioctl_param *poidparam; uint status=0; u16 len; u8 *pparmbuf = NULL, bset; _adapter *padapter = (_adapter *)rtw_netdev_priv(dev); struct iw_point *p = &wrqu->data; //DBG_871X("+rtw_wowlan_ctrl\n"); //mutex_lock(&ioctl_mutex); if ((!p->length) || (!p->pointer)) { ret = -EINVAL; goto _rtw_wowlan_ctrl_exit; } pparmbuf = NULL; bset = (u8)(p->flags & 0xFFFF); len = p->length; pparmbuf = (u8*)rtw_malloc(len); if (pparmbuf == NULL){ ret = -ENOMEM; goto _rtw_wowlan_ctrl_exit; } if (copy_from_user(pparmbuf, p->pointer, len)) { ret = -EFAULT; goto _rtw_wowlan_ctrl_exit_free; } poidparam = (struct wowlan_ioctl_param *)pparmbuf; if(padapter->pwrctrlpriv.bSupportRemoteWakeup==_FALSE){ ret = -EPERM; DBG_871X("+rtw_wowlan_ctrl: Device didn't support the remote wakeup!!\n"); goto _rtw_wowlan_ctrl_exit_free; } padapter->HalFunc.SetHwRegHandler(padapter,HW_VAR_WOWLAN,(u8 *)poidparam); DBG_871X("rtw_wowlan_ctrl: subcode [%d], len[%d], buffer_len[%d]\r\n", poidparam->subcode, poidparam->len, len); if (copy_to_user(p->pointer, pparmbuf, len)) { ret = -EFAULT; } _rtw_wowlan_ctrl_exit_free: //DBG_871X("-rtw_wowlan_ctrl( subcode = %d)\n", poidparam->subcode); rtw_mfree(pparmbuf, len); _rtw_wowlan_ctrl_exit: return ret; } 
#include <rtw_android.h>

/*
 * Main net_device ioctl entry point: routes WPA supplicant, hostapd
 * (AP mode) and Android private commands; everything else is rejected
 * with -EOPNOTSUPP.
 */
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct iwreq *wrq = (struct iwreq *)rq;
	int ret = 0;

	switch (cmd) {
	case RTL_IOCTL_WPA_SUPPLICANT:
		ret = wpa_supplicant_ioctl(dev, &wrq->u.data);
		break;
#ifdef CONFIG_AP_MODE
	case RTL_IOCTL_HOSTAPD:
		ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
		break;
#endif
	case (SIOCDEVPRIVATE + 1):
		ret = rtw_android_priv_cmd(dev, rq, cmd);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/*
 * Standard Wireless Extensions handler table. NOTE: positional — each
 * slot corresponds to a SIOC?IW* ordinal; do not insert or remove entries.
 */
static iw_handler rtw_handlers[] = {
	NULL,					/* SIOCSIWCOMMIT */
	rtw_wx_get_name,		/* SIOCGIWNAME */
	dummy,					/* SIOCSIWNWID */
	dummy,					/* SIOCGIWNWID */
	rtw_wx_set_freq,		/* SIOCSIWFREQ */
	rtw_wx_get_freq,		/* SIOCGIWFREQ */
	rtw_wx_set_mode,		/* SIOCSIWMODE */
	rtw_wx_get_mode,		/* SIOCGIWMODE */
	dummy,					/* SIOCSIWSENS */
	rtw_wx_get_sens,		/* SIOCGIWSENS */
	NULL,					/* SIOCSIWRANGE */
	rtw_wx_get_range,		/* SIOCGIWRANGE */
	rtw_wx_set_priv,		/* SIOCSIWPRIV */
	NULL,					/* SIOCGIWPRIV */
	NULL,					/* SIOCSIWSTATS */
	NULL,					/* SIOCGIWSTATS */
	dummy,					/* SIOCSIWSPY */
	dummy,					/* SIOCGIWSPY */
	NULL,					/* SIOCGIWTHRSPY */
	NULL,					/* SIOCWIWTHRSPY */
	rtw_wx_set_wap,			/* SIOCSIWAP */
	rtw_wx_get_wap,			/* SIOCGIWAP */
	rtw_wx_set_mlme,		/* request MLME operation; uses struct iw_mlme */
	dummy,					/* SIOCGIWAPLIST -- depricated */
	rtw_wx_set_scan,		/* SIOCSIWSCAN */
	rtw_wx_get_scan,		/* SIOCGIWSCAN */
	rtw_wx_set_essid,		/* SIOCSIWESSID */
	rtw_wx_get_essid,		/* SIOCGIWESSID */
	dummy,					/* SIOCSIWNICKN */
	rtw_wx_get_nick,		/* SIOCGIWNICKN */
	NULL,					/* -- hole -- */
	NULL,					/* -- hole -- */
	rtw_wx_set_rate,		/* SIOCSIWRATE */
	rtw_wx_get_rate,		/* SIOCGIWRATE */
	dummy,					/* SIOCSIWRTS */
	rtw_wx_get_rts,			/* SIOCGIWRTS */
	rtw_wx_set_frag,		/* SIOCSIWFRAG */
	rtw_wx_get_frag,		/* SIOCGIWFRAG */
	dummy,					/* SIOCSIWTXPOW */
	dummy,					/* SIOCGIWTXPOW */
	dummy,					/* SIOCSIWRETRY */
	rtw_wx_get_retry,		/* SIOCGIWRETRY */
	rtw_wx_set_enc,			/* SIOCSIWENCODE */
	rtw_wx_get_enc,			/* SIOCGIWENCODE */
	dummy,					/* SIOCSIWPOWER */
	rtw_wx_get_power,		/* SIOCGIWPOWER */
	NULL,					/*---hole---*/
	NULL,					/*---hole---*/
	rtw_wx_set_gen_ie,		/* SIOCSIWGENIE */
	NULL,					/* SIOCGWGENIE */
	rtw_wx_set_auth,		/* SIOCSIWAUTH */
	NULL,					/* SIOCGIWAUTH */
	rtw_wx_set_enc_ext,		/* SIOCSIWENCODEEXT */
	NULL,					/* SIOCGIWENCODEEXT */
	rtw_wx_set_pmkid,		/* SIOCSIWPMKSA */
	NULL,					/*---hole---*/
};

#if defined(CONFIG_MP_INCLUDED) && defined(CONFIG_MP_IWPRIV_SUPPORT)
/*
 * MP build: only two private handlers (set/get); the third field of each
 * sub-ioctl entry is the sub-command id consumed by rtw_mp_set/rtw_mp_get.
 */
static const struct iw_priv_args rtw_private_args[] = {
	{ SIOCIWFIRSTPRIV + 0x00, IW_PRIV_TYPE_CHAR | 1024, 0 , ""},  //set
	{ SIOCIWFIRSTPRIV + 0x01, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK , ""},//get
/* --- sub-ioctls definitions --- */
	{ MP_START , IW_PRIV_TYPE_CHAR | 1024, 0, "mp_start" }, //set
	{ MP_PHYPARA, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_phypara" },//get
	{ MP_STOP , IW_PRIV_TYPE_CHAR | 1024, 0, "mp_stop" }, //set
	{ MP_CHANNEL , IW_PRIV_TYPE_CHAR | 1024 , IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_channel" },//get
	{ MP_BANDWIDTH , IW_PRIV_TYPE_CHAR | 1024, 0, "mp_bandwidth"}, //set
	{ MP_RATE , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_rate" },//get
	{ MP_RESET_STATS , IW_PRIV_TYPE_CHAR | 1024, 0, "mp_reset_stats"},
	{ MP_QUERY , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK , "mp_query"}, //get
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ READ_REG , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "read_reg" },
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ MP_RATE , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_rate" },
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ READ_RF , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "read_rf" },
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ MP_PSD , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_psd"},
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ MP_DUMP, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_dump" },
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ MP_TXPOWER , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_txpower"},
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ MP_ANT_TX , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ant_tx"},
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ MP_ANT_RX , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ant_rx"},
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ WRITE_REG , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "write_reg" },
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ WRITE_RF , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "write_rf" },
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ MP_CTX , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ctx"},
	{ MP_NULL, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},//set
	{ MP_ARX , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_arx"},
	{ MP_ANT_RX , IW_PRIV_TYPE_CHAR | 1024, 0, "mp_ant_rx"},
	{ MP_THER , IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ther"},
	{ EFUSE_SET, IW_PRIV_TYPE_CHAR | 1024, 0, "efuse_set" },
	{ EFUSE_GET, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get" },
	{ MP_PWRTRK , IW_PRIV_TYPE_CHAR | 1024, 0, "mp_pwrtrk"},
	{ MP_IOCTL, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_ioctl"}, // mp_ioctl
	{ SIOCIWFIRSTPRIV + 0x02, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK , "test"},//set
};

static iw_handler rtw_private_handler[] = {
	rtw_mp_set,
	rtw_mp_get,
};

#else // MP not included

static const struct iw_priv_args rtw_private_args[] = {
	{
		SIOCIWFIRSTPRIV + 0x0,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "rtw_write32"
	},
	{
		SIOCIWFIRSTPRIV + 0x1,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "rtw_read32"
	},
	{
		SIOCIWFIRSTPRIV + 0x2, 0, 0, "driver_ext"
	},
	{
		SIOCIWFIRSTPRIV + 0x3, 0, 0, "" // mp_ioctl
	},
	{
		SIOCIWFIRSTPRIV + 0x4,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo"
	},
	{
		SIOCIWFIRSTPRIV + 0x5,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "setpid"
	},
	{
		SIOCIWFIRSTPRIV + 0x6,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_start"
	},
//for PLATFORM_MT53XX
	{
		SIOCIWFIRSTPRIV + 0x7,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "get_sensitivity"
	},
	{
		SIOCIWFIRSTPRIV + 0x8,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_prob_req_ie"
	},
	{
		SIOCIWFIRSTPRIV + 0x9,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_assoc_req_ie"
	},
//for RTK_DMP_PLATFORM
	{
		SIOCIWFIRSTPRIV + 0xA,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "channel_plan"
	},
	{
		SIOCIWFIRSTPRIV + 0xB,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "dbg"
	},
	{
		SIOCIWFIRSTPRIV + 0xC,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "rfw"
	},
	{
		SIOCIWFIRSTPRIV + 0xD,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "rfr"
	},
	{
		SIOCIWFIRSTPRIV + 0xE,0,0, "wowlan_ctrl"
	},
	{
		SIOCIWFIRSTPRIV + 0x10,
		IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, 0, "p2p_set"
	},
	{
		SIOCIWFIRSTPRIV + 0x11,
		IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN,
		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | P2P_PRIVATE_IOCTL_SET_LEN , "p2p_get"
	},
	{
		SIOCIWFIRSTPRIV + 0x12,
		IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN,
		IW_PRIV_TYPE_CHAR | IFNAMSIZ , "p2p_get2"
	},
#ifdef CONFIG_TDLS
	{SIOCIWFIRSTPRIV + 0x13, IW_PRIV_TYPE_CHAR | 128, 0,"NULL"},
	{
		SIOCIWFIRSTPRIV + 0x14,
		IW_PRIV_TYPE_CHAR | 64, 0, "tdls"
	},
#endif
	{
		SIOCIWFIRSTPRIV + 0x16,
		IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, 0, "pm_set"
	},
	{SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ , 0 , "rereg_nd_name"},
	{SIOCIWFIRSTPRIV + 0x1A, IW_PRIV_TYPE_CHAR | 128, 0, "efuse_set"},
	{SIOCIWFIRSTPRIV + 0x1B, IW_PRIV_TYPE_CHAR | 128, IW_PRIV_TYPE_CHAR |IW_PRIV_SIZE_FIXED |0x700 ,"efuse_get"},
};

/*
 * Non-MP private handler table. NOTE: positional — index N handles
 * SIOCIWFIRSTPRIV + N; keep in sync with rtw_private_args above.
 */
static iw_handler rtw_private_handler[] = {
	rtw_wx_write32,			//0x00
	rtw_wx_read32,			//0x01
	rtw_drvext_hdl,			//0x02
	rtw_mp_ioctl_hdl,		//0x03
// for MM DTV platform
	rtw_get_ap_info,		//0x04
	rtw_set_pid,			//0x05
	rtw_wps_start,			//0x06
// for PLATFORM_MT53XX
	rtw_wx_get_sensitivity,		//0x07
	rtw_wx_set_mtk_wps_probe_ie,	//0x08
	rtw_wx_set_mtk_wps_ie,		//0x09
// for RTK_DMP_PLATFORM
// Set Channel depend on the country code
	rtw_wx_set_channel_plan,	//0x0A
	rtw_dbg_port,			//0x0B
	rtw_wx_write_rf,		//0x0C
	rtw_wx_read_rf,			//0x0D
	rtw_wowlan_ctrl,		//0x0E
	rtw_wx_priv_null,		//0x0F
	rtw_p2p_set,			//0x10
	rtw_p2p_get,			//0x11
	rtw_p2p_get2,			//0x12
	NULL,				//0x13
	rtw_tdls,			//0x14
	rtw_wx_priv_null,		//0x15
	rtw_pm_set,			//0x16
	rtw_wx_priv_null,		//0x17
	rtw_rereg_nd_name,		//0x18
	rtw_wx_priv_null,		//0x19
	rtw_mp_efuse_set,		//0x1A
	rtw_mp_efuse_get,		//0x1B
// 0x1C is reserved for hostapd
};
#endif // #if defined(CONFIG_MP_INCLUDED) && defined(CONFIG_MP_IWPRIV_SUPPORT)

#if WIRELESS_EXT >= 17
/*
 * Report link quality statistics to the WE core; zeros when not
 * associated, otherwise signal strength (percent or dBm, depending on
 * CONFIG_SIGNAL_DISPLAY_DBM), quality and noise from recvpriv.
 */
static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct iw_statistics *piwstats = &padapter->iwstats;
	int tmp_level = 0;
	int tmp_qual = 0;
	int tmp_noise = 0;

	if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) != _TRUE) {
		piwstats->qual.qual = 0;
		piwstats->qual.level = 0;
		piwstats->qual.noise = 0;
		//DBG_8192C("No link level:%d, qual:%d, noise:%d\n", tmp_level, tmp_qual, tmp_noise);
	} else {
#ifdef CONFIG_SIGNAL_DISPLAY_DBM
		tmp_level = translate_percentage_to_dbm(padapter->recvpriv.signal_strength);
#else
		tmp_level = padapter->recvpriv.signal_strength;
#endif
		tmp_qual = padapter->recvpriv.signal_qual;
		tmp_noise = padapter->recvpriv.noise;
		//DBG_8192C("level:%d, qual:%d, noise:%d, rssi (%d)\n", tmp_level, tmp_qual, tmp_noise,padapter->recvpriv.rssi);

		piwstats->qual.level = tmp_level;
		piwstats->qual.qual = tmp_qual;
		piwstats->qual.noise = tmp_noise;
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
	piwstats->qual.updated = IW_QUAL_ALL_UPDATED ;//|IW_QUAL_DBM;
#else
#ifdef RTK_DMP_PLATFORM
	//IW_QUAL_DBM= 0x8, if driver use this flag, wireless extension will show value of dbm.
	//remove this flag for show percentage 0~100
	piwstats->qual.updated = 0x07;
#else
	piwstats->qual.updated = 0x0f;
#endif
#endif
#ifdef CONFIG_SIGNAL_DISPLAY_DBM
	piwstats->qual.updated = piwstats->qual.updated | IW_QUAL_DBM;
#endif
	return &padapter->iwstats;
}
#endif

/* Wireless Extensions registration structure exported to the netdev. */
struct iw_handler_def rtw_handlers_def = {
	.standard = rtw_handlers,
	.num_standard = sizeof(rtw_handlers) / sizeof(iw_handler),
	.private = rtw_private_handler,
	.private_args = (struct iw_priv_args *)rtw_private_args,
	.num_private = sizeof(rtw_private_handler) / sizeof(iw_handler),
	.num_private_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args),
#if WIRELESS_EXT >= 17
	.get_wireless_stats = rtw_get_wireless_stats,
#endif
};
gpl-2.0
gp-b2g/gp-peak-kernel
arch/arm/mach-msm/spm-v2.c
295
12014
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <mach/msm_iomap.h>

#include "spm_driver.h"

#define MSM_SPM_PMIC_STATE_IDLE 0

/* Version-register offsets differ between SAW2 hardware revisions. */
#define SAW2_V1_VER_REG 0x04
#define SAW2_V2_VER_REG 0xfd0
#define SAW2_MAJOR_2 2

enum {
	MSM_SPM_DEBUG_SHADOW = 1U << 0,
	MSM_SPM_DEBUG_VCTL = 1U << 1,
};

static int msm_spm_debug_mask;
module_param_named(
	debug_mask, msm_spm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

/* Register byte offsets for SAW2 v1 hardware. Do not alter: HW-defined. */
static uint32_t msm_spm_reg_offsets_v1[MSM_SPM_REG_NR] = {
	[MSM_SPM_REG_SAW2_SECURE] = 0x00,
	[MSM_SPM_REG_SAW2_ID] = 0x04,
	[MSM_SPM_REG_SAW2_CFG] = 0x08,
	[MSM_SPM_REG_SAW2_STS0] = 0x0C,
	[MSM_SPM_REG_SAW2_STS1] = 0x10,
	[MSM_SPM_REG_SAW2_VCTL] = 0x14,
	[MSM_SPM_REG_SAW2_AVS_CTL] = 0x18,
	[MSM_SPM_REG_SAW2_AVS_HYSTERESIS] = 0x1C,
	[MSM_SPM_REG_SAW2_SPM_CTL] = 0x20,
	[MSM_SPM_REG_SAW2_PMIC_DLY] = 0x24,
	[MSM_SPM_REG_SAW2_PMIC_DATA_0] = 0x28,
	[MSM_SPM_REG_SAW2_PMIC_DATA_1] = 0x2C,
	[MSM_SPM_REG_SAW2_RST] = 0x30,
	[MSM_SPM_REG_SAW2_SEQ_ENTRY] = 0x80,
};

/* Register byte offsets for SAW2 v2 hardware. Do not alter: HW-defined. */
static uint32_t msm_spm_reg_offsets_v2[MSM_SPM_REG_NR] = {
	[MSM_SPM_REG_SAW2_SECURE] = 0x00,
	[MSM_SPM_REG_SAW2_ID] = 0x04,
	[MSM_SPM_REG_SAW2_CFG] = 0x08,
	[MSM_SPM_REG_SAW2_SPM_STS] = 0x0C,
	[MSM_SPM_REG_SAW2_AVS_STS] = 0x10,
	[MSM_SPM_REG_SAW2_PMIC_STS] = 0x14,
	[MSM_SPM_REG_SAW2_RST] = 0x18,
	[MSM_SPM_REG_SAW2_VCTL] = 0x1C,
	[MSM_SPM_REG_SAW2_AVS_CTL] = 0x20,
	[MSM_SPM_REG_SAW2_AVS_LIMIT] = 0x24,
	[MSM_SPM_REG_SAW2_AVS_DLY] = 0x28,
	[MSM_SPM_REG_SAW2_AVS_HYSTERESIS] = 0x2C,
	[MSM_SPM_REG_SAW2_SPM_CTL] = 0x30,
	[MSM_SPM_REG_SAW2_SPM_DLY] = 0x34,
	[MSM_SPM_REG_SAW2_PMIC_DATA_0] = 0x40,
	[MSM_SPM_REG_SAW2_PMIC_DATA_1] = 0x44,
	[MSM_SPM_REG_SAW2_PMIC_DATA_2] = 0x48,
	[MSM_SPM_REG_SAW2_PMIC_DATA_3] = 0x4C,
	[MSM_SPM_REG_SAW2_PMIC_DATA_4] = 0x50,
	[MSM_SPM_REG_SAW2_PMIC_DATA_5] = 0x54,
	[MSM_SPM_REG_SAW2_PMIC_DATA_6] = 0x58,
	[MSM_SPM_REG_SAW2_PMIC_DATA_7] = 0x5C,
	[MSM_SPM_REG_SAW2_SEQ_ENTRY] = 0x80,
	[MSM_SPM_REG_SAW2_VERSION] = 0xFD0,
};

/******************************************************************************
 * Internal helper functions
 *****************************************************************************/

/* Number of 32-bit sequencer entry slots (fixed on this hardware). */
static inline uint32_t msm_spm_drv_get_num_spm_entry(
		struct msm_spm_driver_data *dev)
{
	return 32;
}

/* Write one shadow register value out to the memory-mapped SAW2 register. */
static void msm_spm_drv_flush_shadow(struct msm_spm_driver_data *dev,
		unsigned int reg_index)
{
	__raw_writel(dev->reg_shadow[reg_index],
		dev->reg_base_addr + dev->reg_offsets[reg_index]);
}

/* Refresh one shadow register value from the memory-mapped SAW2 register. */
static void msm_spm_drv_load_shadow(struct msm_spm_driver_data *dev,
		unsigned int reg_index)
{
	dev->reg_shadow[reg_index] =
		__raw_readl(dev->reg_base_addr + dev->reg_offsets[reg_index]);
}

/*
 * Program the sequencer start address (7 bits placed at SPM_CTL[10:4])
 * into the shadow copy; caller is responsible for flushing.
 */
static inline void msm_spm_drv_set_start_addr(
		struct msm_spm_driver_data *dev, uint32_t addr)
{
	addr &= 0x7F;
	addr <<= 4;
	dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] &= 0xFFFFF80F;
	dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] |= addr;
}

/*
 * True when the SAW2 ID register reports a PMIC arbiter; the flag bit
 * position differs between major version 2 (bit 2) and earlier (bit 18).
 */
static inline bool msm_spm_pmic_arb_present(struct msm_spm_driver_data *dev)
{
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_ID);

	if (dev->major == SAW2_MAJOR_2)
		return (dev->reg_shadow[MSM_SPM_REG_SAW2_ID] >> 2) & 0x1;
	else
		return (dev->reg_shadow[MSM_SPM_REG_SAW2_ID] >> 18) & 0x1;
}

/*
 * v1 layout: write the 8-bit voltage level into VCTL and PMIC_DATA_0,
 * and its low 6 bits into PMIC_DATA_1 (shadow copies only).
 */
static inline void msm_spm_drv_set_vctl(struct msm_spm_driver_data *dev,
		uint32_t vlevel)
{
	dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] &= ~0xFF;
	dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] |= vlevel;

	dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_DATA_0] &= ~0xFF;
	dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_DATA_0] |= vlevel;

	dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_DATA_1] &= ~0x3F;
	dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_DATA_1] |= (vlevel & 0x3F);
}

/*
 * v2 layout: voltage level in bits [7:0] plus the PMIC arbiter port in
 * bits [18:16] of VCTL and PMIC_DATA_0 (shadow copies only).
 */
static inline void msm_spm_drv_set_vctl2(struct msm_spm_driver_data *dev,
		uint32_t vlevel)
{
	unsigned int pmic_data = 0;

	pmic_data |= vlevel;
	pmic_data |= (dev->vctl_port & 0x7) << 16;

	dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] &= ~0x700FF;
	dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] |= pmic_data;

	dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_DATA_0] &= ~0x700FF;
	dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_DATA_0] |= pmic_data;
}

/* Dispatch to the layout-correct vctl setter based on the SAW2 major rev. */
static inline void msm_spm_drv_apcs_set_vctl(struct msm_spm_driver_data *dev,
		unsigned int vlevel)
{
	if (dev->major == SAW2_MAJOR_2)
		return msm_spm_drv_set_vctl2(dev, vlevel);
	else
		return msm_spm_drv_set_vctl(dev, vlevel);
}

/*
 * Read the 2-bit PMIC state machine status from the version-appropriate
 * status register (PMIC_STS[17:16] on v2, STS0[11:10] on v1).
 */
static inline uint32_t msm_spm_drv_get_sts_pmic_state(
		struct msm_spm_driver_data *dev)
{
	if (dev->major == SAW2_MAJOR_2) {
		msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_PMIC_STS);
		return (dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_STS] >> 16) &
				0x03;
	} else {
		msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_STS0);
		return (dev->reg_shadow[MSM_SPM_REG_SAW2_STS0] >> 10) & 0x03;
	}
}

/*
 * Read the current 8-bit PMIC data value from the version-appropriate
 * status register.
 */
static inline uint32_t msm_spm_drv_get_sts_curr_pmic_data(
		struct msm_spm_driver_data *dev)
{
	if (dev->major == SAW2_MAJOR_2) {
		msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_PMIC_STS);
		return dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_STS] & 0xFF;
	} else {
		msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_STS1);
		return dev->reg_shadow[MSM_SPM_REG_SAW2_STS1] & 0xFF;
	}
}

/*
 * Decode major/minor from the version register. The field layout depends
 * on which version-register offset this device instance uses.
 * Returns 0 on success, -ENODEV (cast to uint32_t by the return type) when
 * the version register offset is unrecognized.
 */
static inline uint32_t msm_spm_drv_get_saw2_ver(struct msm_spm_driver_data *dev,
		uint32_t *major, uint32_t *minor)
{
	int ret = -ENODEV;
	uint32_t val = 0;

	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_VERSION);
	val = dev->reg_shadow[MSM_SPM_REG_SAW2_VERSION];

	if (dev->ver_reg == SAW2_V2_VER_REG) {
		*major = (val >> 28) & 0xF;
		*minor = (val >> 16) & 0xFFF;
		ret = 0;
	} else if (dev->ver_reg == SAW2_V1_VER_REG) {
		*major = (val >> 4) & 0xF;
		*minor = val & 0xF;
		ret = 0;
	}

	return ret;
}
/****************************************************************************** * Public functions *****************************************************************************/ inline int msm_spm_drv_set_spm_enable( struct msm_spm_driver_data *dev, bool enable) { uint32_t value = enable ? 0x01 : 0x00; if (!dev) return -EINVAL; if ((dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] & 0x01) ^ value) { dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] &= ~0x1; dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] |= value; msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_SPM_CTL); wmb(); } return 0; } void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev) { int i; int num_spm_entry = msm_spm_drv_get_num_spm_entry(dev); if (!dev) { __WARN(); return; } for (i = 0; i < num_spm_entry; i++) { __raw_writel(dev->reg_seq_entry_shadow[i], dev->reg_base_addr + dev->reg_offsets[MSM_SPM_REG_SAW2_SEQ_ENTRY] + 4 * i); } mb(); } int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev, uint8_t *cmd, uint32_t *offset) { uint32_t cmd_w; uint32_t offset_w = *offset / 4; uint8_t last_cmd; if (!cmd) return -EINVAL; while (1) { int i; cmd_w = 0; last_cmd = 0; cmd_w = dev->reg_seq_entry_shadow[offset_w]; for (i = (*offset % 4) ; i < 4; i++) { last_cmd = *(cmd++); cmd_w |= last_cmd << (i * 8); (*offset)++; if (last_cmd == 0x0f) break; } dev->reg_seq_entry_shadow[offset_w++] = cmd_w; if (last_cmd == 0x0f) break; } return 0; } int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev, uint32_t addr) { /* SPM is configured to reset start address to zero after end of Program */ if (!dev) return -EINVAL; msm_spm_drv_set_start_addr(dev, addr); msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_SPM_CTL); wmb(); if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) { int i; for (i = 0; i < MSM_SPM_REG_NR; i++) pr_info("%s: reg %02x = 0x%08x\n", __func__, dev->reg_offsets[i], dev->reg_shadow[i]); } return 0; } int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, unsigned int vlevel) { uint32_t timeout_us; if 
(!dev) return -EINVAL; if (!msm_spm_pmic_arb_present(dev)) return -ENOSYS; if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: requesting vlevel 0x%x\n", __func__, vlevel); msm_spm_drv_apcs_set_vctl(dev, vlevel); msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_VCTL); msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_PMIC_DATA_0); msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_PMIC_DATA_1); mb(); /* Wait for PMIC state to return to idle or until timeout */ timeout_us = dev->vctl_timeout_us; while (msm_spm_drv_get_sts_pmic_state(dev) != MSM_SPM_PMIC_STATE_IDLE) { if (!timeout_us) goto set_vdd_bail; if (timeout_us > 10) { udelay(10); timeout_us -= 10; } else { udelay(timeout_us); timeout_us = 0; } } if (msm_spm_drv_get_sts_curr_pmic_data(dev) != vlevel) goto set_vdd_bail; if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: done, remaining timeout %uus\n", __func__, timeout_us); return 0; set_vdd_bail: pr_err("%s: failed, remaining timeout %uus, vlevel 0x%x\n", __func__, timeout_us, msm_spm_drv_get_sts_curr_pmic_data(dev)); return -EIO; } int msm_spm_drv_set_phase(struct msm_spm_driver_data *dev, unsigned int phase_cnt) { unsigned int pmic_data = 0; unsigned int timeout_us = 0; if (dev->major != SAW2_MAJOR_2) return -ENODEV; pmic_data |= phase_cnt & 0xFF; pmic_data |= (dev->phase_port & 0x7) << 16; dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] &= ~0x700FF; dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] |= pmic_data; msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_VCTL); mb(); /* Wait for PMIC state to return to idle or until timeout */ timeout_us = dev->vctl_timeout_us; while (msm_spm_drv_get_sts_pmic_state(dev) != MSM_SPM_PMIC_STATE_IDLE) { if (!timeout_us) goto set_phase_bail; if (timeout_us > 10) { udelay(10); timeout_us -= 10; } else { udelay(timeout_us); timeout_us = 0; } } if (msm_spm_drv_get_sts_curr_pmic_data(dev) != phase_cnt) goto set_phase_bail; if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: done, remaining timeout %uus\n", __func__, timeout_us); return 
0; set_phase_bail: pr_err("%s: failed, remaining timeout %uus, phase count %d\n", __func__, timeout_us, msm_spm_drv_get_sts_curr_pmic_data(dev)); return -EIO; } void msm_spm_drv_reinit(struct msm_spm_driver_data *dev) { int i; for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++) msm_spm_drv_flush_shadow(dev, i); msm_spm_drv_flush_seq_entry(dev); mb(); } int __devinit msm_spm_drv_init(struct msm_spm_driver_data *dev, struct msm_spm_platform_data *data) { int i; int num_spm_entry; BUG_ON(!dev || !data); if (dev->ver_reg == SAW2_V2_VER_REG) dev->reg_offsets = msm_spm_reg_offsets_v2; else dev->reg_offsets = msm_spm_reg_offsets_v1; dev->vctl_port = data->vctl_port; dev->phase_port = data->phase_port; dev->reg_base_addr = data->reg_base_addr; memcpy(dev->reg_shadow, data->reg_init_values, sizeof(data->reg_init_values)); dev->vctl_timeout_us = data->vctl_timeout_us; for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++) msm_spm_drv_flush_shadow(dev, i); /* barrier to ensure write completes before we update shadow * registers */ mb(); for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++) msm_spm_drv_load_shadow(dev, i); /* barrier to ensure read completes before we proceed further*/ mb(); msm_spm_drv_get_saw2_ver(dev, &dev->major, &dev->minor); num_spm_entry = msm_spm_drv_get_num_spm_entry(dev); dev->reg_seq_entry_shadow = kzalloc(sizeof(*dev->reg_seq_entry_shadow) * num_spm_entry, GFP_KERNEL); if (!dev->reg_seq_entry_shadow) return -ENOMEM; return 0; }
gpl-2.0
tiny4579/tiny-gingersense
drivers/gpu/drm/nouveau/nv40_fifo.c
807
10467
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

/* Instance-memory address of channel (c)'s RAMFC context block. */
#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
#define NV40_RAMFC__SIZE 128

/* Allocate and fill the RAMFC context for a new channel, then enable its
 * bit in PFIFO_MODE so the channel runs in DMA mode.  The writes happen
 * under the context-switch lock with instmem write access prepared.
 * Returns 0 or the error from nouveau_gpuobj_new_fake().
 */
int
nv40_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV40_RAMFC(chan->id);
	unsigned long flags;
	int ret;

	ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
				      NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
	if (ret)
		return ret;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	dev_priv->engine.instmem.prepare_access(dev, true);
	/* Initial DMA put/get both point at the push buffer base. */
	nv_wi32(dev, fc +  0, chan->pushbuf_base);
	nv_wi32(dev, fc +  4, chan->pushbuf_base);
	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
	nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
			      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			      0x30000000 /* no idea.. */);
	nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
	nv_wi32(dev, fc + 60, 0x0001FFFF);
	dev_priv->engine.instmem.finish_access(dev);

	/* enable the fifo dma operation */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}

/* Disable the channel's PFIFO_MODE bit and release its RAMFC object. */
void
nv40_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));

	if (chan->ramfc)
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}

/* Restore channel chid's saved RAMFC state into the live CACHE1 PFIFO
 * registers.  The fc + N offsets mirror the save order used in
 * nv40_fifo_unload_context(); the register write order follows the
 * original driver and should not be reshuffled.
 */
static void
nv40_fifo_do_load_context(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;

	dev_priv->engine.instmem.prepare_access(dev, false);

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));

	/* No idea what 0x2058 is.. */
	/* Bits 29:28 of the saved FETCH value are routed into 0x2058 and
	 * stripped before writing DMA_FETCH itself. */
	tmp = nv_ri32(dev, fc + 24);
	tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
	tmp2 |= (tmp & 0x30000000);
	nv_wr32(dev, 0x2058, tmp2);
	tmp &= ~0x30000000;
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);

	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
	tmp = nv_ri32(dev, fc + 40);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));

	/* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
	tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
	tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);

	nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
	/* NVIDIA does this next line twice... */
	nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));

	dev_priv->engine.instmem.finish_access(dev);

	/* Reset CACHE1 get/put for the freshly loaded context. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}

/* Load a channel's context, mark it active in DMA mode and clear the
 * DMA_CTL_AT_INFO bit.  Always returns 0.
 */
int
nv40_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	uint32_t tmp;

	nv40_fifo_do_load_context(dev, chan->id);

	/* Set channel active, and in DMA mode */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
		     NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);

	/* Reset DMA_CTL_AT_INFO to INVALID */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);

	return 0;
}

/* Save the currently active channel's PFIFO state into its RAMFC block
 * (mirror of nv40_fifo_do_load_context()), then switch PFIFO to the
 * dummy "null" channel (channels - 1).  Returns 0, including when no
 * real channel is active.
 */
int
nv40_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	uint32_t fc, tmp;
	int chid;

	chid = pfifo->channel_id(dev);
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;
	fc = NV40_RAMFC(chid);

	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
	nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	/* Fold the 0x2058 bits back into the saved FETCH word; the load
	 * path splits them out again. */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
	tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
	nv_wi32(dev, fc + 24, tmp);
	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
	nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
	nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
	nv_wi32(dev, fc + 40, tmp);
	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
	nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
	/* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
	 * more involved depending on the value of 0x3228?
	 */
	nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
	nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
	/* No idea what the below is for exactly, ripped from a mmio-trace */
	nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
	/* NVIDIA do this next line twice.. bug? */
	nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
	nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
	nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
#if 0 /* no real idea which is PUT/GET in UNK_48.. */
	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
	nv_wi32(dev, fc + 72, tmp);
#endif
	dev_priv->engine.instmem.finish_access(dev);

	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
		NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
	return 0;
}

/* Reset PFIFO via PMC_ENABLE and program a block of registers to their
 * power-on values.  The specific values/registers come from mmio traces
 * of the proprietary driver (see inline comments).
 */
static void
nv40_fifo_init_reset(struct drm_device *dev)
{
	int i;

	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);

	nv_wr32(dev, 0x003224, 0x000f0078);
	nv_wr32(dev, 0x003210, 0x00000000);
	nv_wr32(dev, 0x003270, 0x00000000);
	nv_wr32(dev, 0x003240, 0x00000000);
	nv_wr32(dev, 0x003244, 0x00000000);
	nv_wr32(dev, 0x003258, 0x00000000);
	nv_wr32(dev, 0x002504, 0x00000000);
	for (i = 0; i < 16; i++)
		nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
	nv_wr32(dev, 0x00250c, 0x0000ffff);
	nv_wr32(dev, 0x002048, 0x00000000);
	nv_wr32(dev, 0x003228, 0x00000000);
	nv_wr32(dev, 0x0032e8, 0x00000000);
	nv_wr32(dev, 0x002410, 0x00000000);
	nv_wr32(dev, 0x002420, 0x00000000);
	nv_wr32(dev, 0x002058, 0x00000001);
	nv_wr32(dev, 0x00221c, 0x00000000);
	/* something with 0x2084, read/modify/write, no change */
	nv_wr32(dev, 0x002040, 0x000000ff);
	nv_wr32(dev, 0x002500, 0x00000000);
	nv_wr32(dev, 0x003200, 0x00000000);

	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
}

/* Point PFIFO at RAMHT/RAMRO/RAMFC.  The RAMFC location differs per
 * chipset: the listed NV4x chips use a fixed 0x30002 value, the rest
 * derive it from the end of VRAM.
 */
static void
nv40_fifo_init_ramxx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((dev_priv->ramht_bits - 9) << 16) |
				       (dev_priv->ramht_offset >> 8));
	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);

	switch (dev_priv->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		nv_wr32(dev, 0x2230, 1);
		break;
	default:
		break;
	}

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x47:
	case 0x48:
	case 0x49:
	case 0x4b:
		nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
		break;
	default:
		nv_wr32(dev, 0x2230, 0);
		nv_wr32(dev, NV40_PFIFO_RAMFC,
			((dev_priv->vram_size - 512 * 1024 +
			  dev_priv->ramfc_offset) >> 16) | (3 << 16));
		break;
	}
}

/* Ack any pending PFIFO interrupts and unmask all of them. */
static void
nv40_fifo_init_intr(struct drm_device *dev)
{
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);
}

/* Full PFIFO bring-up: reset, RAM table setup, load the null channel,
 * enable interrupts/engine, then re-enable the mode bit for every
 * channel that already exists.  Always returns 0.
 */
int
nv40_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	nv40_fifo_init_reset(dev);
	nv40_fifo_init_ramxx(dev);

	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

	nv40_fifo_init_intr(dev);
	pfifo->enable(dev);
	pfifo->reassign(dev, true);

	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		if (dev_priv->fifos[i]) {
			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
		}
	}

	return 0;
}
gpl-2.0
panda-z/android_kernel_hp_mantaray
mm/thrash.c
1831
1967
/* * mm/thrash.c * * Copyright (C) 2004, Red Hat, Inc. * Copyright (C) 2004, Rik van Riel <riel@redhat.com> * Released under the GPL, see the file COPYING for details. * * Simple token based thrashing protection, using the algorithm * described in: http://www.cs.wm.edu/~sjiang/token.pdf * * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com> * Improved algorithm to pass token: * Each task has a priority which is incremented if it contended * for the token in an interval less than its previous attempt. * If the token is acquired, that task's priority is boosted to prevent * the token from bouncing around too often and to let the task make * some progress in its execution. */ #include <linux/jiffies.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/swap.h> static DEFINE_SPINLOCK(swap_token_lock); struct mm_struct *swap_token_mm; static unsigned int global_faults; void grab_swap_token(struct mm_struct *mm) { int current_interval; global_faults++; current_interval = global_faults - mm->faultstamp; if (!spin_trylock(&swap_token_lock)) return; /* First come first served */ if (swap_token_mm == NULL) { mm->token_priority = mm->token_priority + 2; swap_token_mm = mm; goto out; } if (mm != swap_token_mm) { if (current_interval < mm->last_interval) mm->token_priority++; else { if (likely(mm->token_priority > 0)) mm->token_priority--; } /* Check if we deserve the token */ if (mm->token_priority > swap_token_mm->token_priority) { mm->token_priority += 2; swap_token_mm = mm; } } else { /* Token holder came in again! */ mm->token_priority += 2; } out: mm->faultstamp = global_faults; mm->last_interval = current_interval; spin_unlock(&swap_token_lock); } /* Called on process exit. */ void __put_swap_token(struct mm_struct *mm) { spin_lock(&swap_token_lock); if (likely(mm == swap_token_mm)) swap_token_mm = NULL; spin_unlock(&swap_token_lock); }
gpl-2.0
mathkid95/linux_motorola_lollipop
drivers/clocksource/bcm_kona_timer.c
2087
5308
/*
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/io.h>
#include <asm/mach/time.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

/* Kona GPTIMER register layout */
#define KONA_GPTIMER_STCS_OFFSET			0x00000000
#define KONA_GPTIMER_STCLO_OFFSET			0x00000004
#define KONA_GPTIMER_STCHI_OFFSET			0x00000008
#define KONA_GPTIMER_STCM0_OFFSET			0x0000000C

#define KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT		0
#define KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT		4

struct kona_bcm_timers {
	int tmr_irq;		/* mapped Linux IRQ for compare 0 */
	void __iomem *tmr_regs;	/* mapped GPTIMER register block */
};

static struct kona_bcm_timers timers;

/* Timer input clock rate in Hz, taken from the DT "clock-frequency". */
static u32 arch_timer_rate;

/*
 * We use the peripheral timers for system tick, the cpu global timer for
 * profile tick
 */
/* Ack the compare-0 match interrupt and disable further compares. */
static void kona_timer_disable_and_clear(void __iomem *base)
{
	uint32_t reg;

	/*
	 * clear and disable interrupts
	 * We are using compare/match register 0 for our system interrupts
	 */
	reg = readl(base + KONA_GPTIMER_STCS_OFFSET);

	/* Clear compare (0) interrupt */
	reg |= 1 << KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT;
	/* disable compare */
	reg &= ~(1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);

	writel(reg, base + KONA_GPTIMER_STCS_OFFSET);
}

/* Read the 64-bit free-running counter coherently into *msw/*lsw.
 * Re-reads on hi-word wrap, giving up (with an error log) after a few
 * attempts.
 */
static void
kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
{
	void __iomem *base = IOMEM(timer_base);
	int loop_limit = 4;

	/*
	 * Read 64-bit free running counter
	 * 1. Read hi-word
	 * 2. Read low-word
	 * 3. Read hi-word again
	 * 4.1
	 *      if new hi-word is not equal to previously read hi-word, then
	 *      start from #1
	 * 4.2
	 *      if new hi-word is equal to previously read hi-word then stop.
	 */

	while (--loop_limit) {
		*msw = readl(base + KONA_GPTIMER_STCHI_OFFSET);
		*lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET);
		if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET))
			break;
	}
	if (!loop_limit) {
		pr_err("bcm_kona_timer: getting counter failed.\n");
		pr_err(" Timer will be impacted\n");
	}

	return;
}

static const struct of_device_id bcm_timer_ids[] __initconst = {
	{.compatible = "bcm,kona-timer"},
	{},
};

/* Locate the timer DT node, read its clock rate, map its IRQ and
 * registers.  Panics on any failure, matching the existing handling of
 * a missing node/frequency — the system cannot run without its tick.
 */
static void __init kona_timers_init(void)
{
	struct device_node *node;
	u32 freq;

	node = of_find_matching_node(NULL, bcm_timer_ids);
	if (!node)
		panic("No timer");

	if (!of_property_read_u32(node, "clock-frequency", &freq))
		arch_timer_rate = freq;
	else
		panic("clock-frequency not set in the .dts file");

	/* Setup IRQ numbers */
	timers.tmr_irq = irq_of_parse_and_map(node, 0);
	/* FIX: a zero mapping means no usable interrupt; fail loudly
	 * instead of letting setup_irq() misbehave later. */
	if (!timers.tmr_irq)
		panic("No timer interrupt");

	/* Setup IO addresses */
	timers.tmr_regs = of_iomap(node, 0);
	/* FIX: previously unchecked; a failed iomap caused a NULL
	 * dereference in kona_timer_disable_and_clear() below. */
	if (!timers.tmr_regs)
		panic("Can't map timer registers");

	kona_timer_disable_and_clear(timers.tmr_regs);
}

/* clockevents callback: arm compare 0 for "clc" ticks from now.
 * The compare interrupt already disabled the timer, so we only reload
 * the match value and re-enable.  Always returns 0.
 */
static int kona_timer_set_next_event(unsigned long clc,
				  struct clock_event_device *unused)
{
	/*
	 * timer (0) is disabled by the timer interrupt already
	 * so, here we reload the next event value and re-enable
	 * the timer.
	 *
	 * This way, we are potentially losing the time between
	 * timer-interrupt->set_next_event. CPU local timers, when
	 * they come in should get rid of skew.
	 */

	uint32_t lsw, msw;
	uint32_t reg;

	kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);

	/* Load the "next" event tick value */
	writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);

	/* Enable compare */
	reg = readl(timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);
	reg |= (1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);
	writel(reg, timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);

	return 0;
}

/* clockevents callback: only one-shot mode is supported; anything else
 * quiesces the timer.
 */
static void kona_timer_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *unused)
{
	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
		/* by default mode is one shot don't do any thing */
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		kona_timer_disable_and_clear(timers.tmr_regs);
	}
}

static struct clock_event_device kona_clockevent_timer = {
	.name = "timer 1",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = kona_timer_set_next_event,
	.set_mode = kona_timer_set_mode
};

/* Register the clockevent device with min delta 6 ticks, max 32 bits. */
static void __init kona_timer_clockevents_init(void)
{
	kona_clockevent_timer.cpumask = cpumask_of(0);
	clockevents_config_and_register(&kona_clockevent_timer,
		arch_timer_rate, 6, 0xffffffff);
}

/* Tick interrupt: ack/disable compare 0, then run the event handler. */
static irqreturn_t kona_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &kona_clockevent_timer;

	kona_timer_disable_and_clear(timers.tmr_regs);
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction kona_timer_irq = {
	.name = "Kona Timer Tick",
	.flags = IRQF_TIMER,
	.handler = kona_timer_interrupt,
};

/* Entry point wired up via CLOCKSOURCE_OF_DECLARE: probe DT, register
 * the clockevent, hook the IRQ and arm the first tick.
 */
static void __init kona_timer_init(void)
{
	kona_timers_init();
	kona_timer_clockevents_init();
	setup_irq(timers.tmr_irq, &kona_timer_irq);
	kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
}

CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer", kona_timer_init);
gpl-2.0
keks2293/kernel_zte
drivers/mmc/host/sdhci-of-hlwd.c
2087
2624
/*
 * drivers/mmc/host/sdhci-of-hlwd.c
 *
 * Nintendo Wii Secure Digital Host Controller Interface.
 * Copyright (C) 2009 The GameCube Linux Team
 * Copyright (C) 2009 Albert Herranz
 *
 * Based on sdhci-of-esdhc.c
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 * Copyright (c) 2009 MontaVista Software, Inc.
 *
 * Authors: Xiaobo Xie <X.Xie@freescale.com>
 *	    Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"

/*
 * Ops and quirks for the Nintendo Wii SDHCI controllers.
 */

/*
 * The Hollywood controller misbehaves unless every register write is
 * followed by a short settle delay.
 */
#define SDHCI_HLWD_WRITE_DELAY	5 /* usecs */

/* Pause long enough for the controller to absorb the last write. */
static inline void sdhci_hlwd_settle(void)
{
	udelay(SDHCI_HLWD_WRITE_DELAY);
}

/* Big-endian byte write followed by the mandatory settle delay. */
static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
{
	sdhci_be32bs_writeb(host, val, reg);
	sdhci_hlwd_settle();
}

/* Big-endian word write followed by the mandatory settle delay. */
static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg)
{
	sdhci_be32bs_writew(host, val, reg);
	sdhci_hlwd_settle();
}

/* Big-endian long write followed by the mandatory settle delay. */
static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg)
{
	sdhci_be32bs_writel(host, val, reg);
	sdhci_hlwd_settle();
}

/* Reads go straight through; only writes need the delay wrappers. */
static const struct sdhci_ops sdhci_hlwd_ops = {
	.write_b = sdhci_hlwd_writeb,
	.write_w = sdhci_hlwd_writew,
	.write_l = sdhci_hlwd_writel,
	.read_b = sdhci_be32bs_readb,
	.read_w = sdhci_be32bs_readw,
	.read_l = sdhci_be32bs_readl,
};

static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
	.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
		  SDHCI_QUIRK_32BIT_DMA_SIZE,
	.ops = &sdhci_hlwd_ops,
};

/* Thin wrappers around the generic sdhci-pltfm probe/remove helpers. */
static int sdhci_hlwd_probe(struct platform_device *pdev)
{
	return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata);
}

static int sdhci_hlwd_remove(struct platform_device *pdev)
{
	return sdhci_pltfm_unregister(pdev);
}

static const struct of_device_id sdhci_hlwd_of_match[] = {
	{ .compatible = "nintendo,hollywood-sdhci" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match);

static struct platform_driver sdhci_hlwd_driver = {
	.probe = sdhci_hlwd_probe,
	.remove = sdhci_hlwd_remove,
	.driver = {
		.name = "sdhci-hlwd",
		.owner = THIS_MODULE,
		.of_match_table = sdhci_hlwd_of_match,
		.pm = SDHCI_PLTFM_PMOPS,
	},
};

module_platform_driver(sdhci_hlwd_driver);

MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver");
MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz");
MODULE_LICENSE("GPL v2");
gpl-2.0
dev-harsh1998/Phantom-Jalebi
net/batman-adv/vis.c
2087
25846
/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors: * * Simon Wunderlich * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA */ #include "main.h" #include "send.h" #include "translation-table.h" #include "vis.h" #include "soft-interface.h" #include "hard-interface.h" #include "hash.h" #include "originator.h" #define BATADV_MAX_VIS_PACKET_SIZE 1000 /* hash class keys */ static struct lock_class_key batadv_vis_hash_lock_class_key; /* free the info */ static void batadv_free_info(struct kref *ref) { struct batadv_vis_info *info; struct batadv_priv *bat_priv; struct batadv_vis_recvlist_node *entry, *tmp; info = container_of(ref, struct batadv_vis_info, refcount); bat_priv = info->bat_priv; list_del_init(&info->send_list); spin_lock_bh(&bat_priv->vis.list_lock); list_for_each_entry_safe(entry, tmp, &info->recv_list, list) { list_del(&entry->list); kfree(entry); } spin_unlock_bh(&bat_priv->vis.list_lock); kfree_skb(info->skb_packet); kfree(info); } /* Compare two vis packets, used by the hashing algorithm */ static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2) { const struct batadv_vis_info *d1, *d2; const struct batadv_vis_packet *p1, *p2; d1 = container_of(node, struct batadv_vis_info, hash_entry); d2 = data2; p1 = (struct batadv_vis_packet *)d1->skb_packet->data; p2 = (struct batadv_vis_packet *)d2->skb_packet->data; return batadv_compare_eth(p1->vis_orig, 
p2->vis_orig); } /* hash function to choose an entry in a hash table of given size * hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size) { const struct batadv_vis_info *vis_info = data; const struct batadv_vis_packet *packet; const unsigned char *key; uint32_t hash = 0; size_t i; packet = (struct batadv_vis_packet *)vis_info->skb_packet->data; key = packet->vis_orig; for (i = 0; i < ETH_ALEN; i++) { hash += key[i]; hash += (hash << 10); hash ^= (hash >> 6); } hash += (hash << 3); hash ^= (hash >> 11); hash += (hash << 15); return hash % size; } static struct batadv_vis_info * batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) { struct batadv_hashtable *hash = bat_priv->vis.hash; struct hlist_head *head; struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; uint32_t index; if (!hash) return NULL; index = batadv_vis_info_choose(data, hash->size); head = &hash->table[index]; rcu_read_lock(); hlist_for_each_entry_rcu(vis_info, head, hash_entry) { if (!batadv_vis_info_cmp(&vis_info->hash_entry, data)) continue; vis_info_tmp = vis_info; break; } rcu_read_unlock(); return vis_info_tmp; } /* insert interface to the list of interfaces of one originator, if it * does not already exist in the list */ static void batadv_vis_data_insert_interface(const uint8_t *interface, struct hlist_head *if_list, bool primary) { struct batadv_vis_if_list_entry *entry; hlist_for_each_entry(entry, if_list, list) { if (batadv_compare_eth(entry->addr, interface)) return; } /* it's a new address, add it to the list */ entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return; memcpy(entry->addr, interface, ETH_ALEN); entry->primary = primary; hlist_add_head(&entry->list, if_list); } static void batadv_vis_data_read_prim_sec(struct seq_file *seq, const struct hlist_head *if_list) { struct batadv_vis_if_list_entry *entry; hlist_for_each_entry(entry, if_list, list) { if (entry->primary) 
seq_puts(seq, "PRIMARY, "); else seq_printf(seq, "SEC %pM, ", entry->addr); } } /* read an entry */ static ssize_t batadv_vis_data_read_entry(struct seq_file *seq, const struct batadv_vis_info_entry *entry, const uint8_t *src, bool primary) { if (primary && entry->quality == 0) return seq_printf(seq, "TT %pM, ", entry->dest); else if (batadv_compare_eth(entry->src, src)) return seq_printf(seq, "TQ %pM %d, ", entry->dest, entry->quality); return 0; } static void batadv_vis_data_insert_interfaces(struct hlist_head *list, struct batadv_vis_packet *packet, struct batadv_vis_info_entry *entries) { int i; for (i = 0; i < packet->entries; i++) { if (entries[i].quality == 0) continue; if (batadv_compare_eth(entries[i].src, packet->vis_orig)) continue; batadv_vis_data_insert_interface(entries[i].src, list, false); } } static void batadv_vis_data_read_entries(struct seq_file *seq, struct hlist_head *list, struct batadv_vis_packet *packet, struct batadv_vis_info_entry *entries) { int i; struct batadv_vis_if_list_entry *entry; hlist_for_each_entry(entry, list, list) { seq_printf(seq, "%pM,", entry->addr); for (i = 0; i < packet->entries; i++) batadv_vis_data_read_entry(seq, &entries[i], entry->addr, entry->primary); /* add primary/secondary records */ if (batadv_compare_eth(entry->addr, packet->vis_orig)) batadv_vis_data_read_prim_sec(seq, list); seq_puts(seq, "\n"); } } static void batadv_vis_seq_print_text_bucket(struct seq_file *seq, const struct hlist_head *head) { struct batadv_vis_info *info; struct batadv_vis_packet *packet; uint8_t *entries_pos; struct batadv_vis_info_entry *entries; struct batadv_vis_if_list_entry *entry; struct hlist_node *n; HLIST_HEAD(vis_if_list); hlist_for_each_entry_rcu(info, head, hash_entry) { packet = (struct batadv_vis_packet *)info->skb_packet->data; entries_pos = (uint8_t *)packet + sizeof(*packet); entries = (struct batadv_vis_info_entry *)entries_pos; batadv_vis_data_insert_interface(packet->vis_orig, &vis_if_list, true); 
batadv_vis_data_insert_interfaces(&vis_if_list, packet, entries); batadv_vis_data_read_entries(seq, &vis_if_list, packet, entries); hlist_for_each_entry_safe(entry, n, &vis_if_list, list) { hlist_del(&entry->list); kfree(entry); } } } int batadv_vis_seq_print_text(struct seq_file *seq, void *offset) { struct batadv_hard_iface *primary_if; struct hlist_head *head; struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); struct batadv_hashtable *hash = bat_priv->vis.hash; uint32_t i; int ret = 0; int vis_server = atomic_read(&bat_priv->vis_mode); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE) goto out; spin_lock_bh(&bat_priv->vis.hash_lock); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; batadv_vis_seq_print_text_bucket(seq, head); } spin_unlock_bh(&bat_priv->vis.hash_lock); out: if (primary_if) batadv_hardif_free_ref(primary_if); return ret; } /* add the info packet to the send list, if it was not * already linked in. */ static void batadv_send_list_add(struct batadv_priv *bat_priv, struct batadv_vis_info *info) { if (list_empty(&info->send_list)) { kref_get(&info->refcount); list_add_tail(&info->send_list, &bat_priv->vis.send_list); } } /* delete the info packet from the send list, if it was * linked in. */ static void batadv_send_list_del(struct batadv_vis_info *info) { if (!list_empty(&info->send_list)) { list_del_init(&info->send_list); kref_put(&info->refcount, batadv_free_info); } } /* tries to add one entry to the receive list. 
*/ static void batadv_recv_list_add(struct batadv_priv *bat_priv, struct list_head *recv_list, const char *mac) { struct batadv_vis_recvlist_node *entry; entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return; memcpy(entry->mac, mac, ETH_ALEN); spin_lock_bh(&bat_priv->vis.list_lock); list_add_tail(&entry->list, recv_list); spin_unlock_bh(&bat_priv->vis.list_lock); } /* returns 1 if this mac is in the recv_list */ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv, const struct list_head *recv_list, const char *mac) { const struct batadv_vis_recvlist_node *entry; spin_lock_bh(&bat_priv->vis.list_lock); list_for_each_entry(entry, recv_list, list) { if (batadv_compare_eth(entry->mac, mac)) { spin_unlock_bh(&bat_priv->vis.list_lock); return 1; } } spin_unlock_bh(&bat_priv->vis.list_lock); return 0; } /* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old, * broken.. ). vis hash must be locked outside. is_new is set when the packet * is newer than old entries in the hash. 
*/ static struct batadv_vis_info * batadv_add_packet(struct batadv_priv *bat_priv, struct batadv_vis_packet *vis_packet, int vis_info_len, int *is_new, int make_broadcast) { struct batadv_vis_info *info, *old_info; struct batadv_vis_packet *search_packet, *old_packet; struct batadv_vis_info search_elem; struct batadv_vis_packet *packet; struct sk_buff *tmp_skb; int hash_added; size_t len; size_t max_entries; *is_new = 0; /* sanity check */ if (!bat_priv->vis.hash) return NULL; /* see if the packet is already in vis_hash */ search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet)); if (!search_elem.skb_packet) return NULL; len = sizeof(*search_packet); tmp_skb = search_elem.skb_packet; search_packet = (struct batadv_vis_packet *)skb_put(tmp_skb, len); memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN); old_info = batadv_vis_hash_find(bat_priv, &search_elem); kfree_skb(search_elem.skb_packet); if (old_info) { tmp_skb = old_info->skb_packet; old_packet = (struct batadv_vis_packet *)tmp_skb->data; if (!batadv_seq_after(ntohl(vis_packet->seqno), ntohl(old_packet->seqno))) { if (old_packet->seqno == vis_packet->seqno) { batadv_recv_list_add(bat_priv, &old_info->recv_list, vis_packet->sender_orig); return old_info; } else { /* newer packet is already in hash. 
*/ return NULL; } } /* remove old entry */ batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp, batadv_vis_info_choose, old_info); batadv_send_list_del(old_info); kref_put(&old_info->refcount, batadv_free_info); } info = kmalloc(sizeof(*info), GFP_ATOMIC); if (!info) return NULL; len = sizeof(*packet) + vis_info_len; info->skb_packet = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN); if (!info->skb_packet) { kfree(info); return NULL; } skb_reserve(info->skb_packet, ETH_HLEN + NET_IP_ALIGN); packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len); kref_init(&info->refcount); INIT_LIST_HEAD(&info->send_list); INIT_LIST_HEAD(&info->recv_list); info->first_seen = jiffies; info->bat_priv = bat_priv; memcpy(packet, vis_packet, len); /* initialize and add new packet. */ *is_new = 1; /* Make it a broadcast packet, if required */ if (make_broadcast) memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN); /* repair if entries is longer than packet. */ max_entries = vis_info_len / sizeof(struct batadv_vis_info_entry); if (packet->entries > max_entries) packet->entries = max_entries; batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig); /* try to add it */ hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp, batadv_vis_info_choose, info, &info->hash_entry); if (hash_added != 0) { /* did not work (for some reason) */ kref_put(&info->refcount, batadv_free_info); info = NULL; } return info; } /* handle the server sync packet, forward if needed. 
*/ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv, struct batadv_vis_packet *vis_packet, int vis_info_len) { struct batadv_vis_info *info; int is_new, make_broadcast; int vis_server = atomic_read(&bat_priv->vis_mode); make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC); spin_lock_bh(&bat_priv->vis.hash_lock); info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, &is_new, make_broadcast); if (!info) goto end; /* only if we are server ourselves and packet is newer than the one in * hash. */ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new) batadv_send_list_add(bat_priv, info); end: spin_unlock_bh(&bat_priv->vis.hash_lock); } /* handle an incoming client update packet and schedule forward if needed. */ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv, struct batadv_vis_packet *vis_packet, int vis_info_len) { struct batadv_vis_info *info; struct batadv_vis_packet *packet; int is_new; int vis_server = atomic_read(&bat_priv->vis_mode); int are_target = 0; /* clients shall not broadcast. */ if (is_broadcast_ether_addr(vis_packet->target_orig)) return; /* Are we the target for this VIS packet? */ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && batadv_is_my_mac(bat_priv, vis_packet->target_orig)) are_target = 1; spin_lock_bh(&bat_priv->vis.hash_lock); info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, &is_new, are_target); if (!info) goto end; /* note that outdated packets will be dropped at this point. */ packet = (struct batadv_vis_packet *)info->skb_packet->data; /* send only if we're the target server or ... */ if (are_target && is_new) { packet->vis_type = BATADV_VIS_TYPE_SERVER_SYNC; /* upgrade! */ batadv_send_list_add(bat_priv, info); /* ... we're not the recipient (and thus need to forward). 
*/ } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) { batadv_send_list_add(bat_priv, info); } end: spin_unlock_bh(&bat_priv->vis.hash_lock); } /* Walk the originators and find the VIS server with the best tq. Set the packet * address to its address and return the best_tq. * * Must be called with the originator hash locked */ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv, struct batadv_vis_info *info) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct batadv_neigh_node *router; struct hlist_head *head; struct batadv_orig_node *orig_node; struct batadv_vis_packet *packet; int best_tq = -1; uint32_t i; packet = (struct batadv_vis_packet *)info->skb_packet->data; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { router = batadv_orig_node_get_router(orig_node); if (!router) continue; if ((orig_node->flags & BATADV_VIS_SERVER) && (router->tq_avg > best_tq)) { best_tq = router->tq_avg; memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); } batadv_neigh_node_free_ref(router); } rcu_read_unlock(); } return best_tq; } /* Return true if the vis packet is full. 
*/ static bool batadv_vis_packet_full(const struct batadv_vis_info *info) { const struct batadv_vis_packet *packet; size_t num; packet = (struct batadv_vis_packet *)info->skb_packet->data; num = BATADV_MAX_VIS_PACKET_SIZE / sizeof(struct batadv_vis_info_entry); if (num < packet->entries + 1) return true; return false; } /* generates a packet of own vis data, * returns 0 on success, -1 if no packet could be generated */ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; struct batadv_orig_node *orig_node; struct batadv_neigh_node *router; struct batadv_vis_info *info = bat_priv->vis.my_info; struct batadv_vis_packet *packet; struct batadv_vis_info_entry *entry; struct batadv_tt_common_entry *tt_common_entry; uint8_t *packet_pos; int best_tq = -1; uint32_t i; info->first_seen = jiffies; packet = (struct batadv_vis_packet *)info->skb_packet->data; packet->vis_type = atomic_read(&bat_priv->vis_mode); memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN); packet->header.ttl = BATADV_TTL; packet->seqno = htonl(ntohl(packet->seqno) + 1); packet->entries = 0; packet->reserved = 0; skb_trim(info->skb_packet, sizeof(*packet)); if (packet->vis_type == BATADV_VIS_TYPE_CLIENT_UPDATE) { best_tq = batadv_find_best_vis_server(bat_priv, info); if (best_tq < 0) return best_tq; } for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { router = batadv_orig_node_get_router(orig_node); if (!router) continue; if (!batadv_compare_eth(router->addr, orig_node->orig)) goto next; if (router->if_incoming->if_status != BATADV_IF_ACTIVE) goto next; if (router->tq_avg < 1) goto next; /* fill one entry into buffer. 
*/ packet_pos = skb_put(info->skb_packet, sizeof(*entry)); entry = (struct batadv_vis_info_entry *)packet_pos; memcpy(entry->src, router->if_incoming->net_dev->dev_addr, ETH_ALEN); memcpy(entry->dest, orig_node->orig, ETH_ALEN); entry->quality = router->tq_avg; packet->entries++; next: batadv_neigh_node_free_ref(router); if (batadv_vis_packet_full(info)) goto unlock; } rcu_read_unlock(); } hash = bat_priv->tt.local_hash; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) { packet_pos = skb_put(info->skb_packet, sizeof(*entry)); entry = (struct batadv_vis_info_entry *)packet_pos; memset(entry->src, 0, ETH_ALEN); memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN); entry->quality = 0; /* 0 means TT */ packet->entries++; if (batadv_vis_packet_full(info)) goto unlock; } rcu_read_unlock(); } return 0; unlock: rcu_read_unlock(); return 0; } /* free old vis packets. Must be called with this vis_hash_lock * held */ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) { uint32_t i; struct batadv_hashtable *hash = bat_priv->vis.hash; struct hlist_node *node_tmp; struct hlist_head *head; struct batadv_vis_info *info; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; hlist_for_each_entry_safe(info, node_tmp, head, hash_entry) { /* never purge own data. */ if (info == bat_priv->vis.my_info) continue; if (batadv_has_timed_out(info->first_seen, BATADV_VIS_TIMEOUT)) { hlist_del(&info->hash_entry); batadv_send_list_del(info); kref_put(&info->refcount, batadv_free_info); } } } } static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, struct batadv_vis_info *info) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; struct batadv_orig_node *orig_node; struct batadv_vis_packet *packet; struct sk_buff *skb; uint32_t i; packet = (struct batadv_vis_packet *)info->skb_packet->data; /* send to all routers in range. 
*/ for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { /* if it's a vis server and reachable, send it. */ if (!(orig_node->flags & BATADV_VIS_SERVER)) continue; /* don't send it if we already received the packet from * this node. */ if (batadv_recv_list_is_in(bat_priv, &info->recv_list, orig_node->orig)) continue; memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); skb = skb_clone(info->skb_packet, GFP_ATOMIC); if (!skb) continue; if (!batadv_send_skb_to_orig(skb, orig_node, NULL)) kfree_skb(skb); } rcu_read_unlock(); } } static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv, struct batadv_vis_info *info) { struct batadv_orig_node *orig_node; struct sk_buff *skb; struct batadv_vis_packet *packet; packet = (struct batadv_vis_packet *)info->skb_packet->data; orig_node = batadv_orig_hash_find(bat_priv, packet->target_orig); if (!orig_node) goto out; skb = skb_clone(info->skb_packet, GFP_ATOMIC); if (!skb) goto out; if (!batadv_send_skb_to_orig(skb, orig_node, NULL)) kfree_skb(skb); out: if (orig_node) batadv_orig_node_free_ref(orig_node); } /* only send one vis packet. 
called from batadv_send_vis_packets() */ static void batadv_send_vis_packet(struct batadv_priv *bat_priv, struct batadv_vis_info *info) { struct batadv_hard_iface *primary_if; struct batadv_vis_packet *packet; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; packet = (struct batadv_vis_packet *)info->skb_packet->data; if (packet->header.ttl < 2) { pr_debug("Error - can't send vis packet: ttl exceeded\n"); goto out; } memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN); packet->header.ttl--; if (is_broadcast_ether_addr(packet->target_orig)) batadv_broadcast_vis_packet(bat_priv, info); else batadv_unicast_vis_packet(bat_priv, info); packet->header.ttl++; /* restore TTL */ out: if (primary_if) batadv_hardif_free_ref(primary_if); } /* called from timer; send (and maybe generate) vis packet. */ static void batadv_send_vis_packets(struct work_struct *work) { struct delayed_work *delayed_work; struct batadv_priv *bat_priv; struct batadv_priv_vis *priv_vis; struct batadv_vis_info *info; delayed_work = container_of(work, struct delayed_work, work); priv_vis = container_of(delayed_work, struct batadv_priv_vis, work); bat_priv = container_of(priv_vis, struct batadv_priv, vis); spin_lock_bh(&bat_priv->vis.hash_lock); batadv_purge_vis_packets(bat_priv); if (batadv_generate_vis_packet(bat_priv) == 0) { /* schedule if generation was successful */ batadv_send_list_add(bat_priv, bat_priv->vis.my_info); } while (!list_empty(&bat_priv->vis.send_list)) { info = list_first_entry(&bat_priv->vis.send_list, typeof(*info), send_list); kref_get(&info->refcount); spin_unlock_bh(&bat_priv->vis.hash_lock); batadv_send_vis_packet(bat_priv, info); spin_lock_bh(&bat_priv->vis.hash_lock); batadv_send_list_del(info); kref_put(&info->refcount, batadv_free_info); } spin_unlock_bh(&bat_priv->vis.hash_lock); queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work, msecs_to_jiffies(BATADV_VIS_INTERVAL)); } /* init the vis server. 
this may only be called when if_list is already * initialized (e.g. bat0 is initialized, interfaces have been added) */ int batadv_vis_init(struct batadv_priv *bat_priv) { struct batadv_vis_packet *packet; int hash_added; unsigned int len; unsigned long first_seen; struct sk_buff *tmp_skb; if (bat_priv->vis.hash) return 0; spin_lock_bh(&bat_priv->vis.hash_lock); bat_priv->vis.hash = batadv_hash_new(256); if (!bat_priv->vis.hash) { pr_err("Can't initialize vis_hash\n"); goto err; } batadv_hash_set_lock_class(bat_priv->vis.hash, &batadv_vis_hash_lock_class_key); bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC); if (!bat_priv->vis.my_info) goto err; len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE; len += ETH_HLEN + NET_IP_ALIGN; bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len); if (!bat_priv->vis.my_info->skb_packet) goto free_info; skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN + NET_IP_ALIGN); tmp_skb = bat_priv->vis.my_info->skb_packet; packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet)); /* prefill the vis info */ first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL); bat_priv->vis.my_info->first_seen = first_seen; INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list); INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list); kref_init(&bat_priv->vis.my_info->refcount); bat_priv->vis.my_info->bat_priv = bat_priv; packet->header.version = BATADV_COMPAT_VERSION; packet->header.packet_type = BATADV_VIS; packet->header.ttl = BATADV_TTL; packet->seqno = 0; packet->reserved = 0; packet->entries = 0; INIT_LIST_HEAD(&bat_priv->vis.send_list); hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp, batadv_vis_info_choose, bat_priv->vis.my_info, &bat_priv->vis.my_info->hash_entry); if (hash_added != 0) { pr_err("Can't add own vis packet into hash\n"); /* not in hash, need to remove it manually. 
*/ kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info); goto err; } spin_unlock_bh(&bat_priv->vis.hash_lock); INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets); queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work, msecs_to_jiffies(BATADV_VIS_INTERVAL)); return 0; free_info: kfree(bat_priv->vis.my_info); bat_priv->vis.my_info = NULL; err: spin_unlock_bh(&bat_priv->vis.hash_lock); batadv_vis_quit(bat_priv); return -ENOMEM; } /* Decrease the reference count on a hash item info */ static void batadv_free_info_ref(struct hlist_node *node, void *arg) { struct batadv_vis_info *info; info = container_of(node, struct batadv_vis_info, hash_entry); batadv_send_list_del(info); kref_put(&info->refcount, batadv_free_info); } /* shutdown vis-server */ void batadv_vis_quit(struct batadv_priv *bat_priv) { if (!bat_priv->vis.hash) return; cancel_delayed_work_sync(&bat_priv->vis.work); spin_lock_bh(&bat_priv->vis.hash_lock); /* properly remove, kill timers ... */ batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL); bat_priv->vis.hash = NULL; bat_priv->vis.my_info = NULL; spin_unlock_bh(&bat_priv->vis.hash_lock); }
gpl-2.0
sgs3/SGH-I747M_Kernel
drivers/usb/gadget/f_ecm.c
2599
23810
/* * f_ecm.c -- USB CDC Ethernet (ECM) link function driver * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define VERBOSE_DEBUG */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/etherdevice.h> #include "u_ether.h" /* * This function is a "CDC Ethernet Networking Control Model" (CDC ECM) * Ethernet link. The data transfer model is simple (packets sent and * received over bulk endpoints using normal short packet termination), * and the control model exposes various data and optional notifications. * * ECM is well standardized and (except for Microsoft) supported by most * operating systems with USB host support. It's the preferred interop * solution for Ethernet over USB, at least for firmware based solutions. * (Hardware solutions tend to be more minimalist.) A newer and simpler * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on. * * Note that ECM requires the use of "alternate settings" for its data * interface. This means that the set_alt() method has real work to do, * and also means that a get_alt() method is required. 
*/ struct ecm_ep_descs { struct usb_endpoint_descriptor *in; struct usb_endpoint_descriptor *out; struct usb_endpoint_descriptor *notify; }; enum ecm_notify_state { ECM_NOTIFY_NONE, /* don't notify */ ECM_NOTIFY_CONNECT, /* issue CONNECT next */ ECM_NOTIFY_SPEED, /* issue SPEED_CHANGE next */ }; struct f_ecm { struct gether port; u8 ctrl_id, data_id; char ethaddr[14]; struct ecm_ep_descs fs; struct ecm_ep_descs hs; struct usb_ep *notify; struct usb_endpoint_descriptor *notify_desc; struct usb_request *notify_req; u8 notify_state; bool is_open; /* FIXME is_open needs some irq-ish locking * ... possibly the same as port.ioport */ }; static inline struct f_ecm *func_to_ecm(struct usb_function *f) { return container_of(f, struct f_ecm, port.func); } /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned ecm_bitrate(struct usb_gadget *g) { if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else return 19 * 64 * 1 * 1000 * 8; } /*-------------------------------------------------------------------------*/ /* * Include the status endpoint if we can, even though it's optional. * * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one * packet, to simplify cancellation; and a big transfer interval, to * waste less bandwidth. * * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even * if they ignore the connect/disconnect notifications that real aether * can provide. More advanced cdc configurations might want to support * encapsulated commands (vendor-specific, using control-OUT). 
*/ #define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */ #define ECM_STATUS_BYTECOUNT 16 /* 8 byte header + data */ /* interface descriptor: */ static struct usb_interface_descriptor ecm_control_intf = { .bLength = sizeof ecm_control_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ /* status endpoint is optional; this could be patched later */ .bNumEndpoints = 1, .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, .bInterfaceProtocol = USB_CDC_PROTO_NONE, /* .iInterface = DYNAMIC */ }; static struct usb_cdc_header_desc ecm_header_desc = { .bLength = sizeof ecm_header_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_HEADER_TYPE, .bcdCDC = cpu_to_le16(0x0110), }; static struct usb_cdc_union_desc ecm_union_desc = { .bLength = sizeof(ecm_union_desc), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_UNION_TYPE, /* .bMasterInterface0 = DYNAMIC */ /* .bSlaveInterface0 = DYNAMIC */ }; static struct usb_cdc_ether_desc ecm_desc = { .bLength = sizeof ecm_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_ETHERNET_TYPE, /* this descriptor actually adds value, surprise! */ /* .iMACAddress = DYNAMIC */ .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */ .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN), .wNumberMCFilters = cpu_to_le16(0), .bNumberPowerFilters = 0, }; /* the default data interface has no endpoints ... */ static struct usb_interface_descriptor ecm_data_nop_intf = { .bLength = sizeof ecm_data_nop_intf, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 1, .bAlternateSetting = 0, .bNumEndpoints = 0, .bInterfaceClass = USB_CLASS_CDC_DATA, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, /* .iInterface = DYNAMIC */ }; /* ... 
but the "real" data interface has two bulk endpoints */ static struct usb_interface_descriptor ecm_data_intf = { .bLength = sizeof ecm_data_intf, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 1, .bAlternateSetting = 1, .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_CDC_DATA, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, /* .iInterface = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor fs_ecm_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT), .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC, }; static struct usb_endpoint_descriptor fs_ecm_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor fs_ecm_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *ecm_fs_function[] = { /* CDC ECM control descriptors */ (struct usb_descriptor_header *) &ecm_control_intf, (struct usb_descriptor_header *) &ecm_header_desc, (struct usb_descriptor_header *) &ecm_union_desc, (struct usb_descriptor_header *) &ecm_desc, /* NOTE: status endpoint might need to be removed */ (struct usb_descriptor_header *) &fs_ecm_notify_desc, /* data interface, altsettings 0 and 1 */ (struct usb_descriptor_header *) &ecm_data_nop_intf, (struct usb_descriptor_header *) &ecm_data_intf, (struct usb_descriptor_header *) &fs_ecm_in_desc, (struct usb_descriptor_header *) &fs_ecm_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor hs_ecm_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = 
cpu_to_le16(ECM_STATUS_BYTECOUNT), .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4, }; static struct usb_endpoint_descriptor hs_ecm_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor hs_ecm_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_descriptor_header *ecm_hs_function[] = { /* CDC ECM control descriptors */ (struct usb_descriptor_header *) &ecm_control_intf, (struct usb_descriptor_header *) &ecm_header_desc, (struct usb_descriptor_header *) &ecm_union_desc, (struct usb_descriptor_header *) &ecm_desc, /* NOTE: status endpoint might need to be removed */ (struct usb_descriptor_header *) &hs_ecm_notify_desc, /* data interface, altsettings 0 and 1 */ (struct usb_descriptor_header *) &ecm_data_nop_intf, (struct usb_descriptor_header *) &ecm_data_intf, (struct usb_descriptor_header *) &hs_ecm_in_desc, (struct usb_descriptor_header *) &hs_ecm_out_desc, NULL, }; /* string descriptors: */ static struct usb_string ecm_string_defs[] = { [0].s = "CDC Ethernet Control Model (ECM)", [1].s = NULL /* DYNAMIC */, [2].s = "CDC Ethernet Data", { } /* end of list */ }; static struct usb_gadget_strings ecm_string_table = { .language = 0x0409, /* en-us */ .strings = ecm_string_defs, }; static struct usb_gadget_strings *ecm_strings[] = { &ecm_string_table, NULL, }; /*-------------------------------------------------------------------------*/ static void ecm_do_notify(struct f_ecm *ecm) { struct usb_request *req = ecm->notify_req; struct usb_cdc_notification *event; struct usb_composite_dev *cdev = ecm->port.func.config->cdev; __le32 *data; int status; /* notification already in flight? 
*/ if (!req) return; event = req->buf; switch (ecm->notify_state) { case ECM_NOTIFY_NONE: return; case ECM_NOTIFY_CONNECT: event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; if (ecm->is_open) event->wValue = cpu_to_le16(1); else event->wValue = cpu_to_le16(0); event->wLength = 0; req->length = sizeof *event; DBG(cdev, "notify connect %s\n", ecm->is_open ? "true" : "false"); ecm->notify_state = ECM_NOTIFY_SPEED; break; case ECM_NOTIFY_SPEED: event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE; event->wValue = cpu_to_le16(0); event->wLength = cpu_to_le16(8); req->length = ECM_STATUS_BYTECOUNT; /* SPEED_CHANGE data is up/down speeds in bits/sec */ data = req->buf + sizeof *event; data[0] = cpu_to_le32(ecm_bitrate(cdev->gadget)); data[1] = data[0]; DBG(cdev, "notify speed %d\n", ecm_bitrate(cdev->gadget)); ecm->notify_state = ECM_NOTIFY_NONE; break; } event->bmRequestType = 0xA1; event->wIndex = cpu_to_le16(ecm->ctrl_id); ecm->notify_req = NULL; status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC); if (status < 0) { ecm->notify_req = req; DBG(cdev, "notify --> %d\n", status); } } static void ecm_notify(struct f_ecm *ecm) { /* NOTE on most versions of Linux, host side cdc-ethernet * won't listen for notifications until its netdevice opens. * The first notification then sits in the FIFO for a long * time, and the second one is queued. 
*/ ecm->notify_state = ECM_NOTIFY_CONNECT; ecm_do_notify(ecm); } static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req) { struct f_ecm *ecm = req->context; struct usb_composite_dev *cdev = ecm->port.func.config->cdev; struct usb_cdc_notification *event = req->buf; switch (req->status) { case 0: /* no fault */ break; case -ECONNRESET: case -ESHUTDOWN: ecm->notify_state = ECM_NOTIFY_NONE; break; default: DBG(cdev, "event %02x --> %d\n", event->bNotificationType, req->status); break; } ecm->notify_req = req; ecm_do_notify(ecm); } static int ecm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct f_ecm *ecm = func_to_ecm(f); struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); /* composite driver infrastructure handles everything except * CDC class messages; interface activation uses set_alt(). */ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_SET_ETHERNET_PACKET_FILTER: /* see 6.2.30: no data, wIndex = interface, * wValue = packet filter bitmap */ if (w_length != 0 || w_index != ecm->ctrl_id) goto invalid; DBG(cdev, "packet filter %02x\n", w_value); /* REVISIT locking of cdc_filter. This assumes the UDC * driver won't have a concurrent packet TX irq running on * another CPU; or that if it does, this write is atomic... 
*/ ecm->port.cdc_filter = w_value; value = 0; break; /* and optionally: * case USB_CDC_SEND_ENCAPSULATED_COMMAND: * case USB_CDC_GET_ENCAPSULATED_RESPONSE: * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS: * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER: * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER: * case USB_CDC_GET_ETHERNET_STATISTIC: */ default: invalid: DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } /* respond with data transfer or status phase? */ if (value >= 0) { DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); req->zero = 0; req->length = value; value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (value < 0) ERROR(cdev, "ecm req %02x.%02x response err %d\n", ctrl->bRequestType, ctrl->bRequest, value); } /* device either stalls (value < 0) or reports success */ return value; } static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_ecm *ecm = func_to_ecm(f); struct usb_composite_dev *cdev = f->config->cdev; /* Control interface has only altsetting 0 */ if (intf == ecm->ctrl_id) { if (alt != 0) goto fail; if (ecm->notify->driver_data) { VDBG(cdev, "reset ecm control %d\n", intf); usb_ep_disable(ecm->notify); } else { VDBG(cdev, "init ecm ctrl %d\n", intf); ecm->notify_desc = ep_choose(cdev->gadget, ecm->hs.notify, ecm->fs.notify); } usb_ep_enable(ecm->notify, ecm->notify_desc); ecm->notify->driver_data = ecm; /* Data interface has two altsettings, 0 and 1 */ } else if (intf == ecm->data_id) { if (alt > 1) goto fail; if (ecm->port.in_ep->driver_data) { DBG(cdev, "reset ecm\n"); gether_disconnect(&ecm->port); } if (!ecm->port.in) { DBG(cdev, "init ecm\n"); ecm->port.in = ep_choose(cdev->gadget, ecm->hs.in, ecm->fs.in); ecm->port.out = ep_choose(cdev->gadget, ecm->hs.out, ecm->fs.out); } /* CDC Ethernet only sends data in non-default altsettings. 
* Changing altsettings resets filters, statistics, etc. */ if (alt == 1) { struct net_device *net; /* Enable zlps by default for ECM conformance; * override for musb_hdrc (avoids txdma ovhead). */ ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget) ); ecm->port.cdc_filter = DEFAULT_FILTER; DBG(cdev, "activate ecm\n"); net = gether_connect(&ecm->port); if (IS_ERR(net)) return PTR_ERR(net); } /* NOTE this can be a minor disagreement with the ECM spec, * which says speed notifications will "always" follow * connection notifications. But we allow one connect to * follow another (if the first is in flight), and instead * just guarantee that a speed notification is always sent. */ ecm_notify(ecm); } else goto fail; return 0; fail: return -EINVAL; } /* Because the data interface supports multiple altsettings, * this ECM function *MUST* implement a get_alt() method. */ static int ecm_get_alt(struct usb_function *f, unsigned intf) { struct f_ecm *ecm = func_to_ecm(f); if (intf == ecm->ctrl_id) return 0; return ecm->port.in_ep->driver_data ? 1 : 0; } static void ecm_disable(struct usb_function *f) { struct f_ecm *ecm = func_to_ecm(f); struct usb_composite_dev *cdev = f->config->cdev; DBG(cdev, "ecm deactivated\n"); if (ecm->port.in_ep->driver_data) gether_disconnect(&ecm->port); if (ecm->notify->driver_data) { usb_ep_disable(ecm->notify); ecm->notify->driver_data = NULL; ecm->notify_desc = NULL; } } /*-------------------------------------------------------------------------*/ /* * Callbacks let us notify the host about connect/disconnect when the * net device is opened or closed. * * For testing, note that link states on this side include both opened * and closed variants of: * * - disconnected/unconfigured * - configured but inactive (data alt 0) * - configured and active (data alt 1) * * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and * SET_INTERFACE (altsetting). 
Remember also that "configured" doesn't * imply the host is actually polling the notification endpoint, and * likewise that "active" doesn't imply it's actually using the data * endpoints for traffic. */ static void ecm_open(struct gether *geth) { struct f_ecm *ecm = func_to_ecm(&geth->func); DBG(ecm->port.func.config->cdev, "%s\n", __func__); ecm->is_open = true; ecm_notify(ecm); } static void ecm_close(struct gether *geth) { struct f_ecm *ecm = func_to_ecm(&geth->func); DBG(ecm->port.func.config->cdev, "%s\n", __func__); ecm->is_open = false; ecm_notify(ecm); } /*-------------------------------------------------------------------------*/ /* ethernet function driver setup/binding */ static int ecm_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_ecm *ecm = func_to_ecm(f); int status; struct usb_ep *ep; /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) goto fail; ecm->ctrl_id = status; ecm_control_intf.bInterfaceNumber = status; ecm_union_desc.bMasterInterface0 = status; status = usb_interface_id(c, f); if (status < 0) goto fail; ecm->data_id = status; ecm_data_nop_intf.bInterfaceNumber = status; ecm_data_intf.bInterfaceNumber = status; ecm_union_desc.bSlaveInterface0 = status; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc); if (!ep) goto fail; ecm->port.in_ep = ep; ep->driver_data = cdev; /* claim */ ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc); if (!ep) goto fail; ecm->port.out_ep = ep; ep->driver_data = cdev; /* claim */ /* NOTE: a status/notification endpoint is *OPTIONAL* but we * don't treat it that way. It's simpler, and some newer CDC * profiles (wireless handsets) no longer treat it as optional. 
*/ ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc); if (!ep) goto fail; ecm->notify = ep; ep->driver_data = cdev; /* claim */ status = -ENOMEM; /* allocate notification request and buffer */ ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); if (!ecm->notify_req) goto fail; ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL); if (!ecm->notify_req->buf) goto fail; ecm->notify_req->context = ecm; ecm->notify_req->complete = ecm_notify_complete; /* copy descriptors, and track endpoint copies */ f->descriptors = usb_copy_descriptors(ecm_fs_function); if (!f->descriptors) goto fail; ecm->fs.in = usb_find_endpoint(ecm_fs_function, f->descriptors, &fs_ecm_in_desc); ecm->fs.out = usb_find_endpoint(ecm_fs_function, f->descriptors, &fs_ecm_out_desc); ecm->fs.notify = usb_find_endpoint(ecm_fs_function, f->descriptors, &fs_ecm_notify_desc); /* support all relevant hardware speeds... we expect that when * hardware is dual speed, all bulk-capable endpoints work at * both speeds */ if (gadget_is_dualspeed(c->cdev->gadget)) { hs_ecm_in_desc.bEndpointAddress = fs_ecm_in_desc.bEndpointAddress; hs_ecm_out_desc.bEndpointAddress = fs_ecm_out_desc.bEndpointAddress; hs_ecm_notify_desc.bEndpointAddress = fs_ecm_notify_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(ecm_hs_function); if (!f->hs_descriptors) goto fail; ecm->hs.in = usb_find_endpoint(ecm_hs_function, f->hs_descriptors, &hs_ecm_in_desc); ecm->hs.out = usb_find_endpoint(ecm_hs_function, f->hs_descriptors, &hs_ecm_out_desc); ecm->hs.notify = usb_find_endpoint(ecm_hs_function, f->hs_descriptors, &hs_ecm_notify_desc); } /* NOTE: all that is done without knowing or caring about * the network link ... which is unavailable to this code * until we're activated via set_alt(). */ ecm->port.open = ecm_open; ecm->port.close = ecm_close; DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n", gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", ecm->port.in_ep->name, ecm->port.out_ep->name, ecm->notify->name); return 0; fail: if (f->descriptors) usb_free_descriptors(f->descriptors); if (ecm->notify_req) { kfree(ecm->notify_req->buf); usb_ep_free_request(ecm->notify, ecm->notify_req); } /* we might as well release our claims on endpoints */ if (ecm->notify) ecm->notify->driver_data = NULL; if (ecm->port.out) ecm->port.out_ep->driver_data = NULL; if (ecm->port.in) ecm->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); return status; } static void ecm_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_ecm *ecm = func_to_ecm(f); DBG(c->cdev, "ecm unbind\n"); if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); kfree(ecm->notify_req->buf); usb_ep_free_request(ecm->notify, ecm->notify_req); ecm_string_defs[1].s = NULL; kfree(ecm); } /** * ecm_bind_config - add CDC Ethernet network link to a configuration * @c: the configuration to support the network link * @ethaddr: a buffer in which the ethernet address of the host side * side of the link was recorded * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. * * Caller must have called @gether_setup(). Caller is also responsible * for calling @gether_cleanup() before module unload. 
*/ int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]) { struct f_ecm *ecm; int status; if (!can_support_ecm(c->cdev->gadget) || !ethaddr) return -EINVAL; /* maybe allocate device-global string IDs */ if (ecm_string_defs[0].id == 0) { /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; ecm_string_defs[0].id = status; ecm_control_intf.iInterface = status; /* data interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; ecm_string_defs[2].id = status; ecm_data_intf.iInterface = status; /* MAC address */ status = usb_string_id(c->cdev); if (status < 0) return status; ecm_string_defs[1].id = status; ecm_desc.iMACAddress = status; } /* allocate and initialize one new instance */ ecm = kzalloc(sizeof *ecm, GFP_KERNEL); if (!ecm) return -ENOMEM; /* export host's Ethernet address in CDC format */ snprintf(ecm->ethaddr, sizeof ecm->ethaddr, "%02X%02X%02X%02X%02X%02X", ethaddr[0], ethaddr[1], ethaddr[2], ethaddr[3], ethaddr[4], ethaddr[5]); ecm_string_defs[1].s = ecm->ethaddr; ecm->port.cdc_filter = DEFAULT_FILTER; ecm->port.func.name = "cdc_ethernet"; ecm->port.func.strings = ecm_strings; /* descriptors are per-instance copies */ ecm->port.func.bind = ecm_bind; ecm->port.func.unbind = ecm_unbind; ecm->port.func.set_alt = ecm_set_alt; ecm->port.func.get_alt = ecm_get_alt; ecm->port.func.setup = ecm_setup; ecm->port.func.disable = ecm_disable; status = usb_add_function(c, &ecm->port.func); if (status) { ecm_string_defs[1].s = NULL; kfree(ecm); } return status; }
gpl-2.0
jerdog/android_kernel_oppo_find7
drivers/net/wireless/bcmdhd/dhd_linux_sched.c
3623
1604
/* * Expose some of the kernel scheduler routines * * Copyright (C) 1999-2012, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * * $Id: dhd_linux_sched.c 291086 2011-10-21 01:17:24Z $ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <typedefs.h> #include <linuxver.h> int setScheduler(struct task_struct *p, int policy, struct sched_param *param) { int rc = 0; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) rc = sched_setscheduler(p, policy, param); #endif /* LinuxVer */ return rc; }
gpl-2.0
Li-poly/stock-multi-hack
arch/powerpc/kernel/signal.c
4391
5191
/*
 * Common signal handling code for both 32 and 64 bits
 *
 * Copyright (c) 2007 Benjamin Herrenschmidt, IBM Coproration
 * Extracted from signal_32.c and signal_64.c
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/key.h>
#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/debug.h>

#include "signal.h"

/* Log an error when sending an unhandled signal to a process. Controlled
 * through debug.exception-trace sysctl. */

int show_unhandled_signals = 0;

/*
 * Allocate space for the signal frame
 *
 * Picks the stack to use (alternate signal stack if SA_ONSTACK is set and
 * we are not already on it), carves out @frame_size bytes below the current
 * stack pointer, 16-byte aligns the result, and verifies the user can write
 * the region.  Returns NULL if the frame would not be writable.
 */
void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
			   size_t frame_size, int is_32)
{
	unsigned long oldsp, newsp;

	/* Default to using normal stack */
	oldsp = get_clean_sp(regs, is_32);

	/* Check for alt stack */
	if ((ka->sa.sa_flags & SA_ONSTACK) &&
	    current->sas_ss_size && !on_sig_stack(oldsp))
		oldsp = (current->sas_ss_sp + current->sas_ss_size);

	/* Get aligned frame */
	newsp = (oldsp - frame_size) & ~0xFUL;

	/* Check access */
	if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp))
		return NULL;

	return (void __user *)newsp;
}

/*
 * Restore the user process's signal mask
 *
 * Strips the unblockable signals (SIGKILL/SIGSTOP via ~_BLOCKABLE) before
 * installing @set as the current blocked mask.
 */
void restore_sigmask(sigset_t *set)
{
	sigdelsetmask(set, ~_BLOCKABLE);
	set_current_blocked(set);
}

/*
 * Decide whether an interrupted system call must be restarted, and patch
 * the register state accordingly.
 *
 * Only acts when the trap was a syscall (0x0C00) that signalled an error
 * (CR0.SO set, bit 0x10000000 of CCR).  Depending on the ERESTART* code in
 * gpr[3] and whether a handler will run (@has_handler), either rewinds the
 * nip by one instruction to re-issue the syscall (restoring the original
 * gpr[3] argument, or loading __NR_restart_syscall into gpr[0]), or makes
 * the syscall return -EINTR to the handler's caller.
 */
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
				  int has_handler)
{
	unsigned long ret = regs->gpr[3];
	int restart = 1;

	/* syscall ? */
	if (TRAP(regs) != 0x0C00)
		return;

	/* error signalled ? */
	if (!(regs->ccr & 0x10000000))
		return;

	switch (ret) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		/* ERESTARTNOHAND means that the syscall should only be
		 * restarted if there was no handler for the signal, and since
		 * we only get here if there is a handler, we dont restart.
		 */
		restart = !has_handler;
		break;
	case ERESTARTSYS:
		/* ERESTARTSYS means to restart the syscall if there is no
		 * handler or the handler was registered with SA_RESTART
		 */
		restart = !has_handler ||
			(ka->sa.sa_flags & SA_RESTART) != 0;
		break;
	case ERESTARTNOINTR:
		/* ERESTARTNOINTR means that the syscall should be
		 * called again after the signal handler returns.
		 */
		break;
	default:
		return;
	}
	if (restart) {
		if (ret == ERESTART_RESTARTBLOCK)
			regs->gpr[0] = __NR_restart_syscall;
		else
			regs->gpr[3] = regs->orig_gpr3;
		/* Rewind nip so the sc instruction is executed again. */
		regs->nip -= 4;
		regs->result = 0;
	} else {
		regs->result = -EINTR;
		regs->gpr[3] = EINTR;
		regs->ccr |= 0x10000000;
	}
}

/*
 * Dequeue one pending signal (if any) and set up its handler frame.
 *
 * Handles syscall-restart bookkeeping, re-arms the DABR hardware
 * watchpoint and breakpoints before returning to user space, and
 * dispatches to the 32-bit (rt or non-rt) or 64-bit frame builders.
 * Returns nonzero when a handler frame was successfully set up.
 */
static int do_signal(struct pt_regs *regs)
{
	sigset_t *oldset;
	siginfo_t info;
	int signr;
	struct k_sigaction ka;
	int ret;
	int is32 = is_32bit_task();

	/* If a sigmask restore is pending (e.g. after ppoll/pselect),
	 * the mask to save into the frame is the saved one. */
	if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK)
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	/* Is there any syscall restart business here ? */
	check_syscall_restart(regs, &ka, signr > 0);

	if (signr <= 0) {
		struct thread_info *ti = current_thread_info();
		/* No signal to deliver -- put the saved sigmask back */
		if (ti->local_flags & _TLF_RESTORE_SIGMASK) {
			ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
		}
		regs->trap = 0;
		return 0;               /* no signals delivered */
	}

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Reenable the DABR before delivering the signal to
	 * user space. The DABR will have been cleared if it
	 * triggered inside the kernel.
	 */
	if (current->thread.dabr)
		set_dabr(current->thread.dabr);
#endif
	/* Re-enable the breakpoints for the signal stack */
	thread_change_pc(current, regs);

	if (is32) {
		if (ka.sa.sa_flags & SA_SIGINFO)
			ret = handle_rt_signal32(signr, &ka, &info, oldset,
					regs);
		else
			ret = handle_signal32(signr, &ka, &info, oldset,
					regs);
	} else {
		ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
	}

	regs->trap = 0;
	if (ret) {
		block_sigmask(&ka, signr);

		/*
		 * A signal was successfully delivered; the saved sigmask is in
		 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
		 */
		current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;

		/*
		 * Let tracing know that we've done the handler setup.
		 */
		tracehook_signal_handler(signr, &info, &ka, regs,
					 test_thread_flag(TIF_SINGLESTEP));
	}

	return ret;
}

/*
 * Called on return to user space when TIF work is pending: deliver
 * signals and/or run the tracehook resume work (including session
 * keyring replacement).
 */
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}

/*
 * sigaltstack syscall entry: r5-r8 are unused register-slot arguments
 * required by the powerpc syscall calling convention; gpr[1] is the
 * user stack pointer used for the on-stack check.
 */
long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
		unsigned long r5, unsigned long r6, unsigned long r7,
		unsigned long r8, struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->gpr[1]);
}
gpl-2.0
SoloProject/platform_kernel_lge_hammerhead
net/netfilter/nf_conntrack_timestamp.c
4903
2766
/*
 * (C) 2010 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation (or any later at your option).
 */

#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_timestamp.h>

/* Module-wide default for per-netns sysctl_tstamp; settable at load
 * time and at runtime via /sys/module (mode 0644). */
static bool nf_ct_tstamp __read_mostly;
module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");

#ifdef CONFIG_SYSCTL
/* Template table; .data is rewritten per-netns in init_sysctl below. */
static struct ctl_table tstamp_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_timestamp",
		.data		= &init_net.ct.sysctl_tstamp,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}
};
#endif /* CONFIG_SYSCTL */

/* Conntrack extension descriptor for the timestamp area attached to
 * each nf_conn. */
static struct nf_ct_ext_type tstamp_extend __read_mostly = {
	.len	= sizeof(struct nf_conn_tstamp),
	.align	= __alignof__(struct nf_conn_tstamp),
	.id	= NF_CT_EXT_TSTAMP,
};

#ifdef CONFIG_SYSCTL
/*
 * Register the nf_conntrack_timestamp sysctl for @net.
 *
 * The template table is duplicated so each namespace gets its own
 * .data pointer.  Returns 0 on success, -ENOMEM on allocation or
 * registration failure (the duplicated table is freed on the latter).
 */
static int nf_conntrack_tstamp_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out;

	table[0].data = &net->ct.sysctl_tstamp;

	net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
			nf_net_netfilter_sysctl_path, table);
	if (!net->ct.tstamp_sysctl_header) {
		printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
		goto out_register;
	}
	return 0;

out_register:
	kfree(table);
out:
	return -ENOMEM;
}

/*
 * Unregister @net's sysctl entry and free the duplicated table.
 * The table pointer must be fetched before unregistering.
 */
static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = net->ct.tstamp_sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
	kfree(table);
}
#else
/* CONFIG_SYSCTL disabled: both hooks collapse to no-ops. */
static int nf_conntrack_tstamp_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
{
}
#endif

/*
 * Per-netns init: seed sysctl_tstamp from the module parameter,
 * register the conntrack extension once (init_net only), then the
 * sysctl.  Unwinds the extension registration if the sysctl setup
 * fails.  Returns 0 or a negative errno.
 */
int nf_conntrack_tstamp_init(struct net *net)
{
	int ret;

	net->ct.sysctl_tstamp = nf_ct_tstamp;

	if (net_eq(net, &init_net)) {
		ret = nf_ct_extend_register(&tstamp_extend);
		if (ret < 0) {
			printk(KERN_ERR "nf_ct_tstamp: Unable to register "
					"extension\n");
			goto out_extend_register;
		}
	}

	ret = nf_conntrack_tstamp_init_sysctl(net);
	if (ret < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&tstamp_extend);
out_extend_register:
	return ret;
}

/*
 * Per-netns teardown, mirroring nf_conntrack_tstamp_init() in reverse
 * order: sysctl first, then the global extension (init_net only).
 */
void nf_conntrack_tstamp_fini(struct net *net)
{
	nf_conntrack_tstamp_fini_sysctl(net);
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&tstamp_extend);
}
gpl-2.0
livlogik/Evil_Yummy_Gumdrop--Tmo-V10-Kernel
arch/mips/pci/ops-sni.c
5159
3717
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SNI specific PCI support for RM200/RM300.
 *
 * Copyright (C) 1997 - 2000, 2003 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/types.h>

#include <asm/sni.h>

/*
 * It seems that on the RM200 only lower 3 bits of the 5 bit PCI device
 * address are decoded.  We therefore manually have to reject attempts at
 * reading outside this range.  Being on the paranoid side we only do this
 * test for bus 0 and hope forwarding and decoding work properly for any
 * subordinated busses.
 *
 * ASIC PCI only supports type 1 config cycles.
 */
static int set_config_address(unsigned int busno, unsigned int devfn, int reg)
{
	if ((devfn > 255) || (reg > 255))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	/* See comment above: only devices 0-7 decode on bus 0. */
	if (busno == 0 && devfn >= PCI_DEVFN(8, 0))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Type 1 cycle: bus | devfn | dword-aligned register number. */
	*(volatile u32 *)PCIMT_CONFIG_ADDRESS =
		 ((busno & 0xff) << 16) |
		 ((devfn & 0xff) <<  8) |
		  (reg   & 0xfc);

	return PCIBIOS_SUCCESSFUL;
}

/*
 * PCIMT config-space read.  Latches the address, then reads 1, 2 or
 * 4 bytes from the data port; sub-dword accesses use the low address
 * bits of @reg as the byte/word offset within the data register.
 */
static int pcimt_read(struct pci_bus *bus, unsigned int devfn, int reg,
		      int size, u32 * val)
{
	int res;

	if ((res = set_config_address(bus->number, devfn, reg)))
		return res;

	switch (size) {
	case 1:
		*val = inb(PCIMT_CONFIG_DATA + (reg & 3));
		break;
	case 2:
		*val = inw(PCIMT_CONFIG_DATA + (reg & 2));
		break;
	case 4:
		*val = inl(PCIMT_CONFIG_DATA);
		break;
	}

	return 0;
}

/*
 * PCIMT config-space write; mirror image of pcimt_read().
 */
static int pcimt_write(struct pci_bus *bus, unsigned int devfn, int reg,
		       int size, u32 val)
{
	int res;

	if ((res = set_config_address(bus->number, devfn, reg)))
		return res;

	switch (size) {
	case 1:
		outb(val, PCIMT_CONFIG_DATA + (reg & 3));
		break;
	case 2:
		outw(val, PCIMT_CONFIG_DATA + (reg & 2));
		break;
	case 4:
		outl(val, PCIMT_CONFIG_DATA);
		break;
	}

	return 0;
}

struct pci_ops sni_pcimt_ops = {
	.read = pcimt_read,
	.write = pcimt_write,
};

/*
 * PCIT variant: program the standard 0xcf8 CONFIG_ADDRESS port with a
 * type 1 cycle (enable bit 31 set).  Unlike the PCIMT path, the bus
 * number is range-checked too.
 */
static int pcit_set_config_address(unsigned int busno, unsigned int devfn, int reg)
{
	if ((devfn > 255) || (reg > 255) || (busno > 255))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	outl((1 << 31) | ((busno & 0xff) << 16) | ((devfn & 0xff) << 8) | (reg & 0xfc), 0xcf8);
	return PCIBIOS_SUCCESSFUL;
}

/*
 * PCIT config-space read, with a device-presence probe on bus 0 to
 * avoid a data bus error when addressing an absent device.
 */
static int pcit_read(struct pci_bus *bus, unsigned int devfn, int reg,
		     int size, u32 * val)
{
	int res;

	/*
	 * on bus 0 we need to check, whether there is a device answering
	 * for the devfn by doing a config write and checking the result. If
	 * we don't do it, we will get a data bus error
	 */
	if (bus->number == 0) {
		pcit_set_config_address(0, 0, 0x68);
		outl(inl(0xcfc) | 0xc0000000, 0xcfc);
		if ((res = pcit_set_config_address(0, devfn, 0)))
			return res;
		outl(0xffffffff, 0xcfc);
		pcit_set_config_address(0, 0, 0x68);
		/* NOTE(review): bit 0x100000 at register 0x68 appears to be
		 * a chipset error/abort flag — set means no device answered. */
		if (inl(0xcfc) & 0x100000)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}
	if ((res = pcit_set_config_address(bus->number, devfn, reg)))
		return res;

	switch (size) {
	case 1:
		*val = inb(PCIMT_CONFIG_DATA + (reg & 3));
		break;
	case 2:
		*val = inw(PCIMT_CONFIG_DATA + (reg & 2));
		break;
	case 4:
		*val = inl(PCIMT_CONFIG_DATA);
		break;
	}

	return 0;
}

/*
 * PCIT config-space write; mirror image of pcit_read() minus the
 * bus-0 presence probe.
 */
static int pcit_write(struct pci_bus *bus, unsigned int devfn, int reg,
		      int size, u32 val)
{
	int res;

	if ((res = pcit_set_config_address(bus->number, devfn, reg)))
		return res;

	switch (size) {
	case 1:
		outb(val, PCIMT_CONFIG_DATA + (reg & 3));
		break;
	case 2:
		outw(val, PCIMT_CONFIG_DATA + (reg & 2));
		break;
	case 4:
		outl(val, PCIMT_CONFIG_DATA);
		break;
	}

	return 0;
}

struct pci_ops sni_pcit_ops = {
	.read = pcit_read,
	.write = pcit_write,
};
gpl-2.0
clamor95/kernel_asus_tf300tl
arch/powerpc/platforms/chrp/nvram.c
10023
2253
/*
 * c 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * /dev/nvram driver for PPC
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include "chrp.h"

static unsigned int nvram_size;
/* Single-byte RTAS transfer buffer; only nvram_buf[0] is used per call.
 * Guarded by nvram_lock since RTAS writes into it. */
static unsigned char nvram_buf[4];
static DEFINE_SPINLOCK(nvram_lock);

/*
 * Read one byte from NVRAM at @addr via the RTAS "nvram-fetch" call.
 * Returns 0xff for out-of-range addresses or on any RTAS failure
 * (0xff is the conventional "erased" value, indistinguishable from
 * a genuine 0xff byte).
 */
static unsigned char chrp_nvram_read(int addr)
{
	unsigned int done;
	unsigned long flags;
	unsigned char ret;

	if (addr >= nvram_size) {
		printk(KERN_DEBUG "%s: read addr %d > nvram_size %u\n",
		       current->comm, addr, nvram_size);
		return 0xff;
	}
	spin_lock_irqsave(&nvram_lock, flags);
	/* rtas_call returns 0 and done == 1 on a successful 1-byte fetch. */
	if ((rtas_call(rtas_token("nvram-fetch"), 3, 2, &done, addr,
		       __pa(nvram_buf), 1) != 0) || 1 != done)
		ret = 0xff;
	else
		ret = nvram_buf[0];
	spin_unlock_irqrestore(&nvram_lock, flags);

	return ret;
}

/*
 * Write one byte @val to NVRAM at @addr via the RTAS "nvram-store"
 * call.  Out-of-range addresses and RTAS failures are logged and
 * silently dropped (the ppc_md.nvram_write_val hook returns void).
 */
static void chrp_nvram_write(int addr, unsigned char val)
{
	unsigned int done;
	unsigned long flags;

	if (addr >= nvram_size) {
		printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n",
		       current->comm, addr, nvram_size);
		return;
	}
	spin_lock_irqsave(&nvram_lock, flags);
	nvram_buf[0] = val;
	if ((rtas_call(rtas_token("nvram-store"), 3, 2, &done, addr,
		       __pa(nvram_buf), 1) != 0) || 1 != done)
		printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr);
	spin_unlock_irqrestore(&nvram_lock, flags);
}

/*
 * Probe the "nvram" device-tree node for its "#bytes" size property
 * and, if found, install the RTAS-backed byte accessors into ppc_md.
 * Silently does nothing when the node or property is absent/malformed.
 */
void __init chrp_nvram_init(void)
{
	struct device_node *nvram;
	const unsigned int *nbytes_p;
	unsigned int proplen;

	nvram = of_find_node_by_type(NULL, "nvram");
	if (nvram == NULL)
		return;

	nbytes_p = of_get_property(nvram, "#bytes", &proplen);
	if (nbytes_p == NULL || proplen != sizeof(unsigned int)) {
		of_node_put(nvram);
		return;
	}

	nvram_size = *nbytes_p;

	printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
	of_node_put(nvram);

	ppc_md.nvram_read_val = chrp_nvram_read;
	ppc_md.nvram_write_val = chrp_nvram_write;

	return;
}
gpl-2.0
mericon/Xperia-S-msm8660
drivers/net/msm_rmnet_sdio.c
296
17308
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * RMNET SDIO Module. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/wakelock.h> #include <linux/if_arp.h> #include <linux/msm_rmnet.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif #include <mach/sdio_dmux.h> /* Debug message support */ static int msm_rmnet_sdio_debug_mask; module_param_named(debug_enable, msm_rmnet_sdio_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define DEBUG_MASK_LVL0 (1U << 0) #define DEBUG_MASK_LVL1 (1U << 1) #define DEBUG_MASK_LVL2 (1U << 2) #define DBG(m, x...) do { \ if (msm_rmnet_sdio_debug_mask & m) \ pr_info(x); \ } while (0) #define DBG0(x...) DBG(DEBUG_MASK_LVL0, x) #define DBG1(x...) DBG(DEBUG_MASK_LVL1, x) #define DBG2(x...) 
DBG(DEBUG_MASK_LVL2, x) /* Configure device instances */ #define RMNET_DEVICE_COUNT (8) /* allow larger frames */ #define RMNET_DATA_LEN 2000 #define DEVICE_ID_INVALID -1 #define DEVICE_INACTIVE 0 #define DEVICE_ACTIVE 1 #define HEADROOM_FOR_SDIO 8 /* for mux header */ #define HEADROOM_FOR_QOS 8 #define TAILROOM 8 /* for padding by mux layer */ struct rmnet_private { struct net_device_stats stats; uint32_t ch_id; #ifdef CONFIG_MSM_RMNET_DEBUG ktime_t last_packet; unsigned long wakeups_xmit; unsigned long wakeups_rcv; unsigned long timeout_us; #endif struct sk_buff *skb; spinlock_t lock; struct tasklet_struct tsklt; u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */ uint8_t device_up; uint8_t in_reset; }; #ifdef CONFIG_MSM_RMNET_DEBUG static unsigned long timeout_us; #ifdef CONFIG_HAS_EARLYSUSPEND /* * If early suspend is enabled then we specify two timeout values, * screen on (default), and screen is off. */ static unsigned long timeout_suspend_us; static struct device *rmnet0; /* Set timeout in us when the screen is off. 
*/ static ssize_t timeout_suspend_store(struct device *d, struct device_attribute *attr, const char *buf, size_t n) { timeout_suspend_us = strict_strtoul(buf, NULL, 10); return n; } static ssize_t timeout_suspend_show(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us); } static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show, timeout_suspend_store); static void rmnet_early_suspend(struct early_suspend *handler) { if (rmnet0) { struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0)); p->timeout_us = timeout_suspend_us; } } static void rmnet_late_resume(struct early_suspend *handler) { if (rmnet0) { struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0)); p->timeout_us = timeout_us; } } static struct early_suspend rmnet_power_suspend = { .suspend = rmnet_early_suspend, .resume = rmnet_late_resume, }; static int __init rmnet_late_init(void) { register_early_suspend(&rmnet_power_suspend); return 0; } late_initcall(rmnet_late_init); #endif /* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */ static int rmnet_cause_wakeup(struct rmnet_private *p) { int ret = 0; ktime_t now; if (p->timeout_us == 0) /* Check if disabled */ return 0; /* Use real (wall) time. */ now = ktime_get_real(); if (ktime_us_delta(now, p->last_packet) > p->timeout_us) ret = 1; p->last_packet = now; return ret; } static ssize_t wakeups_xmit_show(struct device *d, struct device_attribute *attr, char *buf) { struct rmnet_private *p = netdev_priv(to_net_dev(d)); return sprintf(buf, "%lu\n", p->wakeups_xmit); } DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL); static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr, char *buf) { struct rmnet_private *p = netdev_priv(to_net_dev(d)); return sprintf(buf, "%lu\n", p->wakeups_rcv); } DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL); /* Set timeout in us. 
*/ static ssize_t timeout_store(struct device *d, struct device_attribute *attr, const char *buf, size_t n) { #ifndef CONFIG_HAS_EARLYSUSPEND struct rmnet_private *p = netdev_priv(to_net_dev(d)); p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10); #else /* If using early suspend/resume hooks do not write the value on store. */ timeout_us = strict_strtoul(buf, NULL, 10); #endif return n; } static ssize_t timeout_show(struct device *d, struct device_attribute *attr, char *buf) { struct rmnet_private *p = netdev_priv(to_net_dev(d)); p = netdev_priv(to_net_dev(d)); return sprintf(buf, "%lu\n", timeout_us); } DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store); #endif /* Forward declaration */ static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev) { __be16 protocol = 0; skb->dev = dev; /* Determine L3 protocol */ switch (skb->data[0] & 0xf0) { case 0x40: protocol = htons(ETH_P_IP); break; case 0x60: protocol = htons(ETH_P_IPV6); break; default: pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x", dev->name, skb->data[0] & 0xf0); /* skb will be dropped in upper layer for unknown protocol */ } return protocol; } static int count_this_packet(void *_hdr, int len) { struct ethhdr *hdr = _hdr; if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP)) return 0; return 1; } static int sdio_update_reset_state(struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); int new_state; new_state = msm_sdio_is_channel_in_reset(p->ch_id); if (p->in_reset != new_state) { p->in_reset = (uint8_t)new_state; if (p->in_reset) netif_carrier_off(dev); else netif_carrier_on(dev); return 1; } return 0; } /* Rx Callback, Called in Work Queue context */ static void sdio_recv_notify(void *dev, struct sk_buff *skb) { struct rmnet_private *p = netdev_priv(dev); unsigned long flags; u32 opmode; if (skb) { skb->dev = dev; /* Handle Rx frame format */ 
spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); if (RMNET_IS_MODE_IP(opmode)) { /* Driver in IP mode */ skb->protocol = rmnet_ip_type_trans(skb, dev); } else { /* Driver in Ethernet mode */ skb->protocol = eth_type_trans(skb, dev); } if (RMNET_IS_MODE_IP(opmode) || count_this_packet(skb->data, skb->len)) { #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_rcv += rmnet_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } DBG1("[%s] Rx packet #%lu len=%d\n", ((struct net_device *)dev)->name, p->stats.rx_packets, skb->len); /* Deliver to network stack */ netif_rx(skb); } else { spin_lock_irqsave(&p->lock, flags); if (!sdio_update_reset_state((struct net_device *)dev)) pr_err("[%s] %s: No skb received", ((struct net_device *)dev)->name, __func__); spin_unlock_irqrestore(&p->lock, flags); } } static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); int sdio_ret; struct QMI_QOS_HDR_S *qmih; u32 opmode; unsigned long flags; if (!netif_carrier_ok(dev)) { pr_err("[%s] %s: channel in reset", dev->name, __func__); goto xmit_out; } /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */ spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); if (RMNET_IS_MODE_QOS(opmode)) { qmih = (struct QMI_QOS_HDR_S *) skb_push(skb, sizeof(struct QMI_QOS_HDR_S)); qmih->version = 1; qmih->flags = 0; qmih->flow_id = skb->mark; } dev->trans_start = jiffies; sdio_ret = msm_sdio_dmux_write(p->ch_id, skb); if (sdio_ret != 0) { pr_err("[%s] %s: write returned error %d", dev->name, __func__, sdio_ret); goto xmit_out; } if (count_this_packet(skb->data, skb->len)) { p->stats.tx_packets++; p->stats.tx_bytes += skb->len; #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_xmit += rmnet_cause_wakeup(p); #endif } DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n", dev->name, p->stats.tx_packets, skb->len, skb->mark); return 
0; xmit_out: dev_kfree_skb_any(skb); p->stats.tx_errors++; return 0; } static void sdio_write_done(void *dev, struct sk_buff *skb) { struct rmnet_private *p = netdev_priv(dev); if (skb) dev_kfree_skb_any(skb); if (!p->in_reset) { DBG1("%s: write complete skb=%p\n", __func__, skb); if (netif_queue_stopped(dev) && msm_sdio_dmux_is_ch_low(p->ch_id)) { DBG0("%s: Low WM hit, waking queue=%p\n", __func__, skb); netif_wake_queue(dev); } } else { DBG1("%s: write in reset skb=%p\n", __func__, skb); } } static int __rmnet_open(struct net_device *dev) { int r; struct rmnet_private *p = netdev_priv(dev); DBG0("[%s] __rmnet_open()\n", dev->name); if (!p->device_up) { r = msm_sdio_dmux_open(p->ch_id, dev, sdio_recv_notify, sdio_write_done); if (r < 0) return -ENODEV; } p->device_up = DEVICE_ACTIVE; return 0; } static int rmnet_open(struct net_device *dev) { int rc = 0; DBG0("[%s] rmnet_open()\n", dev->name); rc = __rmnet_open(dev); if (rc == 0) netif_start_queue(dev); return rc; } static int __rmnet_close(struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); int rc = 0; if (p->device_up) { /* do not close rmnet port once up, this causes remote side to hang if tried to open again */ /* rc = msm_sdio_dmux_close(p->ch_id); */ p->device_up = DEVICE_INACTIVE; return rc; } else return -EBADF; } static int rmnet_stop(struct net_device *dev) { DBG0("[%s] rmnet_stop()\n", dev->name); __rmnet_close(dev); netif_stop_queue(dev); return 0; } static int rmnet_change_mtu(struct net_device *dev, int new_mtu) { if (0 > new_mtu || RMNET_DATA_LEN < new_mtu) return -EINVAL; DBG0("[%s] MTU change: old=%d new=%d\n", dev->name, dev->mtu, new_mtu); dev->mtu = new_mtu; return 0; } static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); if (netif_queue_stopped(dev)) { pr_err("[%s]fatal: rmnet_xmit called when " "netif_queue is stopped", dev->name); return 0; } _rmnet_xmit(skb, dev); if (msm_sdio_dmux_is_ch_full(p->ch_id)) { 
netif_stop_queue(dev); DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb); } return 0; } static struct net_device_stats *rmnet_get_stats(struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); return &p->stats; } static void rmnet_set_multicast_list(struct net_device *dev) { } static void rmnet_tx_timeout(struct net_device *dev) { pr_warning("[%s] rmnet_tx_timeout()\n", dev->name); } static const struct net_device_ops rmnet_ops_ether = { .ndo_open = rmnet_open, .ndo_stop = rmnet_stop, .ndo_start_xmit = rmnet_xmit, .ndo_get_stats = rmnet_get_stats, .ndo_set_multicast_list = rmnet_set_multicast_list, .ndo_tx_timeout = rmnet_tx_timeout, .ndo_do_ioctl = rmnet_ioctl, .ndo_change_mtu = rmnet_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static const struct net_device_ops rmnet_ops_ip = { .ndo_open = rmnet_open, .ndo_stop = rmnet_stop, .ndo_start_xmit = rmnet_xmit, .ndo_get_stats = rmnet_get_stats, .ndo_set_multicast_list = rmnet_set_multicast_list, .ndo_tx_timeout = rmnet_tx_timeout, .ndo_do_ioctl = rmnet_ioctl, .ndo_change_mtu = rmnet_change_mtu, .ndo_set_mac_address = 0, .ndo_validate_addr = 0, }; static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct rmnet_private *p = netdev_priv(dev); u32 old_opmode = p->operation_mode; unsigned long flags; int prev_mtu = dev->mtu; int rc = 0; /* Process IOCTL command */ switch (cmd) { case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */ /* Perform Ethernet config only if in IP mode currently*/ if (p->operation_mode & RMNET_MODE_LLP_IP) { ether_setup(dev); random_ether_addr(dev->dev_addr); dev->mtu = prev_mtu; dev->netdev_ops = &rmnet_ops_ether; spin_lock_irqsave(&p->lock, flags); p->operation_mode &= ~RMNET_MODE_LLP_IP; p->operation_mode |= RMNET_MODE_LLP_ETH; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): " "set Ethernet protocol mode\n", dev->name); } break; case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP 
protocol */ /* Perform IP config only if in Ethernet mode currently*/ if (p->operation_mode & RMNET_MODE_LLP_ETH) { /* Undo config done in ether_setup() */ dev->header_ops = 0; /* No header */ dev->type = ARPHRD_RAWIP; dev->hard_header_len = 0; dev->mtu = prev_mtu; dev->addr_len = 0; dev->flags &= ~(IFF_BROADCAST| IFF_MULTICAST); dev->needed_headroom = HEADROOM_FOR_SDIO + HEADROOM_FOR_QOS; dev->needed_tailroom = TAILROOM; dev->netdev_ops = &rmnet_ops_ip; spin_lock_irqsave(&p->lock, flags); p->operation_mode &= ~RMNET_MODE_LLP_ETH; p->operation_mode |= RMNET_MODE_LLP_IP; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): " "set IP protocol mode\n", dev->name); } break; case RMNET_IOCTL_GET_LLP: /* Get link protocol state */ ifr->ifr_ifru.ifru_data = (void *)(p->operation_mode & (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP)); break; case RMNET_IOCTL_SET_QOS_ENABLE: /* Set QoS header enabled */ spin_lock_irqsave(&p->lock, flags); p->operation_mode |= RMNET_MODE_QOS; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n", dev->name); break; case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */ spin_lock_irqsave(&p->lock, flags); p->operation_mode &= ~RMNET_MODE_QOS; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n", dev->name); break; case RMNET_IOCTL_GET_QOS: /* Get QoS header state */ ifr->ifr_ifru.ifru_data = (void *)(p->operation_mode & RMNET_MODE_QOS); break; case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */ ifr->ifr_ifru.ifru_data = (void *)p->operation_mode; break; case RMNET_IOCTL_OPEN: /* Open transport port */ rc = __rmnet_open(dev); DBG0("[%s] rmnet_ioctl(): open transport port\n", dev->name); break; case RMNET_IOCTL_CLOSE: /* Close transport port */ rc = __rmnet_close(dev); DBG0("[%s] rmnet_ioctl(): close transport port\n", dev->name); break; default: pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]", dev->name, cmd); return -EINVAL; } 
DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n", dev->name, __func__, cmd, old_opmode, p->operation_mode); return rc; } static void __init rmnet_setup(struct net_device *dev) { /* Using Ethernet mode by default */ dev->netdev_ops = &rmnet_ops_ether; ether_setup(dev); /* set this after calling ether_setup */ dev->mtu = RMNET_DATA_LEN; dev->needed_headroom = HEADROOM_FOR_SDIO + HEADROOM_FOR_QOS ; dev->needed_tailroom = TAILROOM; random_ether_addr(dev->dev_addr); dev->watchdog_timeo = 1000; /* 10 seconds? */ } static int __init rmnet_init(void) { int ret; struct device *d; struct net_device *dev; struct rmnet_private *p; unsigned n; pr_info("%s: SDIO devices[%d]\n", __func__, RMNET_DEVICE_COUNT); #ifdef CONFIG_MSM_RMNET_DEBUG timeout_us = 0; #ifdef CONFIG_HAS_EARLYSUSPEND timeout_suspend_us = 0; #endif #endif for (n = 0; n < RMNET_DEVICE_COUNT; n++) { dev = alloc_netdev(sizeof(struct rmnet_private), "rmnet_sdio%d", rmnet_setup); if (!dev) return -ENOMEM; d = &(dev->dev); p = netdev_priv(dev); /* Initial config uses Ethernet */ p->operation_mode = RMNET_MODE_LLP_ETH; p->ch_id = n; spin_lock_init(&p->lock); #ifdef CONFIG_MSM_RMNET_DEBUG p->timeout_us = timeout_us; p->wakeups_xmit = p->wakeups_rcv = 0; #endif ret = register_netdev(dev); if (ret) { free_netdev(dev); return ret; } #ifdef CONFIG_MSM_RMNET_DEBUG if (device_create_file(d, &dev_attr_timeout)) continue; if (device_create_file(d, &dev_attr_wakeups_xmit)) continue; if (device_create_file(d, &dev_attr_wakeups_rcv)) continue; #ifdef CONFIG_HAS_EARLYSUSPEND if (device_create_file(d, &dev_attr_timeout_suspend)) continue; /* Only care about rmnet0 for suspend/resume tiemout hooks. */ if (n == 0) rmnet0 = d; #endif #endif } return 0; } module_init(rmnet_init); MODULE_DESCRIPTION("MSM RMNET SDIO TRANSPORT"); MODULE_LICENSE("GPL v2");
gpl-2.0
skalk/linux
drivers/pci/hotplug/cpci_hotplug_pci.c
296
6429
// SPDX-License-Identifier: GPL-2.0+
/*
 * CompactPCI Hot Plug Driver PCI functions
 *
 * Copyright (C) 2002,2005 by SOMA Networks, Inc.
 *
 * All rights reserved.
 *
 * Send feedback to <scottm@somanetworks.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/proc_fs.h>
#include "../pci.h"
#include "cpci_hotplug.h"

#define MY_NAME	"cpci_hotplug"

extern int cpci_debug;

#define dbg(format, arg...)					\
	do {							\
		if (cpci_debug)					\
			printk(KERN_DEBUG "%s: " format "\n",	\
				MY_NAME, ## arg);		\
	} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg)


/*
 * Read the slot's Hot Swap CSR (at offset 2 past the CHSWP capability)
 * and report bit 0x0008 as the attention status.  Returns 0 if the
 * capability is absent or the config read fails.
 * NOTE(review): 0x0008 presumably corresponds to HS_CSR_LOO (blue LED);
 * confirm against cpci_hotplug.h / the PICMG Hot Swap spec.
 */
u8 cpci_get_attention_status(struct slot *slot)
{
	int hs_cap;
	u16 hs_csr;

	hs_cap = pci_bus_find_capability(slot->bus,
					 slot->devfn,
					 PCI_CAP_ID_CHSWP);
	if (!hs_cap)
		return 0;

	if (pci_bus_read_config_word(slot->bus,
				     slot->devfn,
				     hs_cap + 2,
				     &hs_csr))
		return 0;

	return hs_csr & 0x0008 ? 1 : 0;
}

/*
 * Set or clear the LOO (LED on/off) bit in the slot's Hot Swap CSR via
 * read-modify-write.  Returns 1 on success, 0 on any failure (missing
 * capability or config access error).
 */
int cpci_set_attention_status(struct slot *slot, int status)
{
	int hs_cap;
	u16 hs_csr;

	hs_cap = pci_bus_find_capability(slot->bus,
					 slot->devfn,
					 PCI_CAP_ID_CHSWP);
	if (!hs_cap)
		return 0;

	if (pci_bus_read_config_word(slot->bus,
				     slot->devfn,
				     hs_cap + 2,
				     &hs_csr))
		return 0;

	if (status)
		hs_csr |= HS_CSR_LOO;
	else
		hs_csr &= ~HS_CSR_LOO;

	if (pci_bus_write_config_word(slot->bus,
				      slot->devfn,
				      hs_cap + 2,
				      hs_csr))
		return 0;

	return 1;
}

/*
 * Return the raw Hot Swap CSR value, or 0xFFFF (all-ones, the usual
 * "no device" pattern) when the capability is absent or the read fails.
 */
u16 cpci_get_hs_csr(struct slot *slot)
{
	int hs_cap;
	u16 hs_csr;

	hs_cap = pci_bus_find_capability(slot->bus,
					 slot->devfn,
					 PCI_CAP_ID_CHSWP);
	if (!hs_cap)
		return 0xFFFF;

	if (pci_bus_read_config_word(slot->bus,
				     slot->devfn,
				     hs_cap + 2,
				     &hs_csr))
		return 0xFFFF;

	return hs_csr;
}

/*
 * Check the INS (insertion) bit; if set, clear it by writing the value
 * back (INS is write-1-to-clear).  Returns 1 if INS was set and the
 * clearing write succeeded, 0 otherwise.
 */
int cpci_check_and_clear_ins(struct slot *slot)
{
	int hs_cap;
	u16 hs_csr;
	int ins = 0;

	hs_cap = pci_bus_find_capability(slot->bus,
					 slot->devfn,
					 PCI_CAP_ID_CHSWP);
	if (!hs_cap)
		return 0;

	if (pci_bus_read_config_word(slot->bus,
				     slot->devfn,
				     hs_cap + 2,
				     &hs_csr))
		return 0;

	if (hs_csr & HS_CSR_INS) {
		/* Clear INS (by setting it) */
		if (pci_bus_write_config_word(slot->bus,
					      slot->devfn,
					      hs_cap + 2,
					      hs_csr))
			ins = 0;
		else
			ins = 1;
	}
	return ins;
}

/* Report whether the EXT (extraction) bit is set; does not clear it. */
int cpci_check_ext(struct slot *slot)
{
	int hs_cap;
	u16 hs_csr;
	int ext = 0;

	hs_cap = pci_bus_find_capability(slot->bus,
					 slot->devfn,
					 PCI_CAP_ID_CHSWP);
	if (!hs_cap)
		return 0;

	if (pci_bus_read_config_word(slot->bus,
				     slot->devfn,
				     hs_cap + 2,
				     &hs_csr))
		return 0;

	if (hs_csr & HS_CSR_EXT)
		ext = 1;

	return ext;
}

/*
 * Clear the EXT bit if it is set (write-1-to-clear, same idiom as INS).
 * Returns 0 on success, -ENODEV on any config-space failure.
 */
int cpci_clear_ext(struct slot *slot)
{
	int hs_cap;
	u16 hs_csr;

	hs_cap = pci_bus_find_capability(slot->bus,
					 slot->devfn,
					 PCI_CAP_ID_CHSWP);
	if (!hs_cap)
		return -ENODEV;

	if (pci_bus_read_config_word(slot->bus,
				     slot->devfn,
				     hs_cap + 2,
				     &hs_csr))
		return -ENODEV;

	if (hs_csr & HS_CSR_EXT) {
		/* Clear EXT (by setting it) */
		if (pci_bus_write_config_word(slot->bus,
					      slot->devfn,
					      hs_cap + 2,
					      hs_csr))
			return -ENODEV;
	}
	return 0;
}

/*
 * Turn the slot's blue LED on by setting LOO, skipping the write if it
 * is already set.  Returns 0 on success, -ENODEV on failure.
 */
int cpci_led_on(struct slot *slot)
{
	int hs_cap;
	u16 hs_csr;

	hs_cap = pci_bus_find_capability(slot->bus,
					 slot->devfn,
					 PCI_CAP_ID_CHSWP);
	if (!hs_cap)
		return -ENODEV;

	if (pci_bus_read_config_word(slot->bus,
				     slot->devfn,
				     hs_cap + 2,
				     &hs_csr))
		return -ENODEV;

	if ((hs_csr & HS_CSR_LOO) != HS_CSR_LOO) {
		hs_csr |= HS_CSR_LOO;
		if (pci_bus_write_config_word(slot->bus,
					      slot->devfn,
					      hs_cap + 2,
					      hs_csr)) {
			err("Could not set LOO for slot %s", slot_name(slot));
			return -ENODEV;
		}
	}
	return 0;
}

/*
 * Turn the slot's blue LED off by clearing LOO, skipping the write if
 * it is already clear.  Returns 0 on success, -ENODEV on failure.
 */
int cpci_led_off(struct slot *slot)
{
	int hs_cap;
	u16 hs_csr;

	hs_cap = pci_bus_find_capability(slot->bus,
					 slot->devfn,
					 PCI_CAP_ID_CHSWP);
	if (!hs_cap)
		return -ENODEV;

	if (pci_bus_read_config_word(slot->bus,
				     slot->devfn,
				     hs_cap + 2,
				     &hs_csr))
		return -ENODEV;

	if (hs_csr & HS_CSR_LOO) {
		hs_csr &= ~HS_CSR_LOO;
		if (pci_bus_write_config_word(slot->bus,
					      slot->devfn,
					      hs_cap + 2,
					      hs_csr)) {
			err("Could not clear LOO for slot %s", slot_name(slot));
			return -ENODEV;
		}
	}
	return 0;
}

/*
 * Device configuration functions
 */

/*
 * Bind a pci_dev to the slot (looking it up, or scanning the slot if it
 * has never been enumerated), add any bridges on the same slot, assign
 * resources, and add the devices.  Runs under the rescan/remove lock.
 * Returns 0 on success, -ENODEV if no device is found.
 */
int cpci_configure_slot(struct slot *slot)
{
	struct pci_dev *dev;
	struct pci_bus *parent;
	int ret = 0;

	dbg("%s - enter", __func__);

	pci_lock_rescan_remove();

	if (slot->dev == NULL) {
		dbg("pci_dev null, finding %02x:%02x:%x",
		    slot->bus->number, PCI_SLOT(slot->devfn),
		    PCI_FUNC(slot->devfn));
		slot->dev = pci_get_slot(slot->bus, slot->devfn);
	}

	/* Still NULL? Well then scan for it! */
	if (slot->dev == NULL) {
		int n;

		dbg("pci_dev still null");

		/*
		 * This will generate pci_dev structures for all functions, but
		 * we will only call this case when lookup fails.
		 */
		n = pci_scan_slot(slot->bus, slot->devfn);
		dbg("%s: pci_scan_slot returned %d", __func__, n);
		slot->dev = pci_get_slot(slot->bus, slot->devfn);
		if (slot->dev == NULL) {
			err("Could not find PCI device for slot %02x",
			    slot->number);
			ret = -ENODEV;
			goto out;
		}
	}
	parent = slot->dev->bus;
	/* Give any bridge functions on this slot their bus resources. */
	for_each_pci_bridge(dev, parent) {
		if (PCI_SLOT(dev->devfn) == PCI_SLOT(slot->devfn))
			pci_hp_add_bridge(dev);
	}

	pci_assign_unassigned_bridge_resources(parent->self);
	pci_bus_add_devices(parent);

 out:
	pci_unlock_rescan_remove();
	dbg("%s - exit", __func__);
	return ret;
}

/*
 * Remove every function of the slot's device from the bus and drop the
 * slot's reference to it.  Returns -ENODEV if the slot has no device.
 */
int cpci_unconfigure_slot(struct slot *slot)
{
	struct pci_dev *dev, *temp;

	dbg("%s - enter", __func__);
	if (!slot->dev) {
		err("No device for slot %02x\n", slot->number);
		return -ENODEV;
	}

	pci_lock_rescan_remove();

	/* Safe iteration: removal unlinks entries from bus->devices. */
	list_for_each_entry_safe(dev, temp, &slot->bus->devices, bus_list) {
		if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
			continue;
		pci_dev_get(dev);
		pci_stop_and_remove_bus_device(dev);
		pci_dev_put(dev);
	}
	pci_dev_put(slot->dev);
	slot->dev = NULL;

	pci_unlock_rescan_remove();

	dbg("%s - exit", __func__);
	return 0;
}
gpl-2.0