repo_name
string
path
string
copies
string
size
string
content
string
license
string
kbc-developers/android_kernel_samsung_exynos4210jpn
arch/arm/mach-sa1100/neponset.c
3078
7659
/* * linux/arch/arm/mach-sa1100/neponset.c * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/ioport.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/mach/serial_sa1100.h> #include <mach/assabet.h> #include <mach/neponset.h> #include <asm/hardware/sa1111.h> #include <asm/sizes.h> /* * Install handler for Neponset IRQ. Note that we have to loop here * since the ETHERNET and USAR IRQs are level based, and we need to * ensure that the IRQ signal is deasserted before returning. This * is rather unfortunate. */ static void neponset_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned int irr; while (1) { /* * Acknowledge the parent IRQ. */ desc->irq_data.chip->irq_ack(&desc->irq_data); /* * Read the interrupt reason register. Let's have all * active IRQ bits high. Note: there is a typo in the * Neponset user's guide for the SA1111 IRR level. */ irr = IRR ^ (IRR_ETHERNET | IRR_USAR); if ((irr & (IRR_ETHERNET | IRR_USAR | IRR_SA1111)) == 0) break; /* * Since there is no individual mask, we have to * mask the parent IRQ. This is safe, since we'll * recheck the register for any pending IRQs. */ if (irr & (IRR_ETHERNET | IRR_USAR)) { desc->irq_data.chip->irq_mask(&desc->irq_data); /* * Ack the interrupt now to prevent re-entering * this neponset handler. Again, this is safe * since we'll check the IRR register prior to * leaving. 
*/ desc->irq_data.chip->irq_ack(&desc->irq_data); if (irr & IRR_ETHERNET) { generic_handle_irq(IRQ_NEPONSET_SMC9196); } if (irr & IRR_USAR) { generic_handle_irq(IRQ_NEPONSET_USAR); } desc->irq_data.chip->irq_unmask(&desc->irq_data); } if (irr & IRR_SA1111) { generic_handle_irq(IRQ_NEPONSET_SA1111); } } } static void neponset_set_mctrl(struct uart_port *port, u_int mctrl) { u_int mdm_ctl0 = MDM_CTL_0; if (port->mapbase == _Ser1UTCR0) { if (mctrl & TIOCM_RTS) mdm_ctl0 &= ~MDM_CTL0_RTS2; else mdm_ctl0 |= MDM_CTL0_RTS2; if (mctrl & TIOCM_DTR) mdm_ctl0 &= ~MDM_CTL0_DTR2; else mdm_ctl0 |= MDM_CTL0_DTR2; } else if (port->mapbase == _Ser3UTCR0) { if (mctrl & TIOCM_RTS) mdm_ctl0 &= ~MDM_CTL0_RTS1; else mdm_ctl0 |= MDM_CTL0_RTS1; if (mctrl & TIOCM_DTR) mdm_ctl0 &= ~MDM_CTL0_DTR1; else mdm_ctl0 |= MDM_CTL0_DTR1; } MDM_CTL_0 = mdm_ctl0; } static u_int neponset_get_mctrl(struct uart_port *port) { u_int ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR; u_int mdm_ctl1 = MDM_CTL_1; if (port->mapbase == _Ser1UTCR0) { if (mdm_ctl1 & MDM_CTL1_DCD2) ret &= ~TIOCM_CD; if (mdm_ctl1 & MDM_CTL1_CTS2) ret &= ~TIOCM_CTS; if (mdm_ctl1 & MDM_CTL1_DSR2) ret &= ~TIOCM_DSR; } else if (port->mapbase == _Ser3UTCR0) { if (mdm_ctl1 & MDM_CTL1_DCD1) ret &= ~TIOCM_CD; if (mdm_ctl1 & MDM_CTL1_CTS1) ret &= ~TIOCM_CTS; if (mdm_ctl1 & MDM_CTL1_DSR1) ret &= ~TIOCM_DSR; } return ret; } static struct sa1100_port_fns neponset_port_fns __devinitdata = { .set_mctrl = neponset_set_mctrl, .get_mctrl = neponset_get_mctrl, }; static int __devinit neponset_probe(struct platform_device *dev) { sa1100_register_uart_fns(&neponset_port_fns); /* * Install handler for GPIO25. */ irq_set_irq_type(IRQ_GPIO25, IRQ_TYPE_EDGE_RISING); irq_set_chained_handler(IRQ_GPIO25, neponset_irq_handler); /* * We would set IRQ_GPIO25 to be a wake-up IRQ, but * unfortunately something on the Neponset activates * this IRQ on sleep (ethernet?) */ #if 0 enable_irq_wake(IRQ_GPIO25); #endif /* * Setup other Neponset IRQs. 
SA1111 will be done by the * generic SA1111 code. */ irq_set_handler(IRQ_NEPONSET_SMC9196, handle_simple_irq); set_irq_flags(IRQ_NEPONSET_SMC9196, IRQF_VALID | IRQF_PROBE); irq_set_handler(IRQ_NEPONSET_USAR, handle_simple_irq); set_irq_flags(IRQ_NEPONSET_USAR, IRQF_VALID | IRQF_PROBE); /* * Disable GPIO 0/1 drivers so the buttons work on the module. */ NCR_0 = NCR_GP01_OFF; return 0; } #ifdef CONFIG_PM /* * LDM power management. */ static unsigned int neponset_saved_state; static int neponset_suspend(struct platform_device *dev, pm_message_t state) { /* * Save state. */ neponset_saved_state = NCR_0; return 0; } static int neponset_resume(struct platform_device *dev) { NCR_0 = neponset_saved_state; return 0; } #else #define neponset_suspend NULL #define neponset_resume NULL #endif static struct platform_driver neponset_device_driver = { .probe = neponset_probe, .suspend = neponset_suspend, .resume = neponset_resume, .driver = { .name = "neponset", }, }; static struct resource neponset_resources[] = { [0] = { .start = 0x10000000, .end = 0x17ffffff, .flags = IORESOURCE_MEM, }, }; static struct platform_device neponset_device = { .name = "neponset", .id = 0, .num_resources = ARRAY_SIZE(neponset_resources), .resource = neponset_resources, }; static struct resource sa1111_resources[] = { [0] = { .start = 0x40000000, .end = 0x40001fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_NEPONSET_SA1111, .end = IRQ_NEPONSET_SA1111, .flags = IORESOURCE_IRQ, }, }; static struct sa1111_platform_data sa1111_info = { .irq_base = IRQ_BOARD_END, }; static u64 sa1111_dmamask = 0xffffffffUL; static struct platform_device sa1111_device = { .name = "sa1111", .id = 0, .dev = { .dma_mask = &sa1111_dmamask, .coherent_dma_mask = 0xffffffff, .platform_data = &sa1111_info, }, .num_resources = ARRAY_SIZE(sa1111_resources), .resource = sa1111_resources, }; static struct resource smc91x_resources[] = { [0] = { .name = "smc91x-regs", .start = SA1100_CS3_PHYS, .end = SA1100_CS3_PHYS + 0x01ffffff, 
.flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_NEPONSET_SMC9196, .end = IRQ_NEPONSET_SMC9196, .flags = IORESOURCE_IRQ, }, [2] = { .name = "smc91x-attrib", .start = SA1100_CS3_PHYS + 0x02000000, .end = SA1100_CS3_PHYS + 0x03ffffff, .flags = IORESOURCE_MEM, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static struct platform_device *devices[] __initdata = { &neponset_device, &sa1111_device, &smc91x_device, }; extern void sa1110_mb_disable(void); static int __init neponset_init(void) { platform_driver_register(&neponset_device_driver); /* * The Neponset is only present on the Assabet machine type. */ if (!machine_is_assabet()) return -ENODEV; /* * Ensure that the memory bus request/grant signals are setup, * and the grant is held in its inactive state, whether or not * we actually have a Neponset attached. */ sa1110_mb_disable(); if (!machine_has_neponset()) { printk(KERN_DEBUG "Neponset expansion board not present\n"); return -ENODEV; } if (WHOAMI != 0x11) { printk(KERN_WARNING "Neponset board detected, but " "wrong ID: %02x\n", WHOAMI); return -ENODEV; } return platform_add_devices(devices, ARRAY_SIZE(devices)); } subsys_initcall(neponset_init); static struct map_desc neponset_io_desc[] __initdata = { { /* System Registers */ .virtual = 0xf3000000, .pfn = __phys_to_pfn(0x10000000), .length = SZ_1M, .type = MT_DEVICE }, { /* SA-1111 */ .virtual = 0xf4000000, .pfn = __phys_to_pfn(0x40000000), .length = SZ_1M, .type = MT_DEVICE } }; void __init neponset_map_io(void) { iotable_init(neponset_io_desc, ARRAY_SIZE(neponset_io_desc)); }
gpl-2.0
jmztaylor/android_kernel_lge_ls720
scripts/kconfig/confdata.c
3846
23237
/* * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org> * Released under the terms of the GNU GPL v2.0. */ #include <sys/stat.h> #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <unistd.h> #include "lkc.h" static void conf_warning(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); static void conf_message(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); static const char *conf_filename; static int conf_lineno, conf_warnings, conf_unsaved; const char conf_defname[] = "arch/$ARCH/defconfig"; static void conf_warning(const char *fmt, ...) { va_list ap; va_start(ap, fmt); fprintf(stderr, "%s:%d:warning: ", conf_filename, conf_lineno); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); conf_warnings++; } static void conf_default_message_callback(const char *fmt, va_list ap) { printf("#\n# "); vprintf(fmt, ap); printf("\n#\n"); } static void (*conf_message_callback) (const char *fmt, va_list ap) = conf_default_message_callback; void conf_set_message_callback(void (*fn) (const char *fmt, va_list ap)) { conf_message_callback = fn; } static void conf_message(const char *fmt, ...) { va_list ap; va_start(ap, fmt); if (conf_message_callback) conf_message_callback(fmt, ap); } const char *conf_get_configname(void) { char *name = getenv("KCONFIG_CONFIG"); return name ? name : ".config"; } const char *conf_get_autoconfig_name(void) { char *name = getenv("KCONFIG_AUTOCONFIG"); return name ? 
name : "include/config/auto.conf"; } static char *conf_expand_value(const char *in) { struct symbol *sym; const char *src; static char res_value[SYMBOL_MAXLENGTH]; char *dst, name[SYMBOL_MAXLENGTH]; res_value[0] = 0; dst = name; while ((src = strchr(in, '$'))) { strncat(res_value, in, src - in); src++; dst = name; while (isalnum(*src) || *src == '_') *dst++ = *src++; *dst = 0; sym = sym_lookup(name, 0); sym_calc_value(sym); strcat(res_value, sym_get_string_value(sym)); in = src; } strcat(res_value, in); return res_value; } char *conf_get_default_confname(void) { struct stat buf; static char fullname[PATH_MAX+1]; char *env, *name; name = conf_expand_value(conf_defname); env = getenv(SRCTREE); if (env) { sprintf(fullname, "%s/%s", env, name); if (!stat(fullname, &buf)) return fullname; } return name; } static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) { char *p2; switch (sym->type) { case S_TRISTATE: if (p[0] == 'm') { sym->def[def].tri = mod; sym->flags |= def_flags; break; } /* fall through */ case S_BOOLEAN: if (p[0] == 'y') { sym->def[def].tri = yes; sym->flags |= def_flags; break; } if (p[0] == 'n') { sym->def[def].tri = no; sym->flags |= def_flags; break; } conf_warning("symbol value '%s' invalid for %s", p, sym->name); return 1; case S_OTHER: if (*p != '"') { for (p2 = p; *p2 && !isspace(*p2); p2++) ; sym->type = S_STRING; goto done; } /* fall through */ case S_STRING: if (*p++ != '"') break; for (p2 = p; (p2 = strpbrk(p2, "\"\\")); p2++) { if (*p2 == '"') { *p2 = 0; break; } memmove(p2, p2 + 1, strlen(p2)); } if (!p2) { conf_warning("invalid string found"); return 1; } /* fall through */ case S_INT: case S_HEX: done: if (sym_string_valid(sym, p)) { sym->def[def].val = strdup(p); sym->flags |= def_flags; } else { conf_warning("symbol value '%s' invalid for %s", p, sym->name); return 1; } break; default: ; } return 0; } int conf_read_simple(const char *name, int def) { FILE *in = NULL; char line[1024]; char *p, *p2; struct symbol 
*sym; int i, def_flags; if (name) { in = zconf_fopen(name); } else { struct property *prop; name = conf_get_configname(); in = zconf_fopen(name); if (in) goto load; sym_add_change_count(1); if (!sym_defconfig_list) { if (modules_sym) sym_calc_value(modules_sym); return 1; } for_all_defaults(sym_defconfig_list, prop) { if (expr_calc_value(prop->visible.expr) == no || prop->expr->type != E_SYMBOL) continue; name = conf_expand_value(prop->expr->left.sym->name); in = zconf_fopen(name); if (in) { conf_message(_("using defaults found in %s"), name); goto load; } } } if (!in) return 1; load: conf_filename = name; conf_lineno = 0; conf_warnings = 0; conf_unsaved = 0; def_flags = SYMBOL_DEF << def; for_all_symbols(i, sym) { sym->flags |= SYMBOL_CHANGED; sym->flags &= ~(def_flags|SYMBOL_VALID); if (sym_is_choice(sym)) sym->flags |= def_flags; switch (sym->type) { case S_INT: case S_HEX: case S_STRING: if (sym->def[def].val) free(sym->def[def].val); /* fall through */ default: sym->def[def].val = NULL; sym->def[def].tri = no; } } while (fgets(line, sizeof(line), in)) { conf_lineno++; sym = NULL; if (line[0] == '#') { if (memcmp(line + 2, CONFIG_, strlen(CONFIG_))) continue; p = strchr(line + 2 + strlen(CONFIG_), ' '); if (!p) continue; *p++ = 0; if (strncmp(p, "is not set", 10)) continue; if (def == S_DEF_USER) { sym = sym_find(line + 2 + strlen(CONFIG_)); if (!sym) { sym_add_change_count(1); goto setsym; } } else { sym = sym_lookup(line + 2 + strlen(CONFIG_), 0); if (sym->type == S_UNKNOWN) sym->type = S_BOOLEAN; } if (sym->flags & def_flags) { conf_warning("override: reassigning to symbol %s", sym->name); } switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: sym->def[def].tri = no; sym->flags |= def_flags; break; default: ; } } else if (memcmp(line, CONFIG_, strlen(CONFIG_)) == 0) { p = strchr(line + strlen(CONFIG_), '='); if (!p) continue; *p++ = 0; p2 = strchr(p, '\n'); if (p2) { *p2-- = 0; if (*p2 == '\r') *p2 = 0; } if (def == S_DEF_USER) { sym = sym_find(line + 
strlen(CONFIG_)); if (!sym) { sym_add_change_count(1); goto setsym; } } else { sym = sym_lookup(line + strlen(CONFIG_), 0); if (sym->type == S_UNKNOWN) sym->type = S_OTHER; } if (sym->flags & def_flags) { conf_warning("override: reassigning to symbol %s", sym->name); } if (conf_set_sym_val(sym, def, def_flags, p)) continue; } else { if (line[0] != '\r' && line[0] != '\n') conf_warning("unexpected data"); continue; } setsym: if (sym && sym_is_choice_value(sym)) { struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); switch (sym->def[def].tri) { case no: break; case mod: if (cs->def[def].tri == yes) { conf_warning("%s creates inconsistent choice state", sym->name); cs->flags &= ~def_flags; } break; case yes: if (cs->def[def].tri != no) conf_warning("override: %s changes choice state", sym->name); cs->def[def].val = sym; break; } cs->def[def].tri = EXPR_OR(cs->def[def].tri, sym->def[def].tri); } } fclose(in); if (modules_sym) sym_calc_value(modules_sym); return 0; } int conf_read(const char *name) { struct symbol *sym; int i; sym_set_change_count(0); if (conf_read_simple(name, S_DEF_USER)) return 1; for_all_symbols(i, sym) { sym_calc_value(sym); if (sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO)) continue; if (sym_has_value(sym) && (sym->flags & SYMBOL_WRITE)) { /* check that calculated value agrees with saved value */ switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym)) break; if (!sym_is_choice(sym)) continue; /* fall through */ default: if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val)) continue; break; } } else if (!sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE)) /* no previous value and not saved */ continue; conf_unsaved++; /* maybe print value in verbose mode... 
*/ } for_all_symbols(i, sym) { if (sym_has_value(sym) && !sym_is_choice_value(sym)) { /* Reset values of generates values, so they'll appear * as new, if they should become visible, but that * doesn't quite work if the Kconfig and the saved * configuration disagree. */ if (sym->visible == no && !conf_unsaved) sym->flags &= ~SYMBOL_DEF_USER; switch (sym->type) { case S_STRING: case S_INT: case S_HEX: /* Reset a string value if it's out of range */ if (sym_string_within_range(sym, sym->def[S_DEF_USER].val)) break; sym->flags &= ~(SYMBOL_VALID|SYMBOL_DEF_USER); conf_unsaved++; break; default: break; } } } sym_add_change_count(conf_warnings || conf_unsaved); return 0; } /* * Kconfig configuration printer * * This printer is used when generating the resulting configuration after * kconfig invocation and `defconfig' files. Unset symbol might be omitted by * passing a non-NULL argument to the printer. * */ static void kconfig_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) { switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (*value == 'n') { bool skip_unset = (arg != NULL); if (!skip_unset) fprintf(fp, "# %s%s is not set\n", CONFIG_, sym->name); return; } break; default: break; } fprintf(fp, "%s%s=%s\n", CONFIG_, sym->name, value); } static void kconfig_print_comment(FILE *fp, const char *value, void *arg) { const char *p = value; size_t l; for (;;) { l = strcspn(p, "\n"); fprintf(fp, "#"); if (l) { fprintf(fp, " "); xfwrite(p, l, 1, fp); p += l; } fprintf(fp, "\n"); if (*p++ == '\0') break; } } static struct conf_printer kconfig_printer_cb = { .print_symbol = kconfig_print_symbol, .print_comment = kconfig_print_comment, }; /* * Header printer * * This printer is used when generating the `include/generated/autoconf.h' file. 
*/ static void header_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) { switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: { const char *suffix = ""; switch (*value) { case 'n': break; case 'm': suffix = "_MODULE"; /* fall through */ default: fprintf(fp, "#define %s%s%s 1\n", CONFIG_, sym->name, suffix); } break; } case S_HEX: { const char *prefix = ""; if (value[0] != '0' || (value[1] != 'x' && value[1] != 'X')) prefix = "0x"; fprintf(fp, "#define %s%s %s%s\n", CONFIG_, sym->name, prefix, value); break; } case S_STRING: case S_INT: fprintf(fp, "#define %s%s %s\n", CONFIG_, sym->name, value); break; default: break; } } static void header_print_comment(FILE *fp, const char *value, void *arg) { const char *p = value; size_t l; fprintf(fp, "/*\n"); for (;;) { l = strcspn(p, "\n"); fprintf(fp, " *"); if (l) { fprintf(fp, " "); xfwrite(p, l, 1, fp); p += l; } fprintf(fp, "\n"); if (*p++ == '\0') break; } fprintf(fp, " */\n"); } static struct conf_printer header_printer_cb = { .print_symbol = header_print_symbol, .print_comment = header_print_comment, }; /* * Tristate printer * * This printer is used when generating the `include/config/tristate.conf' file. 
*/ static void tristate_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) { if (sym->type == S_TRISTATE && *value != 'n') fprintf(fp, "%s%s=%c\n", CONFIG_, sym->name, (char)toupper(*value)); } static struct conf_printer tristate_printer_cb = { .print_symbol = tristate_print_symbol, .print_comment = kconfig_print_comment, }; static void conf_write_symbol(FILE *fp, struct symbol *sym, struct conf_printer *printer, void *printer_arg) { const char *str; switch (sym->type) { case S_OTHER: case S_UNKNOWN: break; case S_STRING: str = sym_get_string_value(sym); str = sym_escape_string_value(str); printer->print_symbol(fp, sym, str, printer_arg); free((void *)str); break; default: str = sym_get_string_value(sym); printer->print_symbol(fp, sym, str, printer_arg); } } static void conf_write_heading(FILE *fp, struct conf_printer *printer, void *printer_arg) { char buf[256]; snprintf(buf, sizeof(buf), "\n" "Automatically generated file; DO NOT EDIT.\n" "%s\n", rootmenu.prompt->text); printer->print_comment(fp, buf, printer_arg); } /* * Write out a minimal config. * All values that has default values are skipped as this is redundant. */ int conf_write_defconfig(const char *filename) { struct symbol *sym; struct menu *menu; FILE *out; out = fopen(filename, "w"); if (!out) return 1; sym_clear_all_valid(); /* Traverse all menus to find all relevant symbols */ menu = rootmenu.list; while (menu != NULL) { sym = menu->sym; if (sym == NULL) { if (!menu_is_visible(menu)) goto next_menu; } else if (!sym_is_choice(sym)) { sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE)) goto next_menu; sym->flags &= ~SYMBOL_WRITE; /* If we cannot change the symbol - skip */ if (!sym_is_changable(sym)) goto next_menu; /* If symbol equals to default value - skip */ if (strcmp(sym_get_string_value(sym), sym_get_string_default(sym)) == 0) goto next_menu; /* * If symbol is a choice value and equals to the * default for a choice - skip. 
* But only if value is bool and equal to "y" and * choice is not "optional". * (If choice is "optional" then all values can be "n") */ if (sym_is_choice_value(sym)) { struct symbol *cs; struct symbol *ds; cs = prop_get_symbol(sym_get_choice_prop(sym)); ds = sym_choice_default(cs); if (!sym_is_optional(cs) && sym == ds) { if ((sym->type == S_BOOLEAN) && sym_get_tristate_value(sym) == yes) goto next_menu; } } conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); } next_menu: if (menu->list != NULL) { menu = menu->list; } else if (menu->next != NULL) { menu = menu->next; } else { while ((menu = menu->parent)) { if (menu->next != NULL) { menu = menu->next; break; } } } } fclose(out); return 0; } int conf_write(const char *name) { FILE *out; struct symbol *sym; struct menu *menu; const char *basename; const char *str; char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1]; char *env; dirname[0] = 0; if (name && name[0]) { struct stat st; char *slash; if (!stat(name, &st) && S_ISDIR(st.st_mode)) { strcpy(dirname, name); strcat(dirname, "/"); basename = conf_get_configname(); } else if ((slash = strrchr(name, '/'))) { int size = slash - name + 1; memcpy(dirname, name, size); dirname[size] = 0; if (slash[1]) basename = slash + 1; else basename = conf_get_configname(); } else basename = name; } else basename = conf_get_configname(); sprintf(newname, "%s%s", dirname, basename); env = getenv("KCONFIG_OVERWRITECONFIG"); if (!env || !*env) { sprintf(tmpname, "%s.tmpconfig.%d", dirname, (int)getpid()); out = fopen(tmpname, "w"); } else { *tmpname = 0; out = fopen(newname, "w"); } if (!out) return 1; conf_write_heading(out, &kconfig_printer_cb, NULL); if (!conf_get_changed()) sym_clear_all_valid(); menu = rootmenu.list; while (menu) { sym = menu->sym; if (!sym) { if (!menu_is_visible(menu)) goto next; str = menu_get_prompt(menu); fprintf(out, "\n" "#\n" "# %s\n" "#\n", str); } else if (!(sym->flags & SYMBOL_CHOICE)) { sym_calc_value(sym); if (!(sym->flags & 
SYMBOL_WRITE)) goto next; sym->flags &= ~SYMBOL_WRITE; conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); } next: if (menu->list) { menu = menu->list; continue; } if (menu->next) menu = menu->next; else while ((menu = menu->parent)) { if (menu->next) { menu = menu->next; break; } } } fclose(out); if (*tmpname) { strcat(dirname, basename); strcat(dirname, ".old"); rename(newname, dirname); if (rename(tmpname, newname)) return 1; } conf_message(_("configuration written to %s"), newname); sym_set_change_count(0); return 0; } static int conf_split_config(void) { const char *name; char path[PATH_MAX+1]; char *s, *d, c; struct symbol *sym; struct stat sb; int res, i, fd; name = conf_get_autoconfig_name(); conf_read_simple(name, S_DEF_AUTO); if (chdir("include/config")) return 1; res = 0; for_all_symbols(i, sym) { sym_calc_value(sym); if ((sym->flags & SYMBOL_AUTO) || !sym->name) continue; if (sym->flags & SYMBOL_WRITE) { if (sym->flags & SYMBOL_DEF_AUTO) { /* * symbol has old and new value, * so compare them... */ switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (sym_get_tristate_value(sym) == sym->def[S_DEF_AUTO].tri) continue; break; case S_STRING: case S_HEX: case S_INT: if (!strcmp(sym_get_string_value(sym), sym->def[S_DEF_AUTO].val)) continue; break; default: break; } } else { /* * If there is no old value, only 'no' (unset) * is allowed as new value. */ switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (sym_get_tristate_value(sym) == no) continue; break; default: break; } } } else if (!(sym->flags & SYMBOL_DEF_AUTO)) /* There is neither an old nor a new value. */ continue; /* else * There is an old value, but no new value ('no' (unset) * isn't saved in auto.conf, so the old value is always * different from 'no'). */ /* Replace all '_' and append ".h" */ s = sym->name; d = path; while ((c = *s++)) { c = tolower(c); *d++ = (c == '_') ? '/' : c; } strcpy(d, ".h"); /* Assume directory path already exists. 
*/ fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); if (fd == -1) { if (errno != ENOENT) { res = 1; break; } /* * Create directory components, * unless they exist already. */ d = path; while ((d = strchr(d, '/'))) { *d = 0; if (stat(path, &sb) && mkdir(path, 0755)) { res = 1; goto out; } *d++ = '/'; } /* Try it again. */ fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); if (fd == -1) { res = 1; break; } } close(fd); } out: if (chdir("../..")) return 1; return res; } int conf_write_autoconf(void) { struct symbol *sym; const char *name; FILE *out, *tristate, *out_h; int i; sym_clear_all_valid(); file_write_dep("include/config/auto.conf.cmd"); if (conf_split_config()) return 1; out = fopen(".tmpconfig", "w"); if (!out) return 1; tristate = fopen(".tmpconfig_tristate", "w"); if (!tristate) { fclose(out); return 1; } out_h = fopen(".tmpconfig.h", "w"); if (!out_h) { fclose(out); fclose(tristate); return 1; } conf_write_heading(out, &kconfig_printer_cb, NULL); conf_write_heading(tristate, &tristate_printer_cb, NULL); conf_write_heading(out_h, &header_printer_cb, NULL); for_all_symbols(i, sym) { sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE) || !sym->name) continue; /* write symbol to auto.conf, tristate and header files */ conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1); conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1); conf_write_symbol(out_h, sym, &header_printer_cb, NULL); } fclose(out); fclose(tristate); fclose(out_h); name = getenv("KCONFIG_AUTOHEADER"); if (!name) name = "include/generated/autoconf.h"; if (rename(".tmpconfig.h", name)) return 1; name = getenv("KCONFIG_TRISTATE"); if (!name) name = "include/config/tristate.conf"; if (rename(".tmpconfig_tristate", name)) return 1; name = conf_get_autoconfig_name(); /* * This must be the last step, kbuild has a dependency on auto.conf * and this marks the successful completion of the previous steps. 
*/ if (rename(".tmpconfig", name)) return 1; return 0; } static int sym_change_count; static void (*conf_changed_callback)(void); void sym_set_change_count(int count) { int _sym_change_count = sym_change_count; sym_change_count = count; if (conf_changed_callback && (bool)_sym_change_count != (bool)count) conf_changed_callback(); } void sym_add_change_count(int count) { sym_set_change_count(count + sym_change_count); } bool conf_get_changed(void) { return sym_change_count; } void conf_set_changed_callback(void (*fn)(void)) { conf_changed_callback = fn; } static void randomize_choice_values(struct symbol *csym) { struct property *prop; struct symbol *sym; struct expr *e; int cnt, def; /* * If choice is mod then we may have more items selected * and if no then no-one. * In both cases stop. */ if (csym->curr.tri != yes) return; prop = sym_get_choice_prop(csym); /* count entries in choice block */ cnt = 0; expr_list_for_each_sym(prop->expr, e, sym) cnt++; /* * find a random value and set it to yes, * set the rest to no so we have only one set */ def = (rand() % cnt); cnt = 0; expr_list_for_each_sym(prop->expr, e, sym) { if (def == cnt++) { sym->def[S_DEF_USER].tri = yes; csym->def[S_DEF_USER].val = sym; } else { sym->def[S_DEF_USER].tri = no; } } csym->flags |= SYMBOL_DEF_USER; /* clear VALID to get value calculated */ csym->flags &= ~(SYMBOL_VALID); } static void set_all_choice_values(struct symbol *csym) { struct property *prop; struct symbol *sym; struct expr *e; prop = sym_get_choice_prop(csym); /* * Set all non-assinged choice values to no */ expr_list_for_each_sym(prop->expr, e, sym) { if (!sym_has_value(sym)) sym->def[S_DEF_USER].tri = no; } csym->flags |= SYMBOL_DEF_USER; /* clear VALID to get value calculated */ csym->flags &= ~(SYMBOL_VALID); } void conf_set_all_new_symbols(enum conf_def_mode mode) { struct symbol *sym, *csym; int i, cnt; for_all_symbols(i, sym) { if (sym_has_value(sym)) continue; switch (sym_get_type(sym)) { case S_BOOLEAN: case S_TRISTATE: 
switch (mode) { case def_yes: sym->def[S_DEF_USER].tri = yes; break; case def_mod: sym->def[S_DEF_USER].tri = mod; break; case def_no: sym->def[S_DEF_USER].tri = no; break; case def_random: cnt = sym_get_type(sym) == S_TRISTATE ? 3 : 2; sym->def[S_DEF_USER].tri = (tristate)(rand() % cnt); break; default: continue; } if (!(sym_is_choice(sym) && mode == def_random)) sym->flags |= SYMBOL_DEF_USER; break; default: break; } } sym_clear_all_valid(); /* * We have different type of choice blocks. * If curr.tri equals to mod then we can select several * choice symbols in one block. * In this case we do nothing. * If curr.tri equals yes then only one symbol can be * selected in a choice block and we set it to yes, * and the rest to no. */ for_all_symbols(i, csym) { if (sym_has_value(csym) || !sym_is_choice(csym)) continue; sym_calc_value(csym); if (mode == def_random) randomize_choice_values(csym); else set_all_choice_values(csym); } }
gpl-2.0
AntonX/kernel-omap4-common
drivers/net/irda/act200l-sir.c
11526
7155
/********************************************************************* * * Filename: act200l.c * Version: 0.8 * Description: Implementation for the ACTiSYS ACT-IR200L dongle * Status: Experimental. * Author: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp> * Created at: Fri Aug 3 17:35:42 2001 * Modified at: Fri Aug 17 10:22:40 2001 * Modified by: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp> * * Copyright (c) 2001 SHIMIZU Takuya, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * ********************************************************************/ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <net/irda/irda.h> #include "sir-dev.h" static int act200l_reset(struct sir_dev *dev); static int act200l_open(struct sir_dev *dev); static int act200l_close(struct sir_dev *dev); static int act200l_change_speed(struct sir_dev *dev, unsigned speed); /* Regsiter 0: Control register #1 */ #define ACT200L_REG0 0x00 #define ACT200L_TXEN 0x01 /* Enable transmitter */ #define ACT200L_RXEN 0x02 /* Enable receiver */ /* Register 1: Control register #2 */ #define ACT200L_REG1 0x10 #define ACT200L_LODB 0x01 /* Load new baud rate count value */ #define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */ /* Register 4: Output Power register */ #define ACT200L_REG4 0x40 #define ACT200L_OP0 0x01 /* Enable LED1C output */ #define ACT200L_OP1 0x02 /* Enable LED2C output */ #define ACT200L_BLKR 0x04 /* Register 5: Receive Mode register */ #define ACT200L_REG5 0x50 #define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */ /* Register 6: Receive Sensitivity register #1 */ #define ACT200L_REG6 0x60 #define ACT200L_RS0 0x01 /* receive threshold bit 0 */ #define ACT200L_RS1 0x02 /* receive threshold bit 1 */ /* Register 7: Receive Sensitivity 
register #2 */ #define ACT200L_REG7 0x70 #define ACT200L_ENPOS 0x04 /* Ignore the falling edge */ /* Register 8,9: Baud Rate Dvider register #1,#2 */ #define ACT200L_REG8 0x80 #define ACT200L_REG9 0x90 #define ACT200L_2400 0x5f #define ACT200L_9600 0x17 #define ACT200L_19200 0x0b #define ACT200L_38400 0x05 #define ACT200L_57600 0x03 #define ACT200L_115200 0x01 /* Register 13: Control register #3 */ #define ACT200L_REG13 0xd0 #define ACT200L_SHDW 0x01 /* Enable access to shadow registers */ /* Register 15: Status register */ #define ACT200L_REG15 0xf0 /* Register 21: Control register #4 */ #define ACT200L_REG21 0x50 #define ACT200L_EXCK 0x02 /* Disable clock output driver */ #define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */ static struct dongle_driver act200l = { .owner = THIS_MODULE, .driver_name = "ACTiSYS ACT-IR200L", .type = IRDA_ACT200L_DONGLE, .open = act200l_open, .close = act200l_close, .reset = act200l_reset, .set_speed = act200l_change_speed, }; static int __init act200l_sir_init(void) { return irda_register_dongle(&act200l); } static void __exit act200l_sir_cleanup(void) { irda_unregister_dongle(&act200l); } static int act200l_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; IRDA_DEBUG(2, "%s()\n", __func__ ); /* Power on the dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Set the speeds we can accept */ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; qos->min_turn_time.bits = 0x03; irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int act200l_close(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__ ); /* Power off the dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } /* * Function act200l_change_speed (dev, speed) * * Set the speed for the ACTiSYS ACT-IR200L type dongle. 
* */ static int act200l_change_speed(struct sir_dev *dev, unsigned speed) { u8 control[3]; int ret = 0; IRDA_DEBUG(2, "%s()\n", __func__ ); /* Clear DTR and set RTS to enter command mode */ sirdev_set_dtr_rts(dev, FALSE, TRUE); switch (speed) { default: ret = -EINVAL; /* fall through */ case 9600: control[0] = ACT200L_REG8 | (ACT200L_9600 & 0x0f); control[1] = ACT200L_REG9 | ((ACT200L_9600 >> 4) & 0x0f); break; case 19200: control[0] = ACT200L_REG8 | (ACT200L_19200 & 0x0f); control[1] = ACT200L_REG9 | ((ACT200L_19200 >> 4) & 0x0f); break; case 38400: control[0] = ACT200L_REG8 | (ACT200L_38400 & 0x0f); control[1] = ACT200L_REG9 | ((ACT200L_38400 >> 4) & 0x0f); break; case 57600: control[0] = ACT200L_REG8 | (ACT200L_57600 & 0x0f); control[1] = ACT200L_REG9 | ((ACT200L_57600 >> 4) & 0x0f); break; case 115200: control[0] = ACT200L_REG8 | (ACT200L_115200 & 0x0f); control[1] = ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f); break; } control[2] = ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE; /* Write control bytes */ sirdev_raw_write(dev, control, 3); msleep(5); /* Go back to normal mode */ sirdev_set_dtr_rts(dev, TRUE, TRUE); dev->speed = speed; return ret; } /* * Function act200l_reset (driver) * * Reset the ACTiSYS ACT-IR200L type dongle. 
*/ #define ACT200L_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1) #define ACT200L_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2) static int act200l_reset(struct sir_dev *dev) { unsigned state = dev->fsm.substate; unsigned delay = 0; static const u8 control[9] = { ACT200L_REG15, ACT200L_REG13 | ACT200L_SHDW, ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL, ACT200L_REG13, ACT200L_REG7 | ACT200L_ENPOS, ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1, ACT200L_REG5 | ACT200L_RWIDL, ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR, ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN }; int ret = 0; IRDA_DEBUG(2, "%s()\n", __func__ ); switch (state) { case SIRDEV_STATE_DONGLE_RESET: /* Reset the dongle : set RTS low for 25 ms */ sirdev_set_dtr_rts(dev, TRUE, FALSE); state = ACT200L_STATE_WAIT1_RESET; delay = 50; break; case ACT200L_STATE_WAIT1_RESET: /* Clear DTR and set RTS to enter command mode */ sirdev_set_dtr_rts(dev, FALSE, TRUE); udelay(25); /* better wait for some short while */ /* Write control bytes */ sirdev_raw_write(dev, control, sizeof(control)); state = ACT200L_STATE_WAIT2_RESET; delay = 15; break; case ACT200L_STATE_WAIT2_RESET: /* Go back to normal mode */ sirdev_set_dtr_rts(dev, TRUE, TRUE); dev->speed = 9600; break; default: IRDA_ERROR("%s(), unknown state %d\n", __func__, state); ret = -1; break; } dev->fsm.substate = state; return (delay > 0) ? delay : ret; } MODULE_AUTHOR("SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>"); MODULE_DESCRIPTION("ACTiSYS ACT-IR200L dongle driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-10"); /* IRDA_ACT200L_DONGLE */ module_init(act200l_sir_init); module_exit(act200l_sir_cleanup);
gpl-2.0
htdevices/linux-2.6-imx
arch/mips/mti-malta/malta-console.c
11782
1283
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Putting things on the screen/serial line using YAMONs facilities.
 */
#include <linux/console.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
#include <asm/io.h>

/* Base I/O address of the first 16550-compatible UART on Malta. */
#define PORT(offset) (0x3f8 + (offset))

/* Read one UART register. */
static inline unsigned int serial_in(int offset)
{
	return inb(PORT(offset));
}

/* Write one UART register. */
static inline void serial_out(int offset, int value)
{
	outb(value, PORT(offset));
}

/*
 * Emit a single character on the serial console.
 *
 * Busy-waits until the transmitter holding register is empty before
 * writing the character.  Always reports one character written.
 */
int prom_putchar(char c)
{
	for (;;) {
		unsigned int lsr = serial_in(UART_LSR);

		if (lsr & UART_LSR_THRE)
			break;
	}
	serial_out(UART_TX, c);

	return 1;
}
gpl-2.0
miselin/gpxe
src/drivers/net/efi/snpnet.c
7
9710
/*
 * Copyright (C) 2010 VMware, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <errno.h>
#include <string.h>
#include <gpxe/io.h>
#include <gpxe/iobuf.h>
#include <gpxe/netdevice.h>
#include <gpxe/if_ether.h>
#include <gpxe/ethernet.h>
#include <gpxe/efi/efi.h>
#include <gpxe/efi/Protocol/SimpleNetwork.h>
#include "snp.h"
#include "snpnet.h"

/** @file
 *
 * SNP network device driver
 *
 */

/** SNP net device structure */
struct snpnet_device {
	/** The underlying simple network protocol */
	EFI_SIMPLE_NETWORK_PROTOCOL *snp;

	/** State that the SNP should be in after close */
	UINT32 close_state;
};

/**
 * Transmit packet
 *
 * Hands the buffer to the SNP; completion is detected later in
 * snpnet_poll() via GetStatus, so the buffer stays on the tx queue.
 *
 * @v netdev            Network device
 * @v iobuf             I/O buffer
 * @ret rc              Return status code
 */
static int snpnet_transmit ( struct net_device *netdev,
                             struct io_buffer *iobuf ) {
	struct snpnet_device *snpnetdev = netdev->priv;
	EFI_SIMPLE_NETWORK_PROTOCOL *snp = snpnetdev->snp;
	EFI_STATUS efirc;
	size_t len = iob_len ( iobuf );

	efirc = snp->Transmit ( snp, 0, len, iobuf->data, NULL, NULL, NULL );
	return EFIRC_TO_RC ( efirc );
}

/**
 * Find a I/O buffer on the list of outstanding Tx buffers and complete it.
 *
 * Matches on the buffer's data pointer, since that is all the SNP
 * GetStatus call reports back.
 *
 * @v snpnetdev         SNP network device
 * @v txbuf             Buffer address
 */
static void snpnet_complete ( struct net_device *netdev, void *txbuf ) {
	struct io_buffer *tmp;
	struct io_buffer *iobuf;

	list_for_each_entry_safe ( iobuf, tmp, &netdev->tx_queue, list ) {
		if ( iobuf->data == txbuf ) {
			netdev_tx_complete ( netdev, iobuf );
			break;
		}
	}
}

/**
 * Poll for received packets
 *
 * Drains pending Tx completions via GetStatus, then receives packets
 * until the SNP reports EFI_NOT_READY (no more packets) or an error.
 *
 * @v netdev            Network device
 */
static void snpnet_poll ( struct net_device *netdev ) {
	struct snpnet_device *snpnetdev = netdev->priv;
	EFI_SIMPLE_NETWORK_PROTOCOL *snp = snpnetdev->snp;
	EFI_STATUS efirc;
	struct io_buffer *iobuf = NULL;
	UINTN len;
	void *txbuf;

	/* Process Tx completions */
	while ( 1 ) {
		efirc = snp->GetStatus ( snp, NULL, &txbuf );
		if ( efirc ) {
			DBGC ( snp, "SNP %p could not get status %s\n", snp,
			       efi_strerror ( efirc ) );
			break;
		}

		if ( txbuf == NULL )
			break;

		snpnet_complete ( netdev, txbuf );
	}

	/* Process received packets */
	while ( 1 ) {
		/* The spec is not clear if the max packet size refers
		 * to the payload or the entire packet including
		 * headers. The Receive function needs a buffer large
		 * enough to contain the headers, and potentially a
		 * 4-byte CRC and 4-byte VLAN tag (?), so add some
		 * breathing room.
		 */
		len = snp->Mode->MaxPacketSize + ETH_HLEN + 8;
		iobuf = alloc_iob ( len );
		if ( iobuf == NULL ) {
			netdev_rx_err ( netdev, NULL, -ENOMEM );
			break;
		}

		efirc = snp->Receive ( snp, NULL, &len, iobuf->data,
				       NULL, NULL, NULL );

		/* No packets left? */
		if ( efirc == EFI_NOT_READY ) {
			free_iob ( iobuf );
			break;
		}

		/* Other error? */
		if ( efirc ) {
			DBGC ( snp, "SNP %p receive packet error: %s "
			       "(len was %zd, is now %zd)\n",
			       snp, efi_strerror ( efirc ), iob_len(iobuf),
			       (size_t)len );
			netdev_rx_err ( netdev, iobuf, efirc );
			break;
		}

		/* Packet is valid, deliver it */
		iob_put ( iobuf, len );
		netdev_rx ( netdev, iob_disown ( iobuf ) );
	}
}

/**
 * Open NIC
 *
 * Records the SNP state so that snpnet_close() can restore it, brings
 * the interface to the initialized state, programs the default MAC
 * address and enables the widest receive filter the SNP supports.
 *
 * @v netdev            Net device
 * @ret rc              Return status code
 */
static int snpnet_open ( struct net_device *netdev ) {
	struct snpnet_device *snpnetdev = netdev->priv;
	EFI_SIMPLE_NETWORK_PROTOCOL *snp = snpnetdev->snp;
	EFI_STATUS efirc;
	UINT32 enableFlags, disableFlags;

	snpnetdev->close_state = snp->Mode->State;
	if ( snp->Mode->State != EfiSimpleNetworkInitialized ) {
		efirc = snp->Initialize ( snp, 0, 0 );
		if ( efirc ) {
			DBGC ( snp, "SNP %p could not initialize: %s\n",
			       snp, efi_strerror ( efirc ) );
			return EFIRC_TO_RC ( efirc );
		}
	}

	/* Use the default MAC address */
	efirc = snp->StationAddress ( snp, FALSE,
				      (EFI_MAC_ADDRESS *)netdev->ll_addr );
	if ( efirc ) {
		DBGC ( snp, "SNP %p could not reset station address: %s\n",
		       snp, efi_strerror ( efirc ) );
	}

	/* Set up receive filters to receive unicast and broadcast packets
	 * always. Also, enable either promiscuous multicast (if possible) or
	 * promiscuous operation, in order to catch all multicast packets.
	 */
	enableFlags = snp->Mode->ReceiveFilterMask &
		      ( EFI_SIMPLE_NETWORK_RECEIVE_UNICAST |
			EFI_SIMPLE_NETWORK_RECEIVE_BROADCAST );
	disableFlags = snp->Mode->ReceiveFilterMask &
		       ( EFI_SIMPLE_NETWORK_RECEIVE_MULTICAST |
			 EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS |
			 EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS_MULTICAST );
	if ( snp->Mode->ReceiveFilterMask &
	     EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS_MULTICAST ) {
		enableFlags |= EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS_MULTICAST;
	} else if ( snp->Mode->ReceiveFilterMask &
		    EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS ) {
		enableFlags |= EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS;
	}
	disableFlags &= ~enableFlags;
	efirc = snp->ReceiveFilters ( snp, enableFlags, disableFlags,
				      FALSE, 0, NULL );
	if ( efirc ) {
		DBGC ( snp, "SNP %p could not set receive filters: %s\n",
		       snp, efi_strerror ( efirc ) );
	}

	DBGC ( snp, "SNP %p opened\n", snp );
	return 0;
}

/**
 * Close NIC
 *
 * Shuts the SNP down only if it was not already initialized when we
 * opened it, restoring the pre-open state.
 *
 * @v netdev            Net device
 */
static void snpnet_close ( struct net_device *netdev ) {
	struct snpnet_device *snpnetdev = netdev->priv;
	EFI_SIMPLE_NETWORK_PROTOCOL *snp = snpnetdev->snp;
	EFI_STATUS efirc;

	if ( snpnetdev->close_state != EfiSimpleNetworkInitialized ) {
		efirc = snp->Shutdown ( snp );
		if ( efirc ) {
			DBGC ( snp, "SNP %p could not shut down: %s\n",
			       snp, efi_strerror ( efirc ) );
		}
	}
}

/**
 * Enable/disable interrupts
 *
 * @v netdev            Net device
 * @v enable            Interrupts should be enabled
 */
static void snpnet_irq ( struct net_device *netdev, int enable ) {
	struct snpnet_device *snpnetdev = netdev->priv;
	EFI_SIMPLE_NETWORK_PROTOCOL *snp = snpnetdev->snp;

	/* On EFI, interrupts are never necessary. (This function is only
	 * required for BIOS PXE.) If interrupts were required, they could be
	 * simulated using a fast timer.
	 */
	DBGC ( snp, "SNP %p cannot %s interrupts\n",
	       snp, ( enable ? "enable" : "disable" ) );
}

/** SNP network device operations */
static struct net_device_operations snpnet_operations = {
	.open = snpnet_open,
	.close = snpnet_close,
	.transmit = snpnet_transmit,
	.poll = snpnet_poll,
	.irq = snpnet_irq,
};

/**
 * Probe SNP device
 *
 * Allocates and registers a net device on top of the SNP, starting the
 * SNP if necessary and remembering its initial state so that
 * snpnet_remove() can restore it.
 *
 * @v snpdev		SNP device
 * @ret rc		Return status code
 */
int snpnet_probe ( struct snp_device *snpdev ) {
	EFI_SIMPLE_NETWORK_PROTOCOL *snp = snpdev->snp;
	EFI_STATUS efirc;
	struct net_device *netdev;
	struct snpnet_device *snpnetdev;
	int rc;

	DBGC ( snp, "SNP %p probing...\n", snp );

	/* Allocate net device */
	netdev = alloc_etherdev ( sizeof ( struct snpnet_device ) );
	if ( ! netdev )
		return -ENOMEM;
	netdev_init ( netdev, &snpnet_operations );
	netdev->dev = &snpdev->dev;
	snpdev->netdev = netdev;
	snpnetdev = netdev->priv;
	snpnetdev->snp = snp;
	snpdev->removal_state = snp->Mode->State;

	/* Start the interface */
	if ( snp->Mode->State == EfiSimpleNetworkStopped ) {
		efirc = snp->Start ( snp );
		if ( efirc ) {
			DBGC ( snp, "SNP %p could not start: %s\n", snp,
			       efi_strerror ( efirc ) );
			rc = EFIRC_TO_RC ( efirc );
			goto err_start;
		}
	}

	if ( snp->Mode->HwAddressSize > sizeof ( netdev->hw_addr ) ) {
		DBGC ( snp, "SNP %p hardware address is too large\n", snp );
		rc = -EINVAL;
		goto err_hwaddr;
	}
	memcpy ( netdev->hw_addr, snp->Mode->PermanentAddress.Addr,
		 snp->Mode->HwAddressSize );

	/* Mark as link up; we don't handle link state */
	netdev_link_up ( netdev );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register;

	DBGC ( snp, "SNP %p added\n", snp );
	return 0;

err_register:
err_hwaddr:
	if ( snpdev->removal_state == EfiSimpleNetworkStopped )
		snp->Stop ( snp );

err_start:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
	snpdev->netdev = NULL;
	return rc;
}

/**
 * Remove SNP device
 *
 * Restores the SNP to the state recorded at probe time (shutting down
 * and/or stopping it as needed), then unregisters and frees the net
 * device.
 *
 * @v snpdev		SNP device
 */
void snpnet_remove ( struct snp_device *snpdev ) {
	EFI_SIMPLE_NETWORK_PROTOCOL *snp = snpdev->snp;
	EFI_STATUS efirc;
	struct net_device *netdev = snpdev->netdev;

	if ( snp->Mode->State == EfiSimpleNetworkInitialized &&
	     snpdev->removal_state != EfiSimpleNetworkInitialized ) {
		DBGC ( snp, "SNP %p shutting down\n", snp );
		efirc = snp->Shutdown ( snp );
		if ( efirc ) {
			DBGC ( snp, "SNP %p could not shut down: %s\n",
			       snp, efi_strerror ( efirc ) );
		}
	}

	if ( snp->Mode->State == EfiSimpleNetworkStarted &&
	     snpdev->removal_state == EfiSimpleNetworkStopped ) {
		DBGC ( snp, "SNP %p stopping\n", snp );
		efirc = snp->Stop ( snp );
		if ( efirc ) {
			DBGC ( snp, "SNP %p could not be stopped\n", snp );
		}
	}

	/* Unregister net device */
	unregister_netdev ( netdev );

	/* Free network device */
	netdev_nullify ( netdev );
	netdev_put ( netdev );

	DBGC ( snp, "SNP %p removed\n", snp );
}
gpl-2.0
seidler2547/kernel-oxnas820
net/bluetooth/hidp/core.c
7
24910
/* HIDP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/freezer.h> #include <linux/fcntl.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/ioctl.h> #include <linux/file.h> #include <linux/init.h> #include <linux/wait.h> #include <net/sock.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/hidraw.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "hidp.h" #define VERSION "1.2" static DECLARE_RWSEM(hidp_session_sem); static LIST_HEAD(hidp_session_list); static unsigned char hidp_keycode[256] = { 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106, 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190, 191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113, 115,114, 0, 0, 0,121, 0, 89, 93,124, 92, 94, 95, 0, 0, 0, 122,123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113, 150,158,159,128,136,177,178,176,142,152,173,140 }; static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) { struct hidp_session *session; struct list_head *p; BT_DBG(""); list_for_each(p, &hidp_session_list) { session = list_entry(p, struct hidp_session, list); if (!bacmp(bdaddr, &session->bdaddr)) 
return session; } return NULL; } static void __hidp_link_session(struct hidp_session *session) { __module_get(THIS_MODULE); list_add(&session->list, &hidp_session_list); hci_conn_hold_device(session->conn); } static void __hidp_unlink_session(struct hidp_session *session) { hci_conn_put_device(session->conn); list_del(&session->list); module_put(THIS_MODULE); } static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) { bacpy(&ci->bdaddr, &session->bdaddr); ci->flags = session->flags; ci->state = session->state; ci->vendor = 0x0000; ci->product = 0x0000; ci->version = 0x0000; memset(ci->name, 0, 128); if (session->input) { ci->vendor = session->input->id.vendor; ci->product = session->input->id.product; ci->version = session->input->id.version; if (session->input->name) strncpy(ci->name, session->input->name, 128); else strncpy(ci->name, "HID Boot Device", 128); } if (session->hid) { ci->vendor = session->hid->vendor; ci->product = session->hid->product; ci->version = session->hid->version; strncpy(ci->name, session->hid->name, 128); } } static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev, unsigned int type, unsigned int code, int value) { unsigned char newleds; struct sk_buff *skb; BT_DBG("session %p type %d code %d value %d", session, type, code, value); if (type != EV_LED) return -1; newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | (!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) | (!!test_bit(LED_NUML, dev->led)); if (session->leds == newleds) return 0; session->leds = newleds; if (!(skb = alloc_skb(3, GFP_ATOMIC))) { BT_ERR("Can't allocate memory for new frame"); return -ENOMEM; } *skb_put(skb, 1) = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; *skb_put(skb, 1) = 0x01; *skb_put(skb, 1) = newleds; skb_queue_tail(&session->intr_transmit, skb); hidp_schedule(session); return 0; } static int hidp_hidinput_event(struct input_dev *dev, 
unsigned int type, unsigned int code, int value) { struct hid_device *hid = input_get_drvdata(dev); struct hidp_session *session = hid->driver_data; return hidp_queue_event(session, dev, type, code, value); } static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct hidp_session *session = input_get_drvdata(dev); return hidp_queue_event(session, dev, type, code, value); } static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) { struct input_dev *dev = session->input; unsigned char *keys = session->keys; unsigned char *udata = skb->data + 1; signed char *sdata = skb->data + 1; int i, size = skb->len - 1; switch (skb->data[0]) { case 0x01: /* Keyboard report */ for (i = 0; i < 8; i++) input_report_key(dev, hidp_keycode[i + 224], (udata[0] >> i) & 1); /* If all the key codes have been set to 0x01, it means * too many keys were pressed at the same time. */ if (!memcmp(udata + 2, hidp_mkeyspat, 6)) break; for (i = 2; i < 8; i++) { if (keys[i] > 3 && memscan(udata + 2, keys[i], 6) == udata + 8) { if (hidp_keycode[keys[i]]) input_report_key(dev, hidp_keycode[keys[i]], 0); else BT_ERR("Unknown key (scancode %#x) released.", keys[i]); } if (udata[i] > 3 && memscan(keys + 2, udata[i], 6) == keys + 8) { if (hidp_keycode[udata[i]]) input_report_key(dev, hidp_keycode[udata[i]], 1); else BT_ERR("Unknown key (scancode %#x) pressed.", udata[i]); } } memcpy(keys, udata, 8); break; case 0x02: /* Mouse report */ input_report_key(dev, BTN_LEFT, sdata[0] & 0x01); input_report_key(dev, BTN_RIGHT, sdata[0] & 0x02); input_report_key(dev, BTN_MIDDLE, sdata[0] & 0x04); input_report_key(dev, BTN_SIDE, sdata[0] & 0x08); input_report_key(dev, BTN_EXTRA, sdata[0] & 0x10); input_report_rel(dev, REL_X, sdata[1]); input_report_rel(dev, REL_Y, sdata[2]); if (size > 3) input_report_rel(dev, REL_WHEEL, sdata[3]); break; } input_sync(dev); } static int hidp_queue_report(struct hidp_session *session, unsigned char *data, int 
size) { struct sk_buff *skb; BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size); if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { BT_ERR("Can't allocate memory for new frame"); return -ENOMEM; } *skb_put(skb, 1) = 0xa2; if (size > 0) memcpy(skb_put(skb, size), data, size); skb_queue_tail(&session->intr_transmit, skb); hidp_schedule(session); return 0; } static int hidp_send_report(struct hidp_session *session, struct hid_report *report) { unsigned char buf[32]; int rsize; rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0); if (rsize > sizeof(buf)) return -EIO; hid_output_report(report, buf); return hidp_queue_report(session, buf, rsize); } static void hidp_idle_timeout(unsigned long arg) { struct hidp_session *session = (struct hidp_session *) arg; atomic_inc(&session->terminate); hidp_schedule(session); } static void hidp_set_timer(struct hidp_session *session) { if (session->idle_to > 0) mod_timer(&session->timer, jiffies + HZ * session->idle_to); } static inline void hidp_del_timer(struct hidp_session *session) { if (session->idle_to > 0) del_timer(&session->timer); } static int __hidp_send_ctrl_message(struct hidp_session *session, unsigned char hdr, unsigned char *data, int size) { struct sk_buff *skb; BT_DBG("session %p data %p size %d", session, data, size); if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { BT_ERR("Can't allocate memory for new frame"); return -ENOMEM; } *skb_put(skb, 1) = hdr; if (data && size > 0) memcpy(skb_put(skb, size), data, size); skb_queue_tail(&session->ctrl_transmit, skb); return 0; } static inline int hidp_send_ctrl_message(struct hidp_session *session, unsigned char hdr, unsigned char *data, int size) { int err; err = __hidp_send_ctrl_message(session, hdr, data, size); hidp_schedule(session); return err; } static void hidp_process_handshake(struct hidp_session *session, unsigned char param) { BT_DBG("session %p param 0x%02x", session, param); switch (param) { case HIDP_HSHK_SUCCESSFUL: /* FIXME: 
Call into SET_ GET_ handlers here */ break; case HIDP_HSHK_NOT_READY: case HIDP_HSHK_ERR_INVALID_REPORT_ID: case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: case HIDP_HSHK_ERR_INVALID_PARAMETER: /* FIXME: Call into SET_ GET_ handlers here */ break; case HIDP_HSHK_ERR_UNKNOWN: break; case HIDP_HSHK_ERR_FATAL: /* Device requests a reboot, as this is the only way this error * can be recovered. */ __hidp_send_ctrl_message(session, HIDP_TRANS_HID_CONTROL | HIDP_CTRL_SOFT_RESET, NULL, 0); break; default: __hidp_send_ctrl_message(session, HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); break; } } static void hidp_process_hid_control(struct hidp_session *session, unsigned char param) { BT_DBG("session %p param 0x%02x", session, param); if (param == HIDP_CTRL_VIRTUAL_CABLE_UNPLUG) { /* Flush the transmit queues */ skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); /* Kill session thread */ atomic_inc(&session->terminate); } } static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb, unsigned char param) { BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param); switch (param) { case HIDP_DATA_RTYPE_INPUT: hidp_set_timer(session); if (session->input) hidp_input_report(session, skb); if (session->hid) hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); break; case HIDP_DATA_RTYPE_OTHER: case HIDP_DATA_RTYPE_OUPUT: case HIDP_DATA_RTYPE_FEATURE: break; default: __hidp_send_ctrl_message(session, HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); } } static void hidp_recv_ctrl_frame(struct hidp_session *session, struct sk_buff *skb) { unsigned char hdr, type, param; BT_DBG("session %p skb %p len %d", session, skb, skb->len); hdr = skb->data[0]; skb_pull(skb, 1); type = hdr & HIDP_HEADER_TRANS_MASK; param = hdr & HIDP_HEADER_PARAM_MASK; switch (type) { case HIDP_TRANS_HANDSHAKE: hidp_process_handshake(session, param); break; case HIDP_TRANS_HID_CONTROL: 
hidp_process_hid_control(session, param); break; case HIDP_TRANS_DATA: hidp_process_data(session, skb, param); break; default: __hidp_send_ctrl_message(session, HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_UNSUPPORTED_REQUEST, NULL, 0); break; } kfree_skb(skb); } static void hidp_recv_intr_frame(struct hidp_session *session, struct sk_buff *skb) { unsigned char hdr; BT_DBG("session %p skb %p len %d", session, skb, skb->len); hdr = skb->data[0]; skb_pull(skb, 1); if (hdr == (HIDP_TRANS_DATA | HIDP_DATA_RTYPE_INPUT)) { hidp_set_timer(session); if (session->input) hidp_input_report(session, skb); if (session->hid) { hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); BT_DBG("report len %d", skb->len); } } else { BT_DBG("Unsupported protocol header 0x%02x", hdr); } kfree_skb(skb); } static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) { struct kvec iv = { data, len }; struct msghdr msg; BT_DBG("sock %p data %p len %d", sock, data, len); if (!len) return 0; memset(&msg, 0, sizeof(msg)); return kernel_sendmsg(sock, &msg, &iv, 1, len); } static void hidp_process_transmit(struct hidp_session *session) { struct sk_buff *skb; BT_DBG("session %p", session); while ((skb = skb_dequeue(&session->ctrl_transmit))) { if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { skb_queue_head(&session->ctrl_transmit, skb); break; } hidp_set_timer(session); kfree_skb(skb); } while ((skb = skb_dequeue(&session->intr_transmit))) { if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { skb_queue_head(&session->intr_transmit, skb); break; } hidp_set_timer(session); kfree_skb(skb); } } static int hidp_session(void *arg) { struct hidp_session *session = arg; struct sock *ctrl_sk = session->ctrl_sock->sk; struct sock *intr_sk = session->intr_sock->sk; struct sk_buff *skb; int vendor = 0x0000, product = 0x0000; wait_queue_t ctrl_wait, intr_wait; BT_DBG("session %p", session); if (session->input) { vendor = session->input->id.vendor; 
product = session->input->id.product; } if (session->hid) { vendor = session->hid->vendor; product = session->hid->product; } daemonize("khidpd_%04x%04x", vendor, product); set_user_nice(current, -15); init_waitqueue_entry(&ctrl_wait, current); init_waitqueue_entry(&intr_wait, current); add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); add_wait_queue(intr_sk->sk_sleep, &intr_wait); while (!atomic_read(&session->terminate)) { set_current_state(TASK_INTERRUPTIBLE); if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED) break; while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { skb_orphan(skb); hidp_recv_ctrl_frame(session, skb); } while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { skb_orphan(skb); hidp_recv_intr_frame(session, skb); } hidp_process_transmit(session); schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(intr_sk->sk_sleep, &intr_wait); remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); down_write(&hidp_session_sem); hidp_del_timer(session); if (session->input) { input_unregister_device(session->input); session->input = NULL; } if (session->hid) { if (session->hid->claimed & HID_CLAIMED_INPUT) hidinput_disconnect(session->hid); if (session->hid->claimed & HID_CLAIMED_HIDRAW) hidraw_disconnect(session->hid); hid_destroy_device(session->hid); session->hid = NULL; } /* Wakeup user-space polling for socket errors */ session->intr_sock->sk->sk_err = EUNATCH; session->ctrl_sock->sk->sk_err = EUNATCH; hidp_schedule(session); fput(session->intr_sock->file); wait_event_timeout(*(ctrl_sk->sk_sleep), (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); fput(session->ctrl_sock->file); __hidp_unlink_session(session); up_write(&hidp_session_sem); kfree(session); return 0; } static struct device *hidp_get_device(struct hidp_session *session) { bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; struct device *device = NULL; struct hci_dev *hdev; hdev = 
hci_get_route(dst, src); if (!hdev) return NULL; session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); if (session->conn) device = &session->conn->dev; hci_dev_put(hdev); return device; } static int hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req) { struct input_dev *input; int err, i; input = input_allocate_device(); if (!input) return -ENOMEM; session->input = input; input_set_drvdata(input, session); input->name = "Bluetooth HID Boot Protocol Device"; input->id.bustype = BUS_BLUETOOTH; input->id.vendor = req->vendor; input->id.product = req->product; input->id.version = req->version; if (req->subclass & 0x40) { set_bit(EV_KEY, input->evbit); set_bit(EV_LED, input->evbit); set_bit(EV_REP, input->evbit); set_bit(LED_NUML, input->ledbit); set_bit(LED_CAPSL, input->ledbit); set_bit(LED_SCROLLL, input->ledbit); set_bit(LED_COMPOSE, input->ledbit); set_bit(LED_KANA, input->ledbit); for (i = 0; i < sizeof(hidp_keycode); i++) set_bit(hidp_keycode[i], input->keybit); clear_bit(0, input->keybit); } if (req->subclass & 0x80) { input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); input->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); input->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); input->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA); input->relbit[0] |= BIT_MASK(REL_WHEEL); } input->dev.parent = hidp_get_device(session); input->event = hidp_input_event; err = input_register_device(input); if (err < 0) { hci_conn_put_device(session->conn); return err; } return 0; } static int hidp_open(struct hid_device *hid) { return 0; } static void hidp_close(struct hid_device *hid) { } static int hidp_parse(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; struct hidp_connadd_req *req = session->req; unsigned char *buf; int ret; buf = kmalloc(req->rd_size, GFP_KERNEL); if (!buf) return -ENOMEM; if (copy_from_user(buf, req->rd_data, req->rd_size)) { 
kfree(buf); return -EFAULT; } ret = hid_parse_report(session->hid, buf, req->rd_size); kfree(buf); if (ret) return ret; session->req = NULL; return 0; } static int hidp_start(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; struct hid_report *report; list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT]. report_list, list) hidp_send_report(session, report); list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT]. report_list, list) hidp_send_report(session, report); return 0; } static void hidp_stop(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); if (hid->claimed & HID_CLAIMED_INPUT) hidinput_disconnect(hid); hid->claimed = 0; } static struct hid_ll_driver hidp_hid_driver = { .parse = hidp_parse, .start = hidp_start, .stop = hidp_stop, .open = hidp_open, .close = hidp_close, .hidinput_input_event = hidp_hidinput_event, }; static int hidp_setup_hid(struct hidp_session *session, struct hidp_connadd_req *req) { struct hid_device *hid; bdaddr_t src, dst; int err; hid = hid_allocate_device(); if (IS_ERR(hid)) return PTR_ERR(session->hid); session->hid = hid; session->req = req; hid->driver_data = session; baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); baswap(&dst, &bt_sk(session->ctrl_sock->sk)->dst); hid->bus = BUS_BLUETOOTH; hid->vendor = req->vendor; hid->product = req->product; hid->version = req->version; hid->country = req->country; strncpy(hid->name, req->name, 128); strncpy(hid->phys, batostr(&src), 64); strncpy(hid->uniq, batostr(&dst), 64); hid->dev.parent = hidp_get_device(session); hid->ll_driver = &hidp_hid_driver; err = hid_add_device(hid); if (err < 0) goto failed; return 0; failed: hid_destroy_device(hid); session->hid = NULL; return err; } int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) { struct hidp_session *session, *s; int err; 
BT_DBG(""); if (bacmp(&bt_sk(ctrl_sock->sk)->src, &bt_sk(intr_sock->sk)->src) || bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) return -ENOTUNIQ; session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); if (!session) return -ENOMEM; BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size); down_write(&hidp_session_sem); s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); if (s && s->state == BT_CONNECTED) { err = -EEXIST; goto failed; } bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu); session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu); BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu); session->ctrl_sock = ctrl_sock; session->intr_sock = intr_sock; session->state = BT_CONNECTED; setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session); skb_queue_head_init(&session->ctrl_transmit); skb_queue_head_init(&session->intr_transmit); session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); session->idle_to = req->idle_to; if (req->rd_size > 0) { err = hidp_setup_hid(session, req); if (err && err != -ENODEV) goto purge; } if (!session->hid) { err = hidp_setup_input(session, req); if (err < 0) goto purge; } __hidp_link_session(session); hidp_set_timer(session); err = kernel_thread(hidp_session, session, CLONE_KERNEL); if (err < 0) goto unlink; if (session->input) { hidp_send_ctrl_message(session, HIDP_TRANS_SET_PROTOCOL | HIDP_PROTO_BOOT, NULL, 0); session->flags |= (1 << HIDP_BOOT_PROTOCOL_MODE); session->leds = 0xff; hidp_input_event(session->input, EV_LED, 0, 0); } up_write(&hidp_session_sem); return 0; unlink: hidp_del_timer(session); __hidp_unlink_session(session); if (session->input) { input_unregister_device(session->input); session->input = NULL; } if (session->hid) { hid_destroy_device(session->hid); session->hid = NULL; } purge: 
skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); failed: up_write(&hidp_session_sem); input_free_device(session->input); kfree(session); return err; } int hidp_del_connection(struct hidp_conndel_req *req) { struct hidp_session *session; int err = 0; BT_DBG(""); down_read(&hidp_session_sem); session = __hidp_get_session(&req->bdaddr); if (session) { if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG)) { hidp_send_ctrl_message(session, HIDP_TRANS_HID_CONTROL | HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, NULL, 0); } else { /* Flush the transmit queues */ skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); /* Wakeup user-space polling for socket errors */ session->intr_sock->sk->sk_err = EUNATCH; session->ctrl_sock->sk->sk_err = EUNATCH; /* Kill session thread */ atomic_inc(&session->terminate); hidp_schedule(session); } } else err = -ENOENT; up_read(&hidp_session_sem); return err; } int hidp_get_connlist(struct hidp_connlist_req *req) { struct list_head *p; int err = 0, n = 0; BT_DBG(""); down_read(&hidp_session_sem); list_for_each(p, &hidp_session_list) { struct hidp_session *session; struct hidp_conninfo ci; session = list_entry(p, struct hidp_session, list); __hidp_copy_session(session, &ci); if (copy_to_user(req->ci, &ci, sizeof(ci))) { err = -EFAULT; break; } if (++n >= req->cnum) break; req->ci++; } req->cnum = n; up_read(&hidp_session_sem); return err; } int hidp_get_conninfo(struct hidp_conninfo *ci) { struct hidp_session *session; int err = 0; down_read(&hidp_session_sem); session = __hidp_get_session(&ci->bdaddr); if (session) __hidp_copy_session(session, ci); else err = -ENOENT; up_read(&hidp_session_sem); return err; } static const struct hid_device_id hidp_table[] = { { HID_BLUETOOTH_DEVICE(HID_ANY_ID, HID_ANY_ID) }, { } }; static struct hid_driver hidp_driver = { .name = "generic-bluetooth", .id_table = hidp_table, }; static int __init hidp_init(void) { int ret; l2cap_load(); BT_INFO("HIDP (Human 
Interface Emulation) ver %s", VERSION); ret = hid_register_driver(&hidp_driver); if (ret) goto err; ret = hidp_init_sockets(); if (ret) goto err_drv; return 0; err_drv: hid_unregister_driver(&hidp_driver); err: return ret; } static void __exit hidp_exit(void) { hidp_cleanup_sockets(); hid_unregister_driver(&hidp_driver); } module_init(hidp_init); module_exit(hidp_exit); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth HIDP ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("bt-proto-6");
gpl-2.0
Bloodyaugust/sugarlabcppboilerplate
lib/boost/libs/config/test/no_cxx14_lambda_capture_fail.cpp
7
1149
// This file was automatically generated on Sat Oct 11 19:26:17 2014 // by libs/config/tools/generate.cpp // Copyright John Maddock 2002-4. // Use, modification and distribution are subject to the // Boost Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // See http://www.boost.org/libs/config for the most recent version.// // Revision $Id$ // // Test file for macro BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES // This file should not compile, if it does then // BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES should not be defined. // See file boost_no_cxx14_lambda_capture.ipp for details // Must not have BOOST_ASSERT_CONFIG set; it defeats // the objective of this file: #ifdef BOOST_ASSERT_CONFIG # undef BOOST_ASSERT_CONFIG #endif #include <boost/config.hpp> #include "test.hpp" #ifdef BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES #include "boost_no_cxx14_lambda_capture.ipp" #else #error "this file should not compile" #endif int main( int, char *[] ) { return boost_no_cxx14_initialized_lambda_captures::test(); }
gpl-2.0
zhuolinho/linphone
submodules/externals/polarssl/programs/random/gen_entropy.c
7
2347
/** * \brief Use and generate multiple entropies calls into a file * * Copyright (C) 2006-2011, Brainspark B.V. * * This file is part of PolarSSL (http://www.polarssl.org) * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org> * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #if !defined(POLARSSL_CONFIG_FILE) #include "polarssl/config.h" #else #include POLARSSL_CONFIG_FILE #endif #include "polarssl/entropy.h" #include <stdio.h> #if !defined(POLARSSL_ENTROPY_C) int main( int argc, char *argv[] ) { ((void) argc); ((void) argv); printf("POLARSSL_ENTROPY_C not defined.\n"); return( 0 ); } #else int main( int argc, char *argv[] ) { FILE *f; int i, k, ret; entropy_context entropy; unsigned char buf[ENTROPY_BLOCK_SIZE]; if( argc < 2 ) { fprintf( stderr, "usage: %s <output filename>\n", argv[0] ); return( 1 ); } if( ( f = fopen( argv[1], "wb+" ) ) == NULL ) { printf( "failed to open '%s' for writing.\n", argv[0] ); return( 1 ); } entropy_init( &entropy ); for( i = 0, k = 768; i < k; i++ ) { ret = entropy_func( &entropy, buf, sizeof( buf ) ); if( ret != 0 ) { printf("failed!\n"); goto cleanup; } fwrite( buf, 1, sizeof( buf ), f ); printf( "Generating 32Mb of data in file '%s'... 
%04.1f" \ "%% done\r", argv[1], (100 * (float) (i + 1)) / k ); fflush( stdout ); } ret = 0; cleanup: fclose( f ); entropy_free( &entropy ); return( ret ); } #endif /* POLARSSL_ENTROPY_C */
gpl-2.0
christianjann/L4T_PREEMPT_RT
arch/arm/mach-tegra/tegra12_clocks.c
7
296370
/* * arch/arm/mach-tegra/tegra12_clocks.c * * Copyright (C) 2011-2014 NVIDIA Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/syscore_ops.h> #include <linux/platform_device.h> #include <linux/tegra-soc.h> #include <linux/tegra-fuse.h> #include <asm/clkdev.h> #include <mach/edp.h> #include <mach/mc.h> #include "clock.h" #include "dvfs.h" #include "iomap.h" #include "pm.h" #include "sleep.h" #include "devices.h" #include "tegra12_emc.h" #include "tegra_cl_dvfs.h" #include "cpu-tegra.h" #include "tegra11_soctherm.h" #define RST_DEVICES_L 0x004 #define RST_DEVICES_H 0x008 #define RST_DEVICES_U 0x00C #define RST_DEVICES_V 0x358 #define RST_DEVICES_W 0x35C #define RST_DEVICES_X 0x28C #define RST_DEVICES_SET_L 0x300 #define RST_DEVICES_CLR_L 0x304 #define RST_DEVICES_SET_V 0x430 #define RST_DEVICES_CLR_V 0x434 #define RST_DEVICES_SET_X 0x290 #define RST_DEVICES_CLR_X 0x294 #define RST_DEVICES_NUM 6 #define CLK_OUT_ENB_L 0x010 #define CLK_OUT_ENB_H 0x014 #define CLK_OUT_ENB_U 0x018 #define CLK_OUT_ENB_V 0x360 #define CLK_OUT_ENB_W 0x364 #define CLK_OUT_ENB_X 0x280 #define CLK_OUT_ENB_SET_L 0x320 
#define CLK_OUT_ENB_CLR_L 0x324 #define CLK_OUT_ENB_SET_V 0x440 #define CLK_OUT_ENB_CLR_V 0x444 #define CLK_OUT_ENB_SET_X 0x284 #define CLK_OUT_ENB_CLR_X 0x288 #define CLK_OUT_ENB_NUM 6 #define CLK_OUT_ENB_L_RESET_MASK 0xfcd7dff1 #define CLK_OUT_ENB_H_RESET_MASK 0xefddfff7 #define CLK_OUT_ENB_U_RESET_MASK 0xfbfefbfa #define CLK_OUT_ENB_V_RESET_MASK 0xffc1fffb #define CLK_OUT_ENB_W_RESET_MASK 0x3f7fbfff #define CLK_OUT_ENB_X_RESET_MASK 0x00170979 #define RST_DEVICES_V_SWR_CPULP_RST_DIS (0x1 << 1) /* Reserved on Tegra11 */ #define CLK_OUT_ENB_V_CLK_ENB_CPULP_EN (0x1 << 1) #define PERIPH_CLK_TO_BIT(c) (1 << (c->u.periph.clk_num % 32)) #define PERIPH_CLK_TO_RST_REG(c) \ periph_clk_to_reg((c), RST_DEVICES_L, RST_DEVICES_V, RST_DEVICES_X, 4) #define PERIPH_CLK_TO_RST_SET_REG(c) \ periph_clk_to_reg((c), RST_DEVICES_SET_L, RST_DEVICES_SET_V, \ RST_DEVICES_SET_X, 8) #define PERIPH_CLK_TO_RST_CLR_REG(c) \ periph_clk_to_reg((c), RST_DEVICES_CLR_L, RST_DEVICES_CLR_V, \ RST_DEVICES_CLR_X, 8) #define PERIPH_CLK_TO_ENB_REG(c) \ periph_clk_to_reg((c), CLK_OUT_ENB_L, CLK_OUT_ENB_V, CLK_OUT_ENB_X, 4) #define PERIPH_CLK_TO_ENB_SET_REG(c) \ periph_clk_to_reg((c), CLK_OUT_ENB_SET_L, CLK_OUT_ENB_SET_V, \ CLK_OUT_ENB_SET_X, 8) #define PERIPH_CLK_TO_ENB_CLR_REG(c) \ periph_clk_to_reg((c), CLK_OUT_ENB_CLR_L, CLK_OUT_ENB_CLR_V, \ CLK_OUT_ENB_CLR_X, 8) #define IS_PERIPH_IN_RESET(c) \ (clk_readl(PERIPH_CLK_TO_RST_REG(c)) & PERIPH_CLK_TO_BIT(c)) #define CLK_MASK_ARM 0x44 #define MISC_CLK_ENB 0x48 #define OSC_CTRL 0x50 #define OSC_CTRL_OSC_FREQ_MASK (0xF<<28) #define OSC_CTRL_OSC_FREQ_13MHZ (0x0<<28) #define OSC_CTRL_OSC_FREQ_19_2MHZ (0x4<<28) #define OSC_CTRL_OSC_FREQ_12MHZ (0x8<<28) #define OSC_CTRL_OSC_FREQ_26MHZ (0xC<<28) #define OSC_CTRL_OSC_FREQ_16_8MHZ (0x1<<28) #define OSC_CTRL_OSC_FREQ_38_4MHZ (0x5<<28) #define OSC_CTRL_OSC_FREQ_48MHZ (0x9<<28) #define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK) #define OSC_CTRL_PLL_REF_DIV_MASK (3<<26) #define OSC_CTRL_PLL_REF_DIV_1 (0<<26) #define 
OSC_CTRL_PLL_REF_DIV_2 (1<<26) #define OSC_CTRL_PLL_REF_DIV_4 (2<<26) #define PERIPH_CLK_SOURCE_I2S1 0x100 #define PERIPH_CLK_SOURCE_EMC 0x19c #define PERIPH_CLK_SOURCE_EMC_MC_SAME (1<<16) #define PERIPH_CLK_SOURCE_LA 0x1f8 #define PERIPH_CLK_SOURCE_NUM1 \ ((PERIPH_CLK_SOURCE_LA - PERIPH_CLK_SOURCE_I2S1) / 4) #define PERIPH_CLK_SOURCE_MSELECT 0x3b4 #define PERIPH_CLK_SOURCE_SE 0x42c #define PERIPH_CLK_SOURCE_NUM2 \ ((PERIPH_CLK_SOURCE_SE - PERIPH_CLK_SOURCE_MSELECT) / 4 + 1) #define AUDIO_DLY_CLK 0x49c #define AUDIO_SYNC_CLK_SPDIF 0x4b4 #define PERIPH_CLK_SOURCE_NUM3 \ ((AUDIO_SYNC_CLK_SPDIF - AUDIO_DLY_CLK) / 4 + 1) #define SPARE_REG 0x55c #define SPARE_REG_CLK_M_DIVISOR_SHIFT 2 #define SPARE_REG_CLK_M_DIVISOR_MASK (3 << SPARE_REG_CLK_M_DIVISOR_SHIFT) #define PERIPH_CLK_SOURCE_XUSB_HOST 0x600 #define PERIPH_CLK_SOURCE_VIC 0x678 #define PERIPH_CLK_SOURCE_NUM4 \ ((PERIPH_CLK_SOURCE_VIC - PERIPH_CLK_SOURCE_XUSB_HOST) / 4 + 1) #define PERIPH_CLK_SOURCE_NUM (PERIPH_CLK_SOURCE_NUM1 + \ PERIPH_CLK_SOURCE_NUM2 + \ PERIPH_CLK_SOURCE_NUM3 + \ PERIPH_CLK_SOURCE_NUM4) #define CPU_SOFTRST_CTRL 0x380 #define CPU_SOFTRST_CTRL1 0x384 #define CPU_SOFTRST_CTRL2 0x388 #define PERIPH_CLK_SOURCE_DIVU71_MASK 0xFF #define PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF #define PERIPH_CLK_SOURCE_DIV_SHIFT 0 #define PERIPH_CLK_SOURCE_DIVIDLE_SHIFT 8 #define PERIPH_CLK_SOURCE_DIVIDLE_VAL 50 #define PERIPH_CLK_UART_DIV_ENB (1<<24) #define PERIPH_CLK_VI_SEL_EX_SHIFT 24 #define PERIPH_CLK_VI_SEL_EX_MASK (0x3<<PERIPH_CLK_VI_SEL_EX_SHIFT) #define PERIPH_CLK_NAND_DIV_EX_ENB (1<<8) #define PERIPH_CLK_DTV_POLARITY_INV (1<<25) #define AUDIO_SYNC_SOURCE_MASK 0x0F #define AUDIO_SYNC_DISABLE_BIT 0x10 #define AUDIO_SYNC_TAP_NIBBLE_SHIFT(c) ((c->reg_shift - 24) * 4) #define PERIPH_CLK_SOR_CLK_SEL_SHIFT 14 #define PERIPH_CLK_SOR_CLK_SEL_MASK (0x3<<PERIPH_CLK_SOR_CLK_SEL_SHIFT) /* PLL common */ #define PLL_BASE 0x0 #define PLL_BASE_BYPASS (1<<31) #define PLL_BASE_ENABLE (1<<30) #define PLL_BASE_REF_ENABLE (1<<29) 
#define PLL_BASE_OVERRIDE (1<<28) #define PLL_BASE_LOCK (1<<27) #define PLL_BASE_DIVP_MASK (0x7<<20) #define PLL_BASE_DIVP_SHIFT 20 #define PLL_BASE_DIVN_MASK (0x3FF<<8) #define PLL_BASE_DIVN_SHIFT 8 #define PLL_BASE_DIVM_MASK (0x1F) #define PLL_BASE_DIVM_SHIFT 0 #define PLL_BASE_PARSE(pll, cfg, b) \ do { \ (cfg).m = ((b) & pll##_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT; \ (cfg).n = ((b) & pll##_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT; \ (cfg).p = ((b) & pll##_BASE_DIVP_MASK) >> PLL_BASE_DIVP_SHIFT; \ } while (0) #define PLL_OUT_RATIO_MASK (0xFF<<8) #define PLL_OUT_RATIO_SHIFT 8 #define PLL_OUT_OVERRIDE (1<<2) #define PLL_OUT_CLKEN (1<<1) #define PLL_OUT_RESET_DISABLE (1<<0) #define PLL_MISC(c) \ (((c)->flags & PLL_ALT_MISC_REG) ? 0x4 : 0xc) #define PLL_MISCN(c, n) \ ((c)->u.pll.misc1 + ((n) - 1) * PLL_MISC(c)) #define PLL_MISC_LOCK_ENABLE(c) \ (((c)->flags & (PLLU | PLLD)) ? (1<<22) : (1<<18)) #define PLL_MISC_DCCON_SHIFT 20 #define PLL_MISC_CPCON_SHIFT 8 #define PLL_MISC_CPCON_MASK (0xF<<PLL_MISC_CPCON_SHIFT) #define PLL_MISC_LFCON_SHIFT 4 #define PLL_MISC_LFCON_MASK (0xF<<PLL_MISC_LFCON_SHIFT) #define PLL_MISC_VCOCON_SHIFT 0 #define PLL_MISC_VCOCON_MASK (0xF<<PLL_MISC_VCOCON_SHIFT) #define PLL_FIXED_MDIV(c, ref) ((ref) > (c)->u.pll.cf_max ? 
2 : 1) /* PLLU */ #define PLLU_BASE_OVERRIDE (1<<24) #define PLLU_BASE_POST_DIV (1<<20) /* PLLD */ #define PLLD_BASE_DIVN_MASK (0x7FF<<8) #define PLLD_BASE_CSI_CLKENABLE (1<<26) #define PLLD_BASE_DSI_MUX_SHIFT 25 #define PLLD_BASE_DSI_MUX_MASK (1<<PLLD_BASE_DSI_MUX_SHIFT) #define PLLD_BASE_CSI_CLKSOURCE (1<<23) #define PLLD_MISC_DSI_CLKENABLE (1<<30) #define PLLD_MISC_DIV_RST (1<<23) #define PLLD_MISC_DCCON_SHIFT 12 #define PLLDU_LFCON 2 /* PLLC2 and PLLC3 (PLLCX) */ #define PLLCX_USE_DYN_RAMP 0 #define PLLCX_BASE_PHASE_LOCK (1<<26) #define PLLCX_BASE_DIVP_MASK (0x7<<PLL_BASE_DIVP_SHIFT) #define PLLCX_BASE_DIVN_MASK (0xFF<<PLL_BASE_DIVN_SHIFT) #define PLLCX_BASE_DIVM_MASK (0x3<<PLL_BASE_DIVM_SHIFT) #define PLLCX_PDIV_MAX ((PLLCX_BASE_DIVP_MASK >> PLL_BASE_DIVP_SHIFT)) #define PLLCX_IS_DYN(new_p, old_p) (((new_p) <= 8) && ((old_p) <= 8)) #define PLLCX_MISC_STROBE (1<<31) #define PLLCX_MISC_RESET (1<<30) #define PLLCX_MISC_SDM_DIV_SHIFT 28 #define PLLCX_MISC_SDM_DIV_MASK (0x3 << PLLCX_MISC_SDM_DIV_SHIFT) #define PLLCX_MISC_FILT_DIV_SHIFT 26 #define PLLCX_MISC_FILT_DIV_MASK (0x3 << PLLCX_MISC_FILT_DIV_SHIFT) #define PLLCX_MISC_ALPHA_SHIFT 18 #define PLLCX_MISC_ALPHA_MASK (0xFF << PLLCX_MISC_ALPHA_SHIFT) #define PLLCX_MISC_KB_SHIFT 9 #define PLLCX_MISC_KB_MASK (0x1FF << PLLCX_MISC_KB_SHIFT) #define PLLCX_MISC_KA_SHIFT 2 #define PLLCX_MISC_KA_MASK (0x7F << PLLCX_MISC_KA_SHIFT) #define PLLCX_MISC_VCO_GAIN_SHIFT 0 #define PLLCX_MISC_VCO_GAIN_MASK (0x3 << PLLCX_MISC_VCO_GAIN_SHIFT) #define PLLCX_MISC_KOEF_LOW_RANGE \ ((0x14 << PLLCX_MISC_KA_SHIFT) | (0x23 << PLLCX_MISC_KB_SHIFT)) #define PLLCX_MISC_DIV_LOW_RANGE \ ((0x1 << PLLCX_MISC_SDM_DIV_SHIFT) | (0x1 << PLLCX_MISC_FILT_DIV_SHIFT)) #define PLLCX_MISC_DIV_HIGH_RANGE \ ((0x2 << PLLCX_MISC_SDM_DIV_SHIFT) | (0x2 << PLLCX_MISC_FILT_DIV_SHIFT)) #define PLLCX_FD_ULCK_FRM_SHIFT 12 #define PLLCX_FD_ULCK_FRM_MASK (0x3 << PLLCX_FD_ULCK_FRM_SHIFT) #define PLLCX_FD_LCK_FRM_SHIFT 8 #define PLLCX_FD_LCK_FRM_MASK (0x3 << 
PLLCX_FD_LCK_FRM_SHIFT) #define PLLCX_PD_ULCK_FRM_SHIFT 28 #define PLLCX_PD_ULCK_FRM_MASK (0x3 << PLLCX_PD_ULCK_FRM_SHIFT) #define PLLCX_PD_LCK_FRM_SHIFT 24 #define PLLCX_PD_LCK_FRM_MASK (0x3 << PLLCX_PD_LCK_FRM_SHIFT) #define PLLCX_PD_OUT_HYST_SHIFT 20 #define PLLCX_PD_OUT_HYST_MASK (0x3 << PLLCX_PD_OUT_HYST_SHIFT) #define PLLCX_PD_IN_HYST_SHIFT 16 #define PLLCX_PD_IN_HYST_MASK (0x3 << PLLCX_PD_IN_HYST_SHIFT) #define PLLCX_MISC_DEFAULT_VALUE ((0x0 << PLLCX_MISC_VCO_GAIN_SHIFT) | \ PLLCX_MISC_KOEF_LOW_RANGE | \ (0x19 << PLLCX_MISC_ALPHA_SHIFT) | \ PLLCX_MISC_DIV_LOW_RANGE | \ PLLCX_MISC_RESET) #define PLLCX_MISC1_DEFAULT_VALUE 0x000d2308 #define PLLCX_MISC2_DEFAULT_VALUE ((0x2 << PLLCX_PD_ULCK_FRM_SHIFT) | \ (0x1 << PLLCX_PD_LCK_FRM_SHIFT) | \ (0x3 << PLLCX_PD_OUT_HYST_SHIFT) | \ (0x1 << PLLCX_PD_IN_HYST_SHIFT) | \ (0x2 << PLLCX_FD_ULCK_FRM_SHIFT) | \ (0x2 << PLLCX_FD_LCK_FRM_SHIFT)) #define PLLCX_MISC3_DEFAULT_VALUE 0x200 #define PLLCX_MISC1_IDDQ (0x1 << 27) /* PLLX and PLLC (PLLXC)*/ #define PLLXC_USE_DYN_RAMP 0 #define PLLXC_BASE_DIVP_MASK (0xF<<PLL_BASE_DIVP_SHIFT) #define PLLXC_BASE_DIVN_MASK (0xFF<<PLL_BASE_DIVN_SHIFT) #define PLLXC_BASE_DIVM_MASK (0xFF<<PLL_BASE_DIVM_SHIFT) /* PLLXC has 4-bit PDIV, but entry 15 is not allowed in h/w, and s/w usage is limited to 5 */ #define PLLXC_PDIV_MAX 14 #define PLLXC_SW_PDIV_MAX 5 /* PLLX */ #define PLLX_MISC2_DYNRAMP_STEPB_SHIFT 24 #define PLLX_MISC2_DYNRAMP_STEPB_MASK (0xFF << PLLX_MISC2_DYNRAMP_STEPB_SHIFT) #define PLLX_MISC2_DYNRAMP_STEPA_SHIFT 16 #define PLLX_MISC2_DYNRAMP_STEPA_MASK (0xFF << PLLX_MISC2_DYNRAMP_STEPA_SHIFT) #define PLLX_MISC2_NDIV_NEW_SHIFT 8 #define PLLX_MISC2_NDIV_NEW_MASK (0xFF << PLLX_MISC2_NDIV_NEW_SHIFT) #define PLLX_MISC2_LOCK_OVERRIDE (0x1 << 4) #define PLLX_MISC2_DYNRAMP_DONE (0x1 << 2) #define PLLX_MISC2_CLAMP_NDIV (0x1 << 1) #define PLLX_MISC2_EN_DYNRAMP (0x1 << 0) #define PLLX_MISC3_IDDQ (0x1 << 3) #ifndef CONFIG_ARCH_TEGRA_13x_SOC #define PLLX_HW_CTRL_CFG 0x548 #else #define 
PLLX_HW_CTRL_CFG 0x14 #endif #define PLLX_HW_CTRL_CFG_SWCTRL (0x1 << 0) /* PLLC */ #define PLLC_BASE_LOCK_OVERRIDE (1<<28) #define PLLC_MISC_IDDQ (0x1 << 26) #define PLLC_MISC_LOCK_ENABLE (0x1 << 24) #define PLLC_MISC1_CLAMP_NDIV (0x1 << 26) #define PLLC_MISC1_EN_DYNRAMP (0x1 << 25) #define PLLC_MISC1_DYNRAMP_STEPA_SHIFT 17 #define PLLC_MISC1_DYNRAMP_STEPA_MASK (0xFF << PLLC_MISC1_DYNRAMP_STEPA_SHIFT) #define PLLC_MISC1_DYNRAMP_STEPB_SHIFT 9 #define PLLC_MISC1_DYNRAMP_STEPB_MASK (0xFF << PLLC_MISC1_DYNRAMP_STEPB_SHIFT) #define PLLC_MISC1_NDIV_NEW_SHIFT 1 #define PLLC_MISC1_NDIV_NEW_MASK (0xFF << PLLC_MISC1_NDIV_NEW_SHIFT) #define PLLC_MISC1_DYNRAMP_DONE (0x1 << 0) /* PLLM */ #define PLLM_BASE_DIVP_MASK (0xF << PLL_BASE_DIVP_SHIFT) #define PLLM_BASE_DIVN_MASK (0xFF << PLL_BASE_DIVN_SHIFT) #define PLLM_BASE_DIVM_MASK (0xFF << PLL_BASE_DIVM_SHIFT) /* PLLM has 4-bit PDIV, but entry 15 is not allowed in h/w, and s/w usage is limited to 5 */ #define PLLM_PDIV_MAX 14 #define PLLM_SW_PDIV_MAX 5 #define PLLM_MISC_FSM_SW_OVERRIDE (0x1 << 10) #define PLLM_MISC_IDDQ (0x1 << 5) #define PLLM_MISC_LOCK_DISABLE (0x1 << 4) #define PLLM_MISC_LOCK_OVERRIDE (0x1 << 3) #define PMC_PLLP_WB0_OVERRIDE 0xf8 #define PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE (1 << 12) #define PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE (1 << 11) /* M, N layout for PLLM override and base registers are the same */ #define PMC_PLLM_WB0_OVERRIDE 0x1dc #define PMC_PLLM_WB0_OVERRIDE_2 0x2b0 #define PMC_PLLM_WB0_OVERRIDE_2_DIVP_SHIFT 27 #define PMC_PLLM_WB0_OVERRIDE_2_DIVP_MASK (0xF << 27) /* PLLSS */ #define PLLSS_CFG(c) ((c)->u.pll.misc1 + 0) #define PLLSS_CTRL1(c) ((c)->u.pll.misc1 + 4) #define PLLSS_CTRL2(c) ((c)->u.pll.misc1 + 8) #define PLLSS_BASE_DIVP_MASK (0xF << PLL_BASE_DIVP_SHIFT) #define PLLSS_BASE_DIVN_MASK (0xFF << PLL_BASE_DIVN_SHIFT) #define PLLSS_BASE_DIVM_MASK (0xFF << PLL_BASE_DIVM_SHIFT) #define PLLSS_BASE_SOURCE_SHIFT 25 #define PLLSS_BASE_SOURCE_MASK (3 << PLLSS_BASE_SOURCE_SHIFT) /* PLLSS has 4-bit PDIV, but 
entry 15 is not allowed in h/w, and s/w usage is limited to 5 */ #define PLLSS_PDIV_MAX 14 #define PLLSS_SW_PDIV_MAX 5 #define PLLSS_MISC_LOCK_ENABLE (0x1 << 30) #define PLLSS_MISC_KCP_SHIFT 25 #define PLLSS_MISC_KCP_MASK (0x3 << PLLSS_MISC_KCP_SHIFT) #define PLLSS_MISC_KVCO_SHIFT 24 #define PLLSS_MISC_KVCO_MASK (0x1 << PLLSS_MISC_KVCO_SHIFT) #define PLLSS_MISC_SETUP_SHIFT 0 #define PLLSS_MISC_SETUP_MASK (0xFFFFFF << PLLSS_MISC_SETUP_SHIFT) #define PLLSS_BASE_LOCK_OVERRIDE (0x1 << 24) #define PLLSS_BASE_LOCK (0x1 << 27) #define PLLSS_BASE_IDDQ (0x1 << 19) #define PLLSS_MISC_DEFAULT_VALUE ( \ (PLLSS_MISC_KVCO << PLLSS_MISC_KVCO_SHIFT) | \ (PLLSS_MISC_SETUP << PLLSS_MISC_SETUP_SHIFT)) #define PLLSS_CFG_DEFAULT_VALUE ( \ (PLLSS_EN_SDM << 31) | \ (PLLSS_EN_SSC << 30) | \ (PLLSS_EN_DITHER2 << 29) | \ (PLLSS_EN_DITHER << 28) | \ (PLLSS_SDM_RESET << 27) | \ (PLLSS_CLAMP << 22)) #define PLLSS_CTRL1_DEFAULT_VALUE \ ((PLLSS_SDM_SSC_MAX << 16) | (PLLSS_SDM_SSC_MIN << 0)) #define PLLSS_CTRL2_DEFAULT_VALUE \ ((PLLSS_SDM_SSC_STEP << 16) | (PLLSS_SDM_DIN << 0)) /* PLLSS configuration */ #define PLLSS_MISC_KVCO 0 #define PLLSS_MISC_SETUP 0 #define PLLSS_EN_SDM 0 #define PLLSS_EN_SSC 0 #define PLLSS_EN_DITHER2 0 #define PLLSS_EN_DITHER 1 #define PLLSS_SDM_RESET 0 #define PLLSS_CLAMP 0 #define PLLSS_SDM_SSC_MAX 0 #define PLLSS_SDM_SSC_MIN 0 #define PLLSS_SDM_SSC_STEP 0 #define PLLSS_SDM_DIN 0 /* PLLDP SS parameters */ #define PLLDP_SS_CTRL1_0_DEFAULT_VALUE 0xF000E5EC #define PLLDP_SS_CTRL2_0_DEFAULT_VALUE 0x101BF000 #define PLLDP_SS_CFG_0_DEFAULT_VALUE 0xC0000000 /* PLLRE */ #define PLLRE_BASE_DIVP_SHIFT 16 #define PLLRE_BASE_DIVP_MASK (0xF << PLLRE_BASE_DIVP_SHIFT) #define PLLRE_BASE_DIVN_MASK (0xFF << PLL_BASE_DIVN_SHIFT) #define PLLRE_BASE_DIVM_MASK (0xFF << PLL_BASE_DIVM_SHIFT) /* PLLRE has 4-bit PDIV, but entry 15 is not allowed in h/w, and s/w usage is limited to 5 */ #define PLLRE_PDIV_MAX 14 #define PLLRE_SW_PDIV_MAX 5 #define PLLRE_MISC_LOCK_ENABLE (0x1 << 30) #define 
PLLRE_MISC_LOCK_OVERRIDE (0x1 << 29) #define PLLRE_MISC_LOCK (0x1 << 24) #define PLLRE_MISC_IDDQ (0x1 << 16) #define OUT_OF_TABLE_CPCON 0x8 #define SUPER_CLK_MUX 0x00 #define SUPER_STATE_SHIFT 28 #define SUPER_STATE_MASK (0xF << SUPER_STATE_SHIFT) #define SUPER_STATE_STANDBY (0x0 << SUPER_STATE_SHIFT) #define SUPER_STATE_IDLE (0x1 << SUPER_STATE_SHIFT) #define SUPER_STATE_RUN (0x2 << SUPER_STATE_SHIFT) #define SUPER_STATE_IRQ (0x3 << SUPER_STATE_SHIFT) #define SUPER_STATE_FIQ (0x4 << SUPER_STATE_SHIFT) #define SUPER_LP_DIV2_BYPASS (0x1 << 16) #define SUPER_SOURCE_MASK 0xF #define SUPER_FIQ_SOURCE_SHIFT 12 #define SUPER_IRQ_SOURCE_SHIFT 8 #define SUPER_RUN_SOURCE_SHIFT 4 #define SUPER_IDLE_SOURCE_SHIFT 0 #define SUPER_CLK_DIVIDER 0x04 #define SUPER_CLOCK_DIV_U71_SHIFT 16 #define SUPER_CLOCK_DIV_U71_MASK (0xff << SUPER_CLOCK_DIV_U71_SHIFT) #define CLK13_SOURCE_SHIFT 28 #define CLK13_SOURCE_MASK 0xF #define BUS_CLK_DISABLE (1<<3) #define BUS_CLK_DIV_MASK 0x3 #define PMC_CTRL 0x0 #define PMC_CTRL_BLINK_ENB (1 << 7) #define PMC_DPD_PADS_ORIDE 0x1c #define PMC_DPD_PADS_ORIDE_BLINK_ENB (1 << 20) #define PMC_BLINK_TIMER_DATA_ON_SHIFT 0 #define PMC_BLINK_TIMER_DATA_ON_MASK 0x7fff #define PMC_BLINK_TIMER_ENB (1 << 15) #define PMC_BLINK_TIMER_DATA_OFF_SHIFT 16 #define PMC_BLINK_TIMER_DATA_OFF_MASK 0xffff #define UTMIP_PLL_CFG2 0x488 #define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xfff) << 6) #define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN (1 << 0) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP (1 << 1) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN (1 << 2) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP (1 << 3) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN (1 << 4) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERUP (1 << 5) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN (1 << 24) #define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP (1 << 25) #define UTMIP_PLL_CFG1 0x484 #define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) 
(((x) & 0x1f) << 27) #define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) #define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP (1 << 15) #define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN (1 << 14) #define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN (1 << 12) #define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP (1 << 17) #define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN (1 << 16) /* PLLE */ #define PLLE_BASE_LOCK_OVERRIDE (0x1 << 29) #define PLLE_BASE_DIVCML_SHIFT 24 #define PLLE_BASE_DIVCML_MASK (0xf<<PLLE_BASE_DIVCML_SHIFT) #define PLLE_BASE_DIVN_MASK (0xFF<<PLL_BASE_DIVN_SHIFT) #define PLLE_BASE_DIVM_MASK (0xFF<<PLL_BASE_DIVM_SHIFT) /* PLLE has 4-bit CMLDIV, but entry 15 is not allowed in h/w */ #define PLLE_CMLDIV_MAX 14 #define PLLE_MISC_READY (1<<15) #define PLLE_MISC_IDDQ_SW_CTRL (1<<14) #define PLLE_MISC_IDDQ_SW_VALUE (1<<13) #define PLLE_MISC_LOCK (1<<11) #define PLLE_MISC_LOCK_ENABLE (1<<9) #define PLLE_MISC_PLLE_PTS (1<<8) #define PLLE_MISC_VREG_BG_CTRL_SHIFT 4 #define PLLE_MISC_VREG_BG_CTRL_MASK (0x3<<PLLE_MISC_VREG_BG_CTRL_SHIFT) #define PLLE_MISC_VREG_CTRL_SHIFT 2 #define PLLE_MISC_VREG_CTRL_MASK (0x3<<PLLE_MISC_VREG_CTRL_SHIFT) #define PLLE_SS_CTRL 0x68 #define PLLE_SS_INCINTRV_SHIFT 24 #define PLLE_SS_INCINTRV_MASK (0x3f<<PLLE_SS_INCINTRV_SHIFT) #define PLLE_SS_INC_SHIFT 16 #define PLLE_SS_INC_MASK (0xff<<PLLE_SS_INC_SHIFT) #define PLLE_SS_CNTL_INVERT (0x1 << 15) #define PLLE_SS_CNTL_CENTER (0x1 << 14) #define PLLE_SS_CNTL_SSC_BYP (0x1 << 12) #define PLLE_SS_CNTL_INTERP_RESET (0x1 << 11) #define PLLE_SS_CNTL_BYPASS_SS (0x1 << 10) #define PLLE_SS_MAX_SHIFT 0 #define PLLE_SS_MAX_MASK (0x1ff<<PLLE_SS_MAX_SHIFT) #define PLLE_SS_COEFFICIENTS_MASK \ (PLLE_SS_INCINTRV_MASK | PLLE_SS_INC_MASK | PLLE_SS_MAX_MASK) #define PLLE_SS_COEFFICIENTS_VAL \ ((0x20<<PLLE_SS_INCINTRV_SHIFT) | (0x1<<PLLE_SS_INC_SHIFT) | \ (0x25<<PLLE_SS_MAX_SHIFT)) #define PLLE_SS_DISABLE (PLLE_SS_CNTL_SSC_BYP |\ PLLE_SS_CNTL_INTERP_RESET | PLLE_SS_CNTL_BYPASS_SS) #define PLLE_AUX 0x48c #define 
PLLE_AUX_PLLRE_SEL (1<<28) #define PLLE_AUX_SEQ_STATE_SHIFT 26 #define PLLE_AUX_SEQ_STATE_MASK (0x3<<PLLE_AUX_SEQ_STATE_SHIFT) #define PLLE_AUX_SEQ_START_STATE (1<<25) #define PLLE_AUX_SEQ_ENABLE (1<<24) #define PLLE_AUX_SS_SWCTL (1<<6) #define PLLE_AUX_ENABLE_SWCTL (1<<4) #define PLLE_AUX_USE_LOCKDET (1<<3) #define PLLE_AUX_PLLP_SEL (1<<2) #define PLLE_AUX_CML_SATA_ENABLE (1<<1) #define PLLE_AUX_CML_PCIE_ENABLE (1<<0) /* USB PLLs PD HW controls */ #define XUSBIO_PLL_CFG0 0x51c #define XUSBIO_PLL_CFG0_SEQ_START_STATE (1<<25) #define XUSBIO_PLL_CFG0_SEQ_ENABLE (1<<24) #define XUSBIO_PLL_CFG0_PADPLL_USE_LOCKDET (1<<6) #define XUSBIO_PLL_CFG0_CLK_ENABLE_SWCTL (1<<2) #define XUSBIO_PLL_CFG0_PADPLL_RESET_SWCTL (1<<0) #define UTMIPLL_HW_PWRDN_CFG0 0x52c #define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE (1<<25) #define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE (1<<24) #define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET (1<<6) #define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE (1<<5) #define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL (1<<4) #define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL (1<<2) #define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE (1<<1) #define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL (1<<0) #define PLLU_HW_PWRDN_CFG0 0x530 #define PLLU_HW_PWRDN_CFG0_SEQ_START_STATE (1<<25) #define PLLU_HW_PWRDN_CFG0_SEQ_ENABLE (1<<24) #define PLLU_HW_PWRDN_CFG0_USE_LOCKDET (1<<6) #define PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL (1<<2) #define PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL (1<<0) #define USB_PLLS_SEQ_START_STATE (1<<25) #define USB_PLLS_SEQ_ENABLE (1<<24) #define USB_PLLS_USE_LOCKDET (1<<6) #define USB_PLLS_ENABLE_SWCTL ((1<<2) | (1<<0)) /* XUSB PLL PAD controls */ #define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0 0x40 #define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0_PLL_PWR_OVRD (1<<3) #define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0_PLL_IDDQ (1<<0) /* DFLL */ #ifndef CONFIG_ARCH_TEGRA_13x_SOC #define DFLL_BASE 0x2f4 #else #define DFLL_BASE 0x80 #endif #define DFLL_BASE_RESET (1<<0) #define LVL2_CLK_GATE_OVRE 0x554 #define ROUND_DIVIDER_UP 0 
#define ROUND_DIVIDER_DOWN 1 #define DIVIDER_1_5_ALLOWED 0 /* Tegra CPU clock and reset control regs */ #define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c #define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340 #define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR 0x344 #define TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR 0x34c #define TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470 #define CPU_CLOCK(cpu) (0x1 << (8 + cpu)) #define CPU_RESET(cpu) (0x111001ul << (cpu)) /* PLLP default fixed rate in h/w controlled mode */ #define PLLP_DEFAULT_FIXED_RATE 408000000 /* Use PLL_RE as PLLE input (default - OSC via pll reference divider) */ #define USE_PLLE_INPUT_PLLRE 0 static bool tegra12_is_dyn_ramp(struct clk *c, unsigned long rate, bool from_vco_min); static void tegra12_pllp_init_dependencies(unsigned long pllp_rate); static unsigned long tegra12_clk_shared_bus_update(struct clk *bus, struct clk **bus_top, struct clk **bus_slow, unsigned long *rate_cap); static unsigned long tegra12_clk_cap_shared_bus(struct clk *bus, unsigned long rate, unsigned long ceiling); static bool tegra12_periph_is_special_reset(struct clk *c); static void tegra12_dfll_cpu_late_init(struct clk *c); static bool detach_shared_bus; module_param(detach_shared_bus, bool, 0644); /* Defines default range for dynamic frequency lock loop (DFLL) to be used as CPU clock source: "0" - DFLL is not used, "1" - DFLL is used as a source for all CPU rates "2" - DFLL is used only for high rates above crossover with PLL dvfs curve */ static int use_dfll; /** * Structure defining the fields for USB UTMI clocks Parameters. 
*/ struct utmi_clk_param { /* Oscillator Frequency in KHz */ u32 osc_frequency; /* UTMIP PLL Enable Delay Count */ u8 enable_delay_count; /* UTMIP PLL Stable count */ u8 stable_count; /* UTMIP PLL Active delay count */ u8 active_delay_count; /* UTMIP PLL Xtal frequency count */ u8 xtal_freq_count; }; static const struct utmi_clk_param utmi_parameters[] = { /* OSC_FREQUENCY, ENABLE_DLY, STABLE_CNT, ACTIVE_DLY, XTAL_FREQ_CNT */ {13000000, 0x02, 0x33, 0x05, 0x7F}, {19200000, 0x03, 0x4B, 0x06, 0xBB}, {12000000, 0x02, 0x2F, 0x04, 0x76}, {26000000, 0x04, 0x66, 0x09, 0xFE}, {16800000, 0x03, 0x41, 0x0A, 0xA4}, }; static void __iomem *reg_clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE); #ifdef CONFIG_ARCH_TEGRA_13x_SOC static void __iomem *reg_clk13_base = IO_ADDRESS(TEGRA_CLK13_RESET_BASE); #endif static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE); static void __iomem *misc_gp_base = IO_ADDRESS(TEGRA_APB_MISC_BASE); static void __iomem *reg_xusb_padctl_base = IO_ADDRESS(TEGRA_XUSB_PADCTL_BASE); #define MISC_GP_TRANSACTOR_SCRATCH_0 0x864 #define MISC_GP_TRANSACTOR_SCRATCH_LA_ENABLE (0x1 << 1) #define MISC_GP_TRANSACTOR_SCRATCH_DDS_ENABLE (0x1 << 2) #define MISC_GP_TRANSACTOR_SCRATCH_DP2_ENABLE (0x1 << 3) /* * Some peripheral clocks share an enable bit, so refcount the enable bits * in registers CLK_ENABLE_L, ... 
CLK_ENABLE_W, and protect refcount updates * with lock */ static DEFINE_SPINLOCK(periph_refcount_lock); static int tegra_periph_clk_enable_refcount[CLK_OUT_ENB_NUM * 32]; #define clk_writel(value, reg) \ __raw_writel(value, reg_clk_base + (reg)) #define clk_readl(reg) \ __raw_readl(reg_clk_base + (reg)) #define pmc_writel(value, reg) \ __raw_writel(value, reg_pmc_base + (reg)) #define pmc_readl(reg) \ readl(reg_pmc_base + (reg)) #define xusb_padctl_writel(value, reg) \ __raw_writel(value, reg_xusb_padctl_base + (reg)) #define xusb_padctl_readl(reg) \ readl(reg_xusb_padctl_base + (reg)) static inline void clk_writel_delay(u32 value, u32 reg) { __raw_writel((value), reg_clk_base + (reg)); __raw_readl(reg_clk_base + (reg)); dsb(); udelay(2); } static inline void pll_writel_delay(u32 value, u32 reg) { __raw_writel((value), reg_clk_base + (reg)); __raw_readl(reg_clk_base + (reg)); dsb(); udelay(1); } /* Overloading clk_writelx macro based on the TEGRA_13x_SOC define */ #ifndef CONFIG_ARCH_TEGRA_13x_SOC #define clk_writelx(value, reg) \ __raw_writel(value, reg_clk_base + (reg)) #define clk_readlx(reg) \ __raw_readl(reg_clk_base + (reg)) #else #define clk_writelx(value, reg) \ __raw_writel(value, reg_clk13_base + (reg)) #define clk_readlx(reg) \ __raw_readl(reg_clk13_base + (reg)) #endif static inline void clk_writelx_delay(u32 value, u32 reg) { clk_writelx(value, reg); clk_readlx(reg); dsb(); udelay(2); } static inline void pll_writelx_delay(u32 value, u32 reg) { clk_writelx(value, reg); clk_readlx(reg); dsb(); udelay(1); } static inline int clk_set_div(struct clk *c, u32 n) { return clk_set_rate(c, (clk_get_rate(c->parent) + n-1) / n); } static inline u32 periph_clk_to_reg( struct clk *c, u32 reg_L, u32 reg_V, u32 reg_X, int offs) { u32 reg = c->u.periph.clk_num / 32; BUG_ON(reg >= RST_DEVICES_NUM); if (reg < 3) reg = reg_L + (reg * offs); else if (reg < 5) reg = reg_V + ((reg - 3) * offs); else reg = reg_X; return reg; } static int clk_div_x1_get_divider(unsigned long 
parent_rate, unsigned long rate, u32 max_x, u32 flags, u32 round_mode)
{
	/*
	 * Compute a u7.1-style fractional divider setting for the given
	 * parent/target rates.  The register field encodes (2 * divisor - 2),
	 * so 0 means 1:1; max_x bounds the field width.  Returns the field
	 * value, or -EINVAL if the rate is 0 or the divisor does not fit.
	 */
	s64 divider_ux1 = parent_rate;

	if (!rate)
		return -EINVAL;

	/* fractional dividers step in halves: scale by 2 before dividing */
	if (!(flags & DIV_U71_INT))
		divider_ux1 *= 2;
	/* bias so the divisor rounds up => output rate never exceeds target */
	if (round_mode == ROUND_DIVIDER_UP)
		divider_ux1 += rate - 1;
	do_div(divider_ux1, rate);
	/* integer-only dividers are scaled after the division instead */
	if (flags & DIV_U71_INT)
		divider_ux1 *= 2;

	if (divider_ux1 - 2 < 0)
		return 0;

	if (divider_ux1 - 2 > max_x)
		return -EINVAL;

#if !DIVIDER_1_5_ALLOWED
	/* divisor 1.5 (field value 1) is disallowed: snap to 2.0 or 1.0 */
	if (divider_ux1 == 3)
		divider_ux1 = (round_mode == ROUND_DIVIDER_UP) ? 4 : 2;
#endif
	return divider_ux1 - 2;
}

/* 7.1 divider with an 8-bit register field */
static int clk_div71_get_divider(unsigned long parent_rate, unsigned long rate,
				 u32 flags, u32 round_mode)
{
	return clk_div_x1_get_divider(parent_rate, rate, 0xFF,
			flags, round_mode);
}

/* 15.1 divider with a 16-bit register field */
static int clk_div151_get_divider(unsigned long parent_rate,
				  unsigned long rate,
				  u32 flags, u32 round_mode)
{
	return clk_div_x1_get_divider(parent_rate, rate, 0xFFFF,
			flags, round_mode);
}

/*
 * Compute a plain 16-bit integer divider (register value = divisor - 1),
 * rounding the divisor up so the output rate never exceeds the target.
 */
static int clk_div16_get_divider(unsigned long parent_rate, unsigned long rate)
{
	s64 divider_u16;

	divider_u16 = parent_rate;
	if (!rate)
		return -EINVAL;
	divider_u16 += rate - 1;
	do_div(divider_u16, rate);

	if (divider_u16 - 1 < 0)
		return 0;

	if (divider_u16 - 1 > 0xFFFF)
		return -EINVAL;

	return divider_u16 - 1;
}

/*
 * Round a rate up or down to the nearest value reachable from the fixed
 * source clock through a 7.1 divider, clamped to the client's rate limits.
 */
static long fixed_src_bus_round_updown(struct clk *c, struct clk *src,
				       u32 flags, unsigned long rate, bool up)
{
	int divider;
	unsigned long source_rate, round_rate;

	source_rate = clk_get_rate(src);

	/* bias target by 1 Hz so the result lands strictly above/below */
	divider = clk_div71_get_divider(source_rate, rate + (up ? -1 : 1),
		flags, up ? ROUND_DIVIDER_DOWN : ROUND_DIVIDER_UP);
	if (divider < 0)
		return c->min_rate;

	/* rate = 2 * source / (field + 2) inverts the u7.1 encoding */
	round_rate = source_rate * 2 / (divider + 2);

	if (round_rate > c->max_rate) {
		divider += flags & DIV_U71_INT ?
2 : 1; #if !DIVIDER_1_5_ALLOWED divider = max(2, divider); #endif round_rate = source_rate * 2 / (divider + 2); } return round_rate; } static inline bool bus_user_is_slower(struct clk *a, struct clk *b) { return a->u.shared_bus_user.client->max_rate * a->div < b->u.shared_bus_user.client->max_rate * b->div; } static inline bool bus_user_request_is_lower(struct clk *a, struct clk *b) { return a->u.shared_bus_user.rate * a->div < b->u.shared_bus_user.rate * b->div; } /* clk_m functions */ static unsigned long tegra12_clk_m_autodetect_rate(struct clk *c) { u32 osc_ctrl = clk_readl(OSC_CTRL); u32 auto_clock_control = osc_ctrl & ~OSC_CTRL_OSC_FREQ_MASK; u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK; u32 spare = clk_readl(SPARE_REG); u32 divisor = (spare & SPARE_REG_CLK_M_DIVISOR_MASK) >> SPARE_REG_CLK_M_DIVISOR_SHIFT; u32 spare_update = spare & ~SPARE_REG_CLK_M_DIVISOR_MASK; c->rate = tegra_clk_measure_input_freq(); switch (c->rate) { case 12000000: auto_clock_control |= OSC_CTRL_OSC_FREQ_12MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); BUG_ON(divisor != 0); break; case 13000000: auto_clock_control |= OSC_CTRL_OSC_FREQ_13MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); BUG_ON(divisor != 0); break; case 19200000: auto_clock_control |= OSC_CTRL_OSC_FREQ_19_2MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); BUG_ON(divisor != 0); break; case 26000000: auto_clock_control |= OSC_CTRL_OSC_FREQ_26MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); BUG_ON(divisor != 0); break; case 16800000: auto_clock_control |= OSC_CTRL_OSC_FREQ_16_8MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); BUG_ON(divisor != 0); break; case 38400000: auto_clock_control |= OSC_CTRL_OSC_FREQ_38_4MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_2); BUG_ON(divisor != 1); spare_update |= (1 << SPARE_REG_CLK_M_DIVISOR_SHIFT); break; case 48000000: auto_clock_control |= OSC_CTRL_OSC_FREQ_48MHZ; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_4); BUG_ON(divisor != 3); spare_update |= (3 << 
SPARE_REG_CLK_M_DIVISOR_SHIFT); break; case 115200: /* fake 13M for QT */ case 230400: /* fake 13M for QT */ auto_clock_control |= OSC_CTRL_OSC_FREQ_13MHZ; c->rate = 13000000; BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1); BUG_ON(divisor != 0); break; default: pr_err("%s: Unexpected clock rate %ld", __func__, c->rate); BUG(); } clk_writel(auto_clock_control, OSC_CTRL); clk_writel(spare_update, SPARE_REG); return c->rate; } static void tegra12_clk_m_init(struct clk *c) { pr_debug("%s on clock %s\n", __func__, c->name); tegra12_clk_m_autodetect_rate(c); } static int tegra12_clk_m_enable(struct clk *c) { pr_debug("%s on clock %s\n", __func__, c->name); return 0; } static void tegra12_clk_m_disable(struct clk *c) { pr_debug("%s on clock %s\n", __func__, c->name); WARN(1, "Attempting to disable main SoC clock\n"); } static struct clk_ops tegra_clk_m_ops = { .init = tegra12_clk_m_init, .enable = tegra12_clk_m_enable, .disable = tegra12_clk_m_disable, }; static struct clk_ops tegra_clk_m_div_ops = { .enable = tegra12_clk_m_enable, }; /* PLL reference divider functions */ static void tegra12_pll_ref_init(struct clk *c) { u32 pll_ref_div = clk_readl(OSC_CTRL) & OSC_CTRL_PLL_REF_DIV_MASK; pr_debug("%s on clock %s\n", __func__, c->name); switch (pll_ref_div) { case OSC_CTRL_PLL_REF_DIV_1: c->div = 1; break; case OSC_CTRL_PLL_REF_DIV_2: c->div = 2; break; case OSC_CTRL_PLL_REF_DIV_4: c->div = 4; break; default: pr_err("%s: Invalid pll ref divider %d", __func__, pll_ref_div); BUG(); } c->mul = 1; c->state = ON; } static struct clk_ops tegra_pll_ref_ops = { .init = tegra12_pll_ref_init, .enable = tegra12_clk_m_enable, .disable = tegra12_clk_m_disable, }; /* super clock functions */ /* "super clocks" on tegra12x have two-stage muxes, fractional 7.1 divider and * clock skipping super divider. We will ignore the clock skipping divider, * since we can't lower the voltage when using the clock skip, but we can if * we lower the PLL frequency. 
Note that skipping divider can and will be used * by thermal control h/w for automatic throttling. There is also a 7.1 divider * that most CPU super-clock inputs can be routed through. We will not use it * as well (keep default 1:1 state), to avoid high jitter on PLLX and DFLL path * and possible concurrency access issues with thermal h/w (7.1 divider setting * share register with clock skipping divider) */ static void tegra12_super_clk_init(struct clk *c) { u32 val; int source; int shift; const struct clk_mux_sel *sel; val = clk_readl(c->reg + SUPER_CLK_MUX); c->state = ON; BUG_ON(((val & SUPER_STATE_MASK) != SUPER_STATE_RUN) && ((val & SUPER_STATE_MASK) != SUPER_STATE_IDLE)); shift = ((val & SUPER_STATE_MASK) == SUPER_STATE_IDLE) ? SUPER_IDLE_SOURCE_SHIFT : SUPER_RUN_SOURCE_SHIFT; source = (val >> shift) & SUPER_SOURCE_MASK; /* * Enforce PLLX DIV2 bypass setting as early as possible. It is always * safe to do for both cclk_lp and cclk_g when booting on G CPU. (In * case of booting on LP CPU, cclk_lp will be updated during the cpu * rate change after boot, and cclk_g after the cluster switch.) 
*/ if ((c->flags & DIV_U71) && (!is_lp_cluster())) { val |= SUPER_LP_DIV2_BYPASS; clk_writel_delay(val, c->reg); } for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->value == source) break; } BUG_ON(sel->input == NULL); c->parent = sel->input; /* Update parent in case when LP CPU PLLX DIV2 bypassed */ if ((c->flags & DIV_2) && (c->parent->flags & PLLX) && (val & SUPER_LP_DIV2_BYPASS)) c->parent = c->parent->parent; /* Update parent in case when LP CPU PLLX DIV2 bypassed */ if ((c->flags & DIV_2) && (c->parent->flags & PLLX) && (val & SUPER_LP_DIV2_BYPASS)) c->parent = c->parent->parent; if (c->flags & DIV_U71) { c->mul = 2; c->div = 2; /* * Make sure 7.1 divider is 1:1; clear h/w skipper control - * it will be enabled by soctherm later */ val = clk_readl(c->reg + SUPER_CLK_DIVIDER); BUG_ON(val & SUPER_CLOCK_DIV_U71_MASK); val = 0; clk_writel(val, c->reg + SUPER_CLK_DIVIDER); } else clk_writel(0, c->reg + SUPER_CLK_DIVIDER); } static int tegra12_super_clk_enable(struct clk *c) { return 0; } static void tegra12_super_clk_disable(struct clk *c) { /* since tegra12 has 2 CPU super clocks - low power lp-mode clock and geared up g-mode super clock - mode switch may request to disable either of them; accept request with no affect on h/w */ } static int tegra12_super_clk_set_parent(struct clk *c, struct clk *p) { u32 val; const struct clk_mux_sel *sel; int shift; val = clk_readl(c->reg + SUPER_CLK_MUX); BUG_ON(((val & SUPER_STATE_MASK) != SUPER_STATE_RUN) && ((val & SUPER_STATE_MASK) != SUPER_STATE_IDLE)); shift = ((val & SUPER_STATE_MASK) == SUPER_STATE_IDLE) ? 
SUPER_IDLE_SOURCE_SHIFT : SUPER_RUN_SOURCE_SHIFT; for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == p) { /* For LP mode super-clock switch between PLLX direct and divided-by-2 outputs is allowed only when other than PLLX clock source is current parent */ if ((c->flags & DIV_2) && (p->flags & PLLX) && ((sel->value ^ val) & SUPER_LP_DIV2_BYPASS)) { if (c->parent->flags & PLLX) return -EINVAL; val ^= SUPER_LP_DIV2_BYPASS; clk_writel_delay(val, c->reg); } val &= ~(SUPER_SOURCE_MASK << shift); val |= (sel->value & SUPER_SOURCE_MASK) << shift; if (c->flags & DIV_U71) { /* Make sure 7.1 divider is 1:1 */ u32 div = clk_readl(c->reg + SUPER_CLK_DIVIDER); BUG_ON(div & SUPER_CLOCK_DIV_U71_MASK); } if (c->refcnt) clk_enable(p); clk_writel_delay(val, c->reg); if (c->refcnt && c->parent) clk_disable(c->parent); clk_reparent(c, p); return 0; } } return -EINVAL; } /* * Do not use super clocks "skippers", since dividing using a clock skipper * does not allow the voltage to be scaled down. Instead adjust the rate of * the parent clock. This requires that the parent of a super clock have no * other children, otherwise the rate will change underneath the other * children. */ static int tegra12_super_clk_set_rate(struct clk *c, unsigned long rate) { /* In tegra12_cpu_clk_set_plls() and tegra12_sbus_cmplx_set_rate() * this call is skipped by directly setting rate of source plls. If we * ever use 7.1 divider at other than 1:1 setting, or exercise s/w * skipper control, not only this function, but cpu and sbus set_rate * APIs should be changed accordingly. 
*/ return clk_set_rate(c->parent, rate); } #ifdef CONFIG_PM_SLEEP #ifndef CONFIG_ARCH_TEGRA_13x_SOC static void tegra12_super_clk_resume(struct clk *c, struct clk *backup, u32 setting) { u32 val; const struct clk_mux_sel *sel; int shift; /* For sclk and cclk_g super clock just restore saved value */ if (!(c->flags & DIV_2)) { clk_writel_delay(setting, c->reg); return; } /* * For cclk_lp supper clock: switch to backup (= not PLLX) source, * safely restore PLLX DIV2 bypass, and only then restore full * setting */ val = clk_readl(c->reg); BUG_ON(((val & SUPER_STATE_MASK) != SUPER_STATE_RUN) && ((val & SUPER_STATE_MASK) != SUPER_STATE_IDLE)); shift = ((val & SUPER_STATE_MASK) == SUPER_STATE_IDLE) ? SUPER_IDLE_SOURCE_SHIFT : SUPER_RUN_SOURCE_SHIFT; for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == backup) { val &= ~(SUPER_SOURCE_MASK << shift); val |= (sel->value & SUPER_SOURCE_MASK) << shift; BUG_ON(backup->flags & PLLX); clk_writel_delay(val, c->reg); val &= ~SUPER_LP_DIV2_BYPASS; val |= (setting & SUPER_LP_DIV2_BYPASS); clk_writel_delay(val, c->reg); clk_writel_delay(setting, c->reg); return; } } BUG(); } #endif #endif static struct clk_ops tegra_super_ops = { .init = tegra12_super_clk_init, .enable = tegra12_super_clk_enable, .disable = tegra12_super_clk_disable, .set_parent = tegra12_super_clk_set_parent, .set_rate = tegra12_super_clk_set_rate, }; #ifdef CONFIG_ARCH_TEGRA_13x_SOC static void tegra13_super_cclk_init(struct clk *c) { u32 val; int source; int shift; const struct clk_mux_sel *sel; val = clk_readlx(c->reg + SUPER_CLK_MUX); c->state = ON; shift = CLK13_SOURCE_SHIFT; source = (val >> shift) & CLK13_SOURCE_MASK; for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->value == source) break; } BUG_ON(sel->input == NULL); c->parent = sel->input; if (c->flags & DIV_U71) { c->mul = 2; c->div = 2; /* * Make sure 7.1 divider is 1:1; clear h/w skipper control - * it will be enabled by soctherm later */ val = clk_readlx(c->reg + 
SUPER_CLK_DIVIDER); BUG_ON(val & SUPER_CLOCK_DIV_U71_MASK); val = 0; clk_writelx(val, c->reg + SUPER_CLK_DIVIDER); } else clk_writelx(0, c->reg + SUPER_CLK_DIVIDER); } static int tegra13_super_cclk_enable(struct clk *c) { return 0; } static void tegra13_super_cclk_disable(struct clk *c) { /* since tegra13 has 1 CPU super clocks that is never disabled by clock framework accept request with no affect on h/w */ } static int tegra13_super_cclk_set_parent(struct clk *c, struct clk *p) { u32 val; const struct clk_mux_sel *sel; int shift; val = clk_readlx(c->reg); shift = CLK13_SOURCE_SHIFT; for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == p) { val &= ~(CLK13_SOURCE_MASK << shift); val |= (sel->value & CLK13_SOURCE_MASK) << shift; if (c->flags & DIV_U71) { /* Make sure 7.1 divider is 1:1 */ u32 div = clk_readlx(c->reg + SUPER_CLK_DIVIDER); BUG_ON(div & SUPER_CLOCK_DIV_U71_MASK); } if (c->refcnt) clk_enable(p); clk_writelx_delay(val, c->reg); if (c->refcnt && c->parent) clk_disable(c->parent); clk_reparent(c, p); return 0; } } return -EINVAL; } /* * Do not use super clocks "skippers", since dividing using a clock skipper * does not allow the voltage to be scaled down. Instead adjust the rate of * the parent clock. This requires that the parent of a super clock have no * other children, otherwise the rate will change underneath the other * children. */ static int tegra13_super_cclk_set_rate(struct clk *c, unsigned long rate) { /* In tegra12_cpu_clk_set_plls() op (shared with tegra13 as well) * this call is skipped by directly setting rate of source plls. If we * ever use 7.1 divider at other than 1:1 setting, or exercise s/w * skipper control, not only this function, but cpu and sbus set_rate * APIs should be changed accordingly. 
*/ return clk_set_rate(c->parent, rate); } static struct clk_ops tegra13_super_cclk_ops = { .init = tegra13_super_cclk_init, .enable = tegra13_super_cclk_enable, .disable = tegra13_super_cclk_disable, .set_parent = tegra13_super_cclk_set_parent, .set_rate = tegra13_super_cclk_set_rate, }; #endif /* virtual cpu clock functions */ /* some clocks can not be stopped (cpu, memory bus) while the SoC is running. To change the frequency of these clocks, the parent pll may need to be reprogrammed, so the clock must be moved off the pll, the pll reprogrammed, and then the clock moved back to the pll. To hide this sequence, a virtual clock handles it. */ static void tegra12_cpu_clk_init(struct clk *c) { c->state = (!is_lp_cluster() == (c->u.cpu.mode == MODE_G))? ON : OFF; } static int tegra12_cpu_clk_enable(struct clk *c) { return 0; } static void tegra12_cpu_clk_disable(struct clk *c) { /* * tegra12 has 2 virtual CPU clocks - low power lp-mode clock * and geared up g-mode clock - mode switch may request to disable * either of them; tegra13 that shares CPU ops with tegra12 has * only one virtual CPU that is never disabled; in any case accept * request with no affect on h/w/ */ } static int tegra12_cpu_clk_set_plls(struct clk *c, unsigned long rate, unsigned long old_rate) { int ret = 0; bool on_main = false; unsigned long backup_rate, main_rate; unsigned long vco_min = c->u.cpu.main->u.pll.vco_min; /* * Take an extra reference to the main pll so it doesn't turn off when * we move the cpu off of it. If possible, use main pll dynamic ramp * to reach target rate in one shot. Otherwise, use dynamic ramp to * lower current rate to pll VCO minimum level before switching to * backup source. */ if (c->parent->parent == c->u.cpu.main) { bool dramp = (rate > c->u.cpu.backup_rate) && tegra12_is_dyn_ramp(c->u.cpu.main, rate, false); clk_enable(c->u.cpu.main); on_main = true; if (dramp || ((old_rate > vco_min) && tegra12_is_dyn_ramp(c->u.cpu.main, vco_min, false))) { main_rate = dramp ? 
rate : vco_min; ret = clk_set_rate(c->u.cpu.main, main_rate); if (ret) { pr_err("Failed to set cpu rate %lu on source" " %s\n", main_rate, c->u.cpu.main->name); goto out; } if (dramp) goto out; } else if (old_rate > vco_min) { #if PLLXC_USE_DYN_RAMP pr_warn("No dynamic ramp down: %s: %lu to %lu\n", c->u.cpu.main->name, old_rate, vco_min); #endif } } /* Switch to back-up source, and stay on it if target rate is below backup rate */ if (c->parent->parent != c->u.cpu.backup) { ret = clk_set_parent(c->parent, c->u.cpu.backup); if (ret) { pr_err("Failed to switch cpu to %s\n", c->u.cpu.backup->name); goto out; } } backup_rate = min(rate, c->u.cpu.backup_rate); if (backup_rate != clk_get_rate_locked(c)) { ret = clk_set_rate(c->u.cpu.backup, backup_rate); if (ret) { pr_err("Failed to set cpu rate %lu on backup source\n", backup_rate); goto out; } } if (rate == backup_rate) goto out; /* Switch from backup source to main at rate not exceeding pll VCO minimum. Use dynamic ramp to reach target rate if it is above VCO minimum. 
*/ main_rate = rate; if (rate > vco_min) { if (tegra12_is_dyn_ramp(c->u.cpu.main, rate, true)) main_rate = vco_min; #if PLLXC_USE_DYN_RAMP else pr_warn("No dynamic ramp up: %s: %lu to %lu\n", c->u.cpu.main->name, vco_min, rate); #endif } ret = clk_set_rate(c->u.cpu.main, main_rate); if (ret) { pr_err("Failed to set cpu rate %lu on source" " %s\n", main_rate, c->u.cpu.main->name); goto out; } ret = clk_set_parent(c->parent, c->u.cpu.main); if (ret) { pr_err("Failed to switch cpu to %s\n", c->u.cpu.main->name); goto out; } if (rate != main_rate) { ret = clk_set_rate(c->u.cpu.main, rate); if (ret) { pr_err("Failed to set cpu rate %lu on source" " %s\n", rate, c->u.cpu.main->name); goto out; } } out: if (on_main) clk_disable(c->u.cpu.main); return ret; } static int tegra12_cpu_clk_dfll_on(struct clk *c, unsigned long rate, unsigned long old_rate) { int ret; struct clk *dfll = c->u.cpu.dynamic; unsigned long dfll_rate_min = c->dvfs->dfll_data.use_dfll_rate_min; /* dfll rate request */ ret = clk_set_rate(dfll, rate); if (ret) { pr_err("Failed to set cpu rate %lu on source" " %s\n", rate, dfll->name); return ret; } /* 1st time - switch to dfll */ if (c->parent->parent != dfll) { if (max(old_rate, rate) < dfll_rate_min) { /* set interim cpu dvfs rate at dfll_rate_min to prevent voltage drop below dfll Vmin */ ret = tegra_dvfs_set_rate(c, dfll_rate_min); if (ret) { pr_err("Failed to set cpu dvfs rate %lu\n", dfll_rate_min); return ret; } } tegra_dvfs_rail_mode_updating(tegra_cpu_rail, true); ret = clk_set_parent(c->parent, dfll); if (ret) { tegra_dvfs_rail_mode_updating(tegra_cpu_rail, false); pr_err("Failed to switch cpu to %s\n", dfll->name); return ret; } ret = tegra_clk_cfg_ex(dfll, TEGRA_CLK_DFLL_LOCK, 1); WARN(ret, "Failed to lock %s at rate %lu\n", dfll->name, rate); /* prevent legacy dvfs voltage scaling */ tegra_dvfs_dfll_mode_set(c->dvfs, rate); tegra_dvfs_rail_mode_updating(tegra_cpu_rail, false); } return 0; } static int tegra12_cpu_clk_dfll_off(struct clk *c, 
unsigned long rate, unsigned long old_rate)
{
	/*
	 * Switch the CPU off the DFLL and back onto a PLL at the requested
	 * rate.  Voltage rail control is handed back from DFLL closed loop
	 * to legacy dvfs; on any failure after unlocking, the CPU is
	 * returned to DFLL closed-loop mode via the back_to_dfll unwind.
	 */
	int ret;
	struct clk *pll;
	struct clk *dfll = c->u.cpu.dynamic;
	unsigned long dfll_rate_min = c->dvfs->dfll_data.use_dfll_rate_min;

	/* strip the DFLL-only boost; it is not reachable on the PLL path */
	rate = min(rate, c->max_rate - c->dvfs->dfll_data.max_rate_boost);
	pll = (rate <= c->u.cpu.backup_rate) ? c->u.cpu.backup : c->u.cpu.main;
	dfll_rate_min = max(rate, dfll_rate_min);

	/* set target rate last time in dfll mode */
	if (old_rate != dfll_rate_min) {
		ret = tegra_dvfs_set_rate(c, dfll_rate_min);
		if (!ret)
			ret = clk_set_rate(dfll, dfll_rate_min);

		if (ret) {
			pr_err("Failed to set cpu rate %lu on source %s\n",
			       dfll_rate_min, dfll->name);
			return ret;
		}
	}

	/* unlock dfll - release voltage rail control */
	tegra_dvfs_rail_mode_updating(tegra_cpu_rail, true);
	ret = tegra_clk_cfg_ex(dfll, TEGRA_CLK_DFLL_LOCK, 0);
	if (ret) {
		pr_err("Failed to unlock %s\n", dfll->name);
		goto back_to_dfll;
	}

	/* restore legacy dvfs operations and set appropriate voltage */
	ret = tegra_dvfs_dfll_mode_clear(c->dvfs, dfll_rate_min);
	if (ret) {
		pr_err("Failed to set cpu rail for rate %lu\n", rate);
		goto back_to_dfll;
	}

	/* set pll to target rate and return to pll source */
	ret = clk_set_rate(pll, rate);
	if (ret) {
		pr_err("Failed to set cpu rate %lu on source"
		       " %s\n", rate, pll->name);
		goto back_to_dfll;
	}
	ret = clk_set_parent(c->parent, pll);
	if (ret) {
		pr_err("Failed to switch cpu to %s\n", pll->name);
		goto back_to_dfll;
	}

	/* If going up, adjust voltage here (down path is taken care of by
	   the framework after set rate exit) */
	if (old_rate <= rate)
		tegra_dvfs_set_rate(c, rate);

	tegra_dvfs_rail_mode_updating(tegra_cpu_rail, false);
	return 0;

back_to_dfll:
	/* undo: re-lock the DFLL and restore dfll-mode dvfs bookkeeping */
	tegra_clk_cfg_ex(dfll, TEGRA_CLK_DFLL_LOCK, 1);
	tegra_dvfs_dfll_mode_set(c->dvfs, old_rate);
	tegra_dvfs_rail_mode_updating(tegra_cpu_rail, false);
	return ret;
}

/*
 * Virtual CPU clock rate setter: dispatches to the DFLL-on path when the
 * target rate is in DFLL range (and the DFLL is initialized), to the
 * DFLL-off path when currently on DFLL, otherwise to the PLL path.
 */
static int tegra12_cpu_clk_set_rate(struct clk *c, unsigned long rate)
{
	unsigned long old_rate = clk_get_rate_locked(c);
	bool has_dfll = c->u.cpu.dynamic &&
		(c->u.cpu.dynamic->state != UNINITIALIZED);
	bool
is_dfll = c->parent->parent == c->u.cpu.dynamic; /* On SILICON allow CPU rate change only if cpu regulator is connected. Ignore regulator connection on FPGA and SIMULATION platforms. */ if (c->dvfs && tegra_platform_is_silicon()) { if (!c->dvfs->dvfs_rail) return -ENOSYS; else if ((!c->dvfs->dvfs_rail->reg) && (old_rate < rate) && (c->boot_rate < rate)) { WARN(1, "Increasing CPU rate while regulator is not" " ready is not allowed\n"); return -ENOSYS; } } if (has_dfll && c->dvfs && c->dvfs->dvfs_rail) { if (tegra_dvfs_is_dfll_range(c->dvfs, rate)) return tegra12_cpu_clk_dfll_on(c, rate, old_rate); else if (is_dfll) return tegra12_cpu_clk_dfll_off(c, rate, old_rate); } return tegra12_cpu_clk_set_plls(c, rate, old_rate); } static long tegra12_cpu_clk_round_rate(struct clk *c, unsigned long rate) { unsigned long max_rate = c->max_rate; /* Remove dfll boost to maximum rate when running on PLL */ if (c->dvfs && !tegra_dvfs_is_dfll_scale(c->dvfs, rate)) max_rate -= c->dvfs->dfll_data.max_rate_boost; if (rate > max_rate) rate = max_rate; else if (rate < c->min_rate) rate = c->min_rate; return rate; } static struct clk_ops tegra_cpu_ops = { .init = tegra12_cpu_clk_init, .enable = tegra12_cpu_clk_enable, .disable = tegra12_cpu_clk_disable, .set_rate = tegra12_cpu_clk_set_rate, .round_rate = tegra12_cpu_clk_round_rate, }; static void tegra12_cpu_cmplx_clk_init(struct clk *c) { int i = !!is_lp_cluster(); BUG_ON(c->inputs[0].input->u.cpu.mode != MODE_G); #ifndef CONFIG_ARCH_TEGRA_13x_SOC BUG_ON(c->inputs[1].input->u.cpu.mode != MODE_LP); #endif c->parent = c->inputs[i].input; } /* cpu complex clock provides second level vitualization (on top of cpu virtual cpu rate control) in order to hide the CPU mode switch sequence */ #if PARAMETERIZE_CLUSTER_SWITCH static unsigned int switch_delay; static unsigned int switch_flags; static DEFINE_SPINLOCK(parameters_lock); void tegra_cluster_switch_set_parameters(unsigned int us, unsigned int flags) { spin_lock(&parameters_lock); 
switch_delay = us; switch_flags = flags; spin_unlock(&parameters_lock); } #endif static int tegra12_cpu_cmplx_clk_enable(struct clk *c) { return 0; } static void tegra12_cpu_cmplx_clk_disable(struct clk *c) { pr_debug("%s on clock %s\n", __func__, c->name); /* oops - don't disable the CPU complex clock! */ BUG(); } static int tegra12_cpu_cmplx_clk_set_rate(struct clk *c, unsigned long rate) { unsigned long flags; int ret; struct clk *parent = c->parent; if (!parent->ops || !parent->ops->set_rate) return -ENOSYS; clk_lock_save(parent, &flags); ret = clk_set_rate_locked(parent, rate); clk_unlock_restore(parent, &flags); return ret; } static int tegra12_cpu_cmplx_clk_set_parent(struct clk *c, struct clk *p) { int ret; unsigned int flags, delay; const struct clk_mux_sel *sel; unsigned long rate = clk_get_rate(c->parent); struct clk *dfll = c->parent->u.cpu.dynamic ? : p->u.cpu.dynamic; struct clk *p_source_old = NULL; struct clk *p_source; pr_debug("%s: %s %s\n", __func__, c->name, p->name); BUG_ON(c->parent->u.cpu.mode != (is_lp_cluster() ? MODE_LP : MODE_G)); for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == p) break; } if (!sel->input) return -EINVAL; #if PARAMETERIZE_CLUSTER_SWITCH spin_lock(&parameters_lock); flags = switch_flags; delay = switch_delay; switch_flags = 0; spin_unlock(&parameters_lock); if (flags) { /* over/under-clocking after switch - allow, but update rate */ if ((rate > p->max_rate) || (rate < p->min_rate)) { rate = rate > p->max_rate ? p->max_rate : p->min_rate; ret = clk_set_rate(c->parent, rate); if (ret) { pr_err("%s: Failed to set rate %lu for %s\n", __func__, rate, p->name); return ret; } } } else #endif { if (rate > p->max_rate) { /* over-clocking - no switch */ pr_warn("%s: No %s mode switch to %s at rate %lu\n", __func__, c->name, p->name, rate); return -ECANCELED; } flags = TEGRA_POWER_CLUSTER_IMMEDIATE; flags |= TEGRA_POWER_CLUSTER_PART_DEFAULT; delay = 0; } flags |= (p->u.cpu.mode == MODE_LP) ? 
TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G; if (p == c->parent) { if (flags & TEGRA_POWER_CLUSTER_FORCE) { /* Allow parameterized switch to the same mode */ ret = tegra_cluster_control(delay, flags); if (ret) pr_err("%s: Failed to force %s mode to %s\n", __func__, c->name, p->name); return ret; } return 0; /* already switched - exit */ } tegra_dvfs_rail_mode_updating(tegra_cpu_rail, true); if (c->parent->parent->parent == dfll) { /* G (DFLL selected as clock source) => LP switch: * turn DFLL into open loop mode ("release" VDD_CPU rail) * select target p_source for LP, and get its rate ready */ ret = tegra_clk_cfg_ex(dfll, TEGRA_CLK_DFLL_LOCK, 0); if (ret) goto abort; p_source = rate <= p->u.cpu.backup_rate ? p->u.cpu.backup : p->u.cpu.main; ret = clk_set_rate(p_source, rate); if (ret) goto abort; } else if ((p->parent->parent == dfll) || (p->dvfs && tegra_dvfs_is_dfll_range(p->dvfs, rate))) { /* LP => G (DFLL selected as clock source) switch: * set DFLL rate ready (DFLL is still disabled) * (set target p_source as dfll, G source is already selected) */ p_source = dfll; ret = clk_set_rate(dfll, tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail) ? 
rate : max(rate, p->dvfs->dfll_data.use_dfll_rate_min)); if (ret) goto abort; ret = tegra_dvfs_rail_dfll_mode_set_cold(tegra_cpu_rail, dfll); if (ret) goto abort; } else /* DFLL is not selected on either side of the switch: * set target p_source equal to current clock source */ p_source = c->parent->parent->parent; /* Switch new parent to target clock source if necessary */ if (p->parent->parent != p_source) { clk_enable(p->parent->parent); clk_enable(p->parent); p_source_old = p->parent->parent; ret = clk_set_parent(p->parent, p_source); if (ret) { pr_err("%s: Failed to set parent %s for %s\n", __func__, p_source->name, p->name); goto abort; } } /* Enabling new parent scales new mode voltage rail in advanvce before the switch happens (if p_source is DFLL: open loop mode) */ if (c->refcnt) clk_enable(p); /* switch CPU mode */ ret = tegra_cluster_control(delay, flags); if (ret) { if (c->refcnt) clk_disable(p); pr_err("%s: Failed to switch %s mode to %s\n", __func__, c->name, p->name); goto abort; } /* * Lock DFLL now (resume closed loop VDD_CPU control). * G CPU operations are resumed on DFLL if it was the last G CPU * clock source, or if resume rate is in DFLL usage range in case * when auto-switch between PLL and DFLL is enabled. 
 */
	/*
	 * NOTE(review): tail of tegra12_cpu_cmplx_clk_set_parent() — the
	 * function begins before this chunk.  Success path below: re-lock
	 * DFLL if it is the target source, release the old parent's rail
	 * votes, and reparent the virtual cpu complex clock.
	 */
	if (p_source == dfll) {
		if (tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail)) {
			tegra_clk_cfg_ex(dfll, TEGRA_CLK_DFLL_LOCK, 1);
		} else {
			clk_set_rate(dfll, rate);
			tegra_clk_cfg_ex(dfll, TEGRA_CLK_DFLL_LOCK, 1);
			tegra_dvfs_dfll_mode_set(p->dvfs, rate);
		}
	}

	/* Disabling old parent scales old mode voltage rail */
	if (c->refcnt)
		clk_disable(c->parent);
	if (p_source_old) {
		clk_disable(p->parent);
		clk_disable(p_source_old);
	}

	clk_reparent(c, p);

	tegra_dvfs_rail_mode_updating(tegra_cpu_rail, false);
	return 0;

abort:
	/* Re-lock DFLL if necessary after aborted switch */
	if (c->parent->parent->parent == dfll) {
		clk_set_rate(dfll, rate);
		tegra_clk_cfg_ex(dfll, TEGRA_CLK_DFLL_LOCK, 1);
	}
	if (p_source_old) {
		clk_disable(p->parent);
		clk_disable(p_source_old);
	}
	tegra_dvfs_rail_mode_updating(tegra_cpu_rail, false);

	pr_err("%s: aborted switch from %s to %s\n",
	       __func__, c->parent->name, p->name);

	return ret;
}

/* Round a cpu-complex rate request by delegating to the current parent. */
static long tegra12_cpu_cmplx_round_rate(struct clk *c,
	unsigned long rate)
{
	return clk_round_rate(c->parent, rate);
}

static struct clk_ops tegra_cpu_cmplx_ops = {
	.init       = tegra12_cpu_cmplx_clk_init,
	.enable     = tegra12_cpu_cmplx_clk_enable,
	.disable    = tegra12_cpu_cmplx_clk_disable,
	.set_rate   = tegra12_cpu_cmplx_clk_set_rate,
	.set_parent = tegra12_cpu_cmplx_clk_set_parent,
	.round_rate = tegra12_cpu_cmplx_round_rate,
};

/* virtual cop clock functions. Used to acquire the fake 'cop' clock to
 * reset the COP block (i.e. AVP) */
static void tegra12_cop_clk_reset(struct clk *c, bool assert)
{
	/* SET register asserts the reset line, CLR register deasserts it */
	unsigned long reg = assert ? RST_DEVICES_SET_L : RST_DEVICES_CLR_L;

	pr_debug("%s %s\n", __func__, assert ? "assert" : "deassert");

	/* bit 1 — presumably the COP/AVP reset bit; confirm against the
	 * RST_DEVICES_L register layout */
	clk_writel(1 << 1, reg);
}

static struct clk_ops tegra_cop_ops = {
	.reset    = tegra12_cop_clk_reset,
};

/* bus clock functions */

/* Serializes read-modify-write of the shared bus divider registers */
static DEFINE_SPINLOCK(bus_clk_lock);

/*
 * Program the divider field of a bus clock register.
 * @div: divisor in the range 1..BUS_CLK_DIV_MASK+1 (hardware encodes div-1).
 * Returns 0 on success, -EINVAL for an out-of-range divisor.
 */
static int bus_set_div(struct clk *c, int div)
{
	u32 val;
	unsigned long flags;

	if (!div || (div > (BUS_CLK_DIV_MASK + 1)))
		return -EINVAL;

	spin_lock_irqsave(&bus_clk_lock, flags);
	val = clk_readl(c->reg);
	val &= ~(BUS_CLK_DIV_MASK << c->reg_shift);
	val |= (div - 1) << c->reg_shift;
	clk_writel(val, c->reg);
	c->div = div;
	spin_unlock_irqrestore(&bus_clk_lock, flags);
	return 0;
}

/* Recover the h/w state (enable bit and divider) into the clk object */
static void tegra12_bus_clk_init(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	c->state = ((val >> c->reg_shift) & BUS_CLK_DISABLE) ? OFF : ON;
	c->div = ((val >> c->reg_shift) & BUS_CLK_DIV_MASK) + 1;
	c->mul = 1;
}

/* Clear the per-bus disable bit; always succeeds */
static int tegra12_bus_clk_enable(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	val &= ~(BUS_CLK_DISABLE << c->reg_shift);
	clk_writel(val, c->reg);
	return 0;
}

static void tegra12_bus_clk_disable(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	val |= BUS_CLK_DISABLE << c->reg_shift;
	clk_writel(val, c->reg);
}

/*
 * Pick the smallest divider 1..4 such that parent_rate/div does not
 * exceed the requested rate.  No-op on the QT simulation platform.
 * Returns -EINVAL if even the maximum divider is too fast.
 */
static int tegra12_bus_clk_set_rate(struct clk *c, unsigned long rate)
{
	unsigned long parent_rate = clk_get_rate(c->parent);
	int i;

	if (tegra_platform_is_qt())
		return 0;
	for (i = 1; i <= 4; i++) {
		if (rate >= parent_rate / i)
			return bus_set_div(c, i);
	}
	return -EINVAL;
}

static struct clk_ops tegra_bus_ops = {
	.init			= tegra12_bus_clk_init,
	.enable			= tegra12_bus_clk_enable,
	.disable		= tegra12_bus_clk_disable,
	.set_rate		= tegra12_bus_clk_set_rate,
};

/* Virtual system bus complex clock is used to hide the sequence of
   changing sclk/hclk/pclk parents and dividers to configure requested
   sclk target rate.
*/ static void tegra12_sbus_cmplx_init(struct clk *c) { unsigned long rate; c->max_rate = c->parent->max_rate; c->min_rate = c->parent->min_rate; /* Threshold must be an exact proper factor of low range parent, and both low/high range parents have 7.1 fractional dividers */ rate = clk_get_rate(c->u.system.sclk_low->parent); if (tegra_platform_is_qt()) return; if (c->u.system.threshold) { BUG_ON(c->u.system.threshold > rate) ; BUG_ON((rate % c->u.system.threshold) != 0); } BUG_ON(!(c->u.system.sclk_low->flags & DIV_U71)); BUG_ON(!(c->u.system.sclk_high->flags & DIV_U71)); } /* This special sbus round function is implemented because: * * (a) sbus complex clock source is selected automatically based on rate * * (b) since sbus is a shared bus, and its frequency is set to the highest * enabled shared_bus_user clock, the target rate should be rounded up divider * ladder (if max limit allows it) - for pll_div and peripheral_div common is * rounding down - special case again. * * Note that final rate is trimmed (not rounded up) to avoid spiraling up in * recursive calls. Lost 1Hz is added in tegra12_sbus_cmplx_set_rate before * actually setting divider rate. */ static long tegra12_sbus_cmplx_round_updown(struct clk *c, unsigned long rate, bool up) { unsigned long round_rate; struct clk *new_parent; rate = max(rate, c->min_rate); new_parent = (rate <= c->u.system.threshold) ? c->u.system.sclk_low : c->u.system.sclk_high; round_rate = fixed_src_bus_round_updown( c, new_parent->parent, new_parent->flags, rate, up); if (new_parent == c->u.system.sclk_high) { /* Prevent oscillation across threshold */ if (round_rate <= c->u.system.threshold) round_rate = c->u.system.threshold; } return round_rate; } static long tegra12_sbus_cmplx_round_rate(struct clk *c, unsigned long rate) { return tegra12_sbus_cmplx_round_updown(c, rate, true); } /* * FIXME: This limitation may have been relaxed on Tegra12. * This issue has to be visited again once the new limitation is clarified. 
 *
 * Limitations on SCLK/HCLK/PCLK dividers:
 * (A) H/w limitation:
 *	if SCLK >= 60MHz, SCLK:PCLK >= 2
 * (B) S/w policy limitation, in addition to (A):
 *	if any APB bus shared user request is enabled, HCLK:PCLK >= 2
 * Reason for (B): assuming APB bus shared user has requested X < 60MHz,
 * HCLK = PCLK = X, and new AHB user is coming on-line requesting Y >= 60MHz,
 * we can consider 2 paths depending on order of changing HCLK rate and
 * HCLK:PCLK ratio
 * (i)  HCLK:PCLK = X:X => Y:Y* => Y:Y/2,   (*) violates rule (A)
 * (ii) HCLK:PCLK = X:X => X:X/2* => Y:Y/2, (*) under-clocks APB user
 * In this case we can not guarantee safe transition from HCLK:PCLK = 1:1
 * below 60MHz to HCLK rate above 60MHz without under-clocking APB user.
 * Hence, policy (B).
 *
 * Note: when there are no request from APB users, path (ii) can be used to
 * increase HCLK above 60MHz, and HCLK:PCLK = 1:1 is allowed.
 */
#define SCLK_PCLK_UNITY_RATIO_RATE_MAX	60000000
#define BUS_AHB_DIV_MAX			(BUS_CLK_DIV_MASK + 1UL)
#define BUS_APB_DIV_MAX			(BUS_CLK_DIV_MASK + 1UL)

static int tegra12_sbus_cmplx_set_rate(struct clk *c, unsigned long rate)
{
	int ret;
	struct clk *new_parent;

	/*
	 * Configure SCLK/HCLK/PCLK guaranteed safe combination:
	 * - select the appropriate sclk parent
	 * - keep hclk at the same rate as sclk
	 * - set pclk at 1:2 rate of hclk
	 */
	bus_set_div(c->u.system.pclk, 2);
	bus_set_div(c->u.system.hclk, 1);
	c->child_bus->child_bus->div = 2;
	c->child_bus->div = 1;

	if (rate == clk_get_rate_locked(c))
		return 0;

	new_parent = (rate <= c->u.system.threshold) ?
		c->u.system.sclk_low : c->u.system.sclk_high;

	/* +1Hz restores the 1Hz trimmed off by the round function (see
	 * comment above tegra12_sbus_cmplx_round_updown) */
	ret = clk_set_rate(new_parent, rate + 1);
	if (ret) {
		pr_err("Failed to set sclk source %s to %lu\n",
		       new_parent->name, rate);
		return ret;
	}

	if (new_parent != clk_get_parent(c->parent)) {
		ret = clk_set_parent(c->parent, new_parent);
		if (ret) {
			pr_err("Failed to switch sclk source to %s\n",
			       new_parent->name);
			return ret;
		}
	}
	return 0;
}

/*
 * Re-evaluate the shared sbus: aggregate user requests, apply the
 * SCLK/HCLK/PCLK ratio policy documented above, set the new sclk rate in
 * a safe 1:1:2 configuration, then settle the final AHB/APB dividers.
 */
static int tegra12_clk_sbus_update(struct clk *bus)
{
	int ret, div;
	bool p_requested;
	unsigned long s_rate, h_rate, p_rate, ceiling;
	struct clk *ahb, *apb;

	if (detach_shared_bus)
		return 0;

	s_rate = tegra12_clk_shared_bus_update(bus, &ahb, &apb, &ceiling);
	if (bus->override_rate)
		return clk_set_rate_locked(bus, s_rate);

	ahb = bus->child_bus;
	apb = ahb->child_bus;
	h_rate = ahb->u.shared_bus_user.rate;
	p_rate = apb->u.shared_bus_user.rate;
	/* refcnt > 1 means some APB shared user is actually enabled */
	p_requested = apb->refcnt > 1;

	/* Propagate ratio requirements up from PCLK to SCLK */
	if (p_requested)
		h_rate = max(h_rate, p_rate * 2);
	s_rate = max(s_rate, h_rate);
	if (s_rate >= SCLK_PCLK_UNITY_RATIO_RATE_MAX)
		s_rate = max(s_rate, p_rate * 2);

	/* Propagate cap requirements down from SCLK to PCLK */
	s_rate = tegra12_clk_cap_shared_bus(bus, s_rate, ceiling);
	if (s_rate >= SCLK_PCLK_UNITY_RATIO_RATE_MAX)
		p_rate = min(p_rate, s_rate / 2);
	h_rate = min(h_rate, s_rate);
	if (p_requested)
		p_rate = min(p_rate, h_rate / 2);

	/* Set new sclk rate in safe 1:1:2, rounded "up" configuration */
	ret = clk_set_rate_locked(bus, s_rate);
	if (ret)
		return ret;

	/* Finally settle new bus divider values */
	s_rate = clk_get_rate_locked(bus);
	div = min(s_rate / h_rate, BUS_AHB_DIV_MAX);
	if (div != 1) {
		bus_set_div(bus->u.system.hclk, div);
		ahb->div = div;
	}

	h_rate = clk_get_rate(bus->u.system.hclk);
	div = min(h_rate / p_rate, BUS_APB_DIV_MAX);
	if (div != 2) {
		bus_set_div(bus->u.system.pclk, div);
		apb->div = div;
	}
	return 0;
}

static struct clk_ops tegra_sbus_cmplx_ops = {
	.init = tegra12_sbus_cmplx_init,
	.set_rate = tegra12_sbus_cmplx_set_rate,
	.round_rate =
tegra12_sbus_cmplx_round_rate, .round_rate_updown = tegra12_sbus_cmplx_round_updown, .shared_bus_update = tegra12_clk_sbus_update, }; /* Blink output functions */ static void tegra12_blink_clk_init(struct clk *c) { u32 val; val = pmc_readl(PMC_CTRL); c->state = (val & PMC_CTRL_BLINK_ENB) ? ON : OFF; c->mul = 1; val = pmc_readl(c->reg); if (val & PMC_BLINK_TIMER_ENB) { unsigned int on_off; on_off = (val >> PMC_BLINK_TIMER_DATA_ON_SHIFT) & PMC_BLINK_TIMER_DATA_ON_MASK; val >>= PMC_BLINK_TIMER_DATA_OFF_SHIFT; val &= PMC_BLINK_TIMER_DATA_OFF_MASK; on_off += val; /* each tick in the blink timer is 4 32KHz clocks */ c->div = on_off * 4; } else { c->div = 1; } } static int tegra12_blink_clk_enable(struct clk *c) { u32 val; val = pmc_readl(PMC_DPD_PADS_ORIDE); pmc_writel(val | PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE); val = pmc_readl(PMC_CTRL); pmc_writel(val | PMC_CTRL_BLINK_ENB, PMC_CTRL); pmc_readl(PMC_CTRL); return 0; } static void tegra12_blink_clk_disable(struct clk *c) { u32 val; val = pmc_readl(PMC_CTRL); pmc_writel(val & ~PMC_CTRL_BLINK_ENB, PMC_CTRL); val = pmc_readl(PMC_DPD_PADS_ORIDE); pmc_writel(val & ~PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE); pmc_readl(PMC_DPD_PADS_ORIDE); } static int tegra12_blink_clk_set_rate(struct clk *c, unsigned long rate) { unsigned long parent_rate = clk_get_rate(c->parent); if (rate >= parent_rate) { c->div = 1; pmc_writel(0, c->reg); } else { unsigned int on_off; u32 val; on_off = DIV_ROUND_UP(parent_rate / 8, rate); c->div = on_off * 8; val = (on_off & PMC_BLINK_TIMER_DATA_ON_MASK) << PMC_BLINK_TIMER_DATA_ON_SHIFT; on_off &= PMC_BLINK_TIMER_DATA_OFF_MASK; on_off <<= PMC_BLINK_TIMER_DATA_OFF_SHIFT; val |= on_off; val |= PMC_BLINK_TIMER_ENB; pmc_writel(val, c->reg); } pmc_readl(c->reg); return 0; } static struct clk_ops tegra_blink_clk_ops = { .init = &tegra12_blink_clk_init, .enable = &tegra12_blink_clk_enable, .disable = &tegra12_blink_clk_disable, .set_rate = &tegra12_blink_clk_set_rate, }; /* PLL Functions */ 
/*
 * Poll @lock_reg until all @lock_bits are set, in PLL_PRE_LOCK_DELAY
 * steps bounded by the pll's lock_delay.  PLLX registers are accessed
 * through the clk_readlx accessor, everything else through clk_readl.
 * Returns 0 on lock, -ETIMEDOUT otherwise.  Without USE_PLL_LOCK_BITS
 * the function just waits the full lock_delay and assumes success.
 */
static int tegra12_pll_clk_wait_for_lock(
	struct clk *c, u32 lock_reg, u32 lock_bits)
{
#if USE_PLL_LOCK_BITS
	int i;
	u32 val = 0;

	for (i = 0; i < (c->u.pll.lock_delay / PLL_PRE_LOCK_DELAY + 1); i++) {
		udelay(PLL_PRE_LOCK_DELAY);
		if (c->flags & PLLX)
			val = clk_readlx(lock_reg);
		else
			val = clk_readl(lock_reg);
		if ((val & lock_bits) == lock_bits) {
			udelay(PLL_POST_LOCK_DELAY);
			return 0;
		}
	}

	/* PLLCX lock bits may fluctuate after the lock - do detailed
	   reporting at debug level (phase lock bit happens to uniquely
	   identify PLLCX) */
	if (lock_bits & PLLCX_BASE_PHASE_LOCK) {
		pr_debug("Timed out waiting %s locks: %s %s not set\n",
			c->name,
			val & PLL_BASE_LOCK ? "" : "frequency_lock",
			val & PLLCX_BASE_PHASE_LOCK ? "" : "phase_lock");
		pr_debug("base = 0x%x\n", val);
		if (c->flags & PLLX) {
			pr_debug("misc = 0x%x\n",
				clk_readlx(c->reg + PLL_MISC(c)));
			pr_debug("misc1 = 0x%x\n",
				clk_readlx(c->reg + PLL_MISCN(c, 1)));
			pr_debug("misc2 = 0x%x\n",
				clk_readlx(c->reg + PLL_MISCN(c, 2)));
			pr_debug("misc3 = 0x%x\n",
				clk_readlx(c->reg + PLL_MISCN(c, 3)));
		} else {
			pr_debug("misc = 0x%x\n",
				clk_readl(c->reg + PLL_MISC(c)));
			pr_debug("misc1 = 0x%x\n",
				clk_readl(c->reg + PLL_MISCN(c, 1)));
			pr_debug("misc2 = 0x%x\n",
				clk_readl(c->reg + PLL_MISCN(c, 2)));
			pr_debug("misc3 = 0x%x\n",
				clk_readl(c->reg + PLL_MISCN(c, 3)));
		}
		return -ETIMEDOUT;
	} else {
		pr_err("Timed out waiting for %s lock bit ([0x%x] = 0x%x)\n",
		       c->name, lock_reg, val);
		return -ETIMEDOUT;
	}
#endif
	udelay(c->u.pll.lock_delay);
	return 0;
}

/*
 * Hand the USB PLL at @reg over to the h/w power-down sequencer:
 * use the lock detector, drop s/w enable control, then start the
 * sequencer as a separate delayed write.
 */
static void usb_plls_hw_control_enable(u32 reg)
{
	u32 val = clk_readl(reg);
	val |= USB_PLLS_USE_LOCKDET | USB_PLLS_SEQ_START_STATE;
	val &= ~USB_PLLS_ENABLE_SWCTL;
	/* NOTE(review): SEQ_START_STATE was already OR-ed in above; this
	 * repeat is redundant unless the mask overlaps ENABLE_SWCTL —
	 * confirm against the bit definitions before removing */
	val |= USB_PLLS_SEQ_START_STATE;
	pll_writel_delay(val, reg);

	val |= USB_PLLS_SEQ_ENABLE;
	pll_writel_delay(val, reg);
}

/*
 * Program the UTMIP (USB transceiver) PLL counters for the current main
 * oscillator frequency, release its forced power-downs, and enable the
 * h/w power sequencer for UTMIPLL.
 */
static void tegra12_utmi_param_configure(struct clk *c)
{
	u32 reg;
	int i;
	unsigned long main_rate =
		clk_get_rate(c->parent->parent);

	/* Look up the parameter row matching the oscillator frequency */
	for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
		if (main_rate == utmi_parameters[i].osc_frequency) {
			break;
		}
	}

	if (i >= ARRAY_SIZE(utmi_parameters)) {
		pr_err("%s: Unexpected main rate %lu\n", __func__, main_rate);
		return;
	}

	reg = clk_readl(UTMIP_PLL_CFG2);

	/* Program UTMIP PLL stable and active counts */
	/* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */
	reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
	reg |= UTMIP_PLL_CFG2_STABLE_COUNT(
		utmi_parameters[i].stable_count);

	reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
	reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(
		utmi_parameters[i].active_delay_count);

	/* Remove power downs from UTMIP PLL control bits */
	reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP;
	reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP;
	reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERUP;
	reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP;
	reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
	reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
	reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
	reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN;
	clk_writel(reg, UTMIP_PLL_CFG2);

	/* Program UTMIP PLL delay and oscillator frequency counts */
	reg = clk_readl(UTMIP_PLL_CFG1);
	reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
	reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(
		utmi_parameters[i].enable_delay_count);

	reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
	reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(
		utmi_parameters[i].xtal_freq_count);

	/* Remove power downs from UTMIP PLL control bits */
	reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
	reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
	reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP;
	reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
	clk_writel(reg, UTMIP_PLL_CFG1);

	/* Setup HW control of UTMIPLL */
	reg = clk_readl(UTMIPLL_HW_PWRDN_CFG0);
	reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET;
	reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL;
	reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE;
	clk_writel(reg, UTMIPLL_HW_PWRDN_CFG0);

	reg = clk_readl(UTMIP_PLL_CFG1);
	reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
	reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
	pll_writel_delay(reg, UTMIP_PLL_CFG1);

	/* Setup SW override of UTMIPLL assuming USB2.0
	   ports are assigned to USB2 */
	reg = clk_readl(UTMIPLL_HW_PWRDN_CFG0);
	reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL;
	reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE;
	pll_writel_delay(reg, UTMIPLL_HW_PWRDN_CFG0);

	/* Enable HW control UTMIPLL */
	reg = clk_readl(UTMIPLL_HW_PWRDN_CFG0);
	reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE;
	pll_writel_delay(reg, UTMIPLL_HW_PWRDN_CFG0);
}

/*
 * Recover s/w state (mul/div) from the PLL_BASE register.  PLL_FIXED
 * clocks without a base override must match a freq_table entry; PLLD
 * uses a wider N-divider field; PLLU encodes its post divider in a
 * single bit.  For PLLU this also hands power management to h/w.
 */
static void tegra12_pll_clk_init(struct clk *c)
{
	u32 val = clk_readl(c->reg + PLL_BASE);
	u32 divn_mask = c->flags & PLLD ? PLLD_BASE_DIVN_MASK :
		PLL_BASE_DIVN_MASK;

	c->state = (val & PLL_BASE_ENABLE) ? ON : OFF;

	if (c->flags & PLL_FIXED && !(val & PLL_BASE_OVERRIDE)) {
		const struct clk_pll_freq_table *sel;
		unsigned long input_rate = clk_get_rate(c->parent);
		c->u.pll.fixed_rate = PLLP_DEFAULT_FIXED_RATE;

		for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
			if (sel->input_rate == input_rate &&
				sel->output_rate == c->u.pll.fixed_rate) {
				c->mul = sel->n;
				c->div = sel->m * sel->p;
				return;
			}
		}
		pr_err("Clock %s has unknown fixed frequency\n", c->name);
		BUG();
	} else if (val & PLL_BASE_BYPASS) {
		c->mul = 1;
		c->div = 1;
	} else {
		c->mul = (val & divn_mask) >> PLL_BASE_DIVN_SHIFT;
		c->div = (val & PLL_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
		if (c->flags & PLLU)
			/* PLLU post divider: bit set means /1, clear /2 */
			c->div *= (val & PLLU_BASE_POST_DIV) ? 1 : 2;
		else
			c->div *= (0x1 << ((val & PLL_BASE_DIVP_MASK) >>
					PLL_BASE_DIVP_SHIFT));
	}

	if (c->flags & PLL_FIXED) {
		c->u.pll.fixed_rate = clk_get_rate_locked(c);
	}

	if (c->flags & PLLU) {
		/* Configure UTMI PLL power management */
		tegra12_utmi_param_configure(c);

		/* Put PLLU under h/w control */
		usb_plls_hw_control_enable(PLLU_HW_PWRDN_CFG0);

		val = clk_readl(c->reg + PLL_BASE);
		val &= ~PLLU_BASE_OVERRIDE;
		pll_writel_delay(val, c->reg + PLL_BASE);

		/* Set XUSB PLL pad pwr override and iddq */
		val = xusb_padctl_readl(XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0);
		val |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0_PLL_PWR_OVRD;
		val |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0_PLL_IDDQ;
		xusb_padctl_writel(val, XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0);
		xusb_padctl_readl(XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0);
	}
}

/*
 * Enable a conventional PLL: optionally re-arm the lock detector, clear
 * bypass, set enable, then wait for the lock bit.
 */
static int tegra12_pll_clk_enable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

#if USE_PLL_LOCK_BITS
	/* toggle lock enable bit to reset lock detection circuit (couple
	   register reads provide enough duration for reset pulse) */
	val = clk_readl(c->reg + PLL_MISC(c));
	val &= ~PLL_MISC_LOCK_ENABLE(c);
	clk_writel(val, c->reg + PLL_MISC(c));
	val = clk_readl(c->reg + PLL_MISC(c));
	val = clk_readl(c->reg + PLL_MISC(c));
	val |= PLL_MISC_LOCK_ENABLE(c);
	clk_writel(val, c->reg + PLL_MISC(c));
#endif
	val = clk_readl(c->reg + PLL_BASE);
	val &= ~PLL_BASE_BYPASS;
	val |= PLL_BASE_ENABLE;
	clk_writel(val, c->reg + PLL_BASE);

	tegra12_pll_clk_wait_for_lock(c, c->reg + PLL_BASE, PLL_BASE_LOCK);

	return 0;
}

/* Clear bypass and enable bits; skipped entirely on QT simulation */
static void tegra12_pll_clk_disable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	val = clk_readl(c->reg);
	val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
	if (tegra_platform_is_qt())
		return;
	clk_writel(val, c->reg);
}

/* Special comparison frequency selection for PLLD at 12MHz refrence rate */
unsigned long get_pll_cfreq_special(struct clk *c, unsigned long input_rate,
				   unsigned long rate, unsigned long *vco)
{
	if (!(c->flags & PLLD) || (input_rate != 12000000))
		return 0;

	*vco =
c->u.pll.vco_min;

	/* Coarser comparison frequency for lower output rates */
	if (rate <= 250000000)
		return 4000000;
	else if (rate <= 500000000)
		return 2000000;
	else
		return 1000000;
}

/* Common comparison frequency selection */
unsigned long get_pll_cfreq_common(struct clk *c, unsigned long input_rate,
				   unsigned long rate, unsigned long *vco)
{
	unsigned long cfreq = 0;

	switch (input_rate) {
	case 12000000:
	case 26000000:
		cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2000000;
		break;
	case 13000000:
		cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2600000;
		break;
	case 16800000:
	case 19200000:
		cfreq = (rate <= 1200000 * 1000) ? 1200000 : 2400000;
		break;
	default:
		if (c->parent->flags & DIV_U71_FIXED) {
			/* PLLP_OUT1 rate is not in PLLA table */
			pr_warn("%s: failed %s ref/out rates %lu/%lu\n",
				__func__, c->name, input_rate, rate);
			cfreq = input_rate/(input_rate/1000000);
			break;
		}
		pr_err("%s: Unexpected reference rate %lu\n",
		       __func__, input_rate);
		BUG();
	}

	/* Raise VCO to guarantee 0.5% accuracy, and vco min boundary */
	*vco = max(200 * cfreq, c->u.pll.vco_min);
	return cfreq;
}

/* Charge pump setting: PLLD scales with N; others use configured default */
static u8 get_pll_cpcon(struct clk *c, u16 n)
{
	if (c->flags & PLLD) {
		if (n >= 1000)
			return 15;
		else if (n >= 600)
			return 12;
		else if (n >= 300)
			return 8;
		else if (n >= 50)
			return 3;
		else
			return 2;
	}
	return c->u.pll.cpcon_default ? : OUT_OF_TABLE_CPCON;
}

/*
 * Set a conventional PLL rate: use a freq_table entry when available,
 * otherwise derive M/N/P from a comparison frequency.  The PLL is
 * disabled around the re-program if it was running.  Fixed-rate PLLs
 * reject any rate other than their fixed rate.
 */
static int tegra12_pll_clk_set_rate(struct clk *c, unsigned long rate)
{
	u32 val, p_div, old_base;
	unsigned long input_rate;
	const struct clk_pll_freq_table *sel;
	struct clk_pll_freq_table cfg;
	u32 divn_mask = c->flags & PLLD ? PLLD_BASE_DIVN_MASK :
		PLL_BASE_DIVN_MASK;

	pr_debug("%s: %s %lu\n", __func__, c->name, rate);

	if (tegra_platform_is_qt())
		return 0;

	if (c->flags & PLL_FIXED) {
		int ret = 0;
		if (rate != c->u.pll.fixed_rate) {
			pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
			       __func__, c->name, c->u.pll.fixed_rate, rate);
			ret = -EINVAL;
		}
		return ret;
	}

	p_div = 0;
	input_rate = clk_get_rate(c->parent);

	/* Check if the target rate is tabulated */
	for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
		if (sel->input_rate == input_rate && sel->output_rate == rate) {
			if (c->flags & PLLU) {
				BUG_ON(sel->p < 1 || sel->p > 2);
				if (sel->p == 1)
					p_div = PLLU_BASE_POST_DIV;
			} else {
				BUG_ON(sel->p < 1);
				/* p_div = log2(p), then shifted into place */
				for (val = sel->p; val > 1; val >>= 1, p_div++)
					;
				p_div <<= PLL_BASE_DIVP_SHIFT;
			}
			break;
		}
	}

	/* Configure out-of-table rate */
	if (sel->input_rate == 0) {
		unsigned long cfreq, vco;
		BUG_ON(c->flags & PLLU);
		sel = &cfg;

		/* If available, use pll specific algorithm to select
		   comparison frequency, and vco target */
		cfreq = get_pll_cfreq_special(c, input_rate, rate, &vco);
		if (!cfreq)
			cfreq = get_pll_cfreq_common(c, input_rate, rate, &vco);

		for (cfg.output_rate = rate; cfg.output_rate < vco; p_div++)
			cfg.output_rate <<= 1;

		cfg.p = 0x1 << p_div;
		cfg.m = input_rate / cfreq;
		cfg.n = cfg.output_rate / cfreq;
		cfg.cpcon = get_pll_cpcon(c, cfg.n);

		if ((cfg.m > (PLL_BASE_DIVM_MASK >> PLL_BASE_DIVM_SHIFT)) ||
		    (cfg.n > (divn_mask >> PLL_BASE_DIVN_SHIFT)) ||
		    (p_div > (PLL_BASE_DIVP_MASK >> PLL_BASE_DIVP_SHIFT)) ||
		    (cfg.output_rate > c->u.pll.vco_max)) {
			pr_err("%s: Failed to set %s out-of-table rate %lu\n",
			       __func__, c->name, rate);
			return -EINVAL;
		}
		p_div <<= PLL_BASE_DIVP_SHIFT;
	}

	c->mul = sel->n;
	c->div = sel->m * sel->p;

	old_base = val = clk_readl(c->reg + PLL_BASE);
	val &= ~(PLL_BASE_DIVM_MASK | divn_mask |
		 ((c->flags & PLLU) ? PLLU_BASE_POST_DIV : PLL_BASE_DIVP_MASK));
	val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
		(sel->n << PLL_BASE_DIVN_SHIFT) | p_div;
	if (val == old_base)
		return 0;

	if (c->state == ON) {
		/* Reprogram only while disabled */
		tegra12_pll_clk_disable(c);
		val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
	}
	clk_writel(val, c->reg + PLL_BASE);

	if (c->flags & PLL_HAS_CPCON) {
		val = clk_readl(c->reg + PLL_MISC(c));
		val &= ~PLL_MISC_CPCON_MASK;
		val |= sel->cpcon << PLL_MISC_CPCON_SHIFT;
		if (c->flags & (PLLU | PLLD)) {
			val &= ~PLL_MISC_LFCON_MASK;
			val |= PLLDU_LFCON << PLL_MISC_LFCON_SHIFT;
		}
		clk_writel(val, c->reg + PLL_MISC(c));
	}

	if (c->state == ON)
		tegra12_pll_clk_enable(c);

	return 0;
}

static struct clk_ops tegra_pll_ops = {
	.init			= tegra12_pll_clk_init,
	.enable			= tegra12_pll_clk_enable,
	.disable		= tegra12_pll_clk_disable,
	.set_rate		= tegra12_pll_clk_set_rate,
};

/* PLLP init additionally seeds rates of clocks derived from PLLP */
static void tegra12_pllp_clk_init(struct clk *c)
{
	tegra12_pll_clk_init(c);
	tegra12_pllp_init_dependencies(c->u.pll.fixed_rate);
}

#ifdef CONFIG_PM_SLEEP
/* On resume the fixed rate must come back exactly as it was */
static void tegra12_pllp_clk_resume(struct clk *c)
{
	unsigned long rate = c->u.pll.fixed_rate;
	tegra12_pll_clk_init(c);
	BUG_ON(rate != c->u.pll.fixed_rate);
}
#endif

static struct clk_ops tegra_pllp_ops = {
	.init			= tegra12_pllp_clk_init,
	.enable			= tegra12_pll_clk_enable,
	.disable		= tegra12_pll_clk_disable,
	.set_rate		= tegra12_pll_clk_set_rate,
};

/*
 * PLLD extended configuration: route/enable CSI and DSI outputs and the
 * MIPI mux via bits in PLL_BASE / PLL_MISC.
 */
static int
tegra12_plld_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
{
	u32 val, mask, reg;
	u32 clear = 0;

	switch (p) {
	case TEGRA_CLK_PLLD_CSI_OUT_ENB:
		mask = PLLD_BASE_CSI_CLKENABLE | PLLD_BASE_CSI_CLKSOURCE;
		reg = c->reg + PLL_BASE;
		break;
	case TEGRA_CLK_MIPI_CSI_OUT_ENB:
		mask = PLLD_BASE_CSI_CLKENABLE;
		clear = PLLD_BASE_CSI_CLKSOURCE;
		reg = c->reg + PLL_BASE;
		break;
	case TEGRA_CLK_PLLD_DSI_OUT_ENB:
		mask = PLLD_MISC_DSI_CLKENABLE;
		reg = c->reg + PLL_MISC(c);
		break;
	case TEGRA_CLK_PLLD_MIPI_MUX_SEL:
		mask = PLLD_BASE_DSI_MUX_MASK;
		reg = c->reg + PLL_BASE;
		break;
	default:
		return -EINVAL;
	}

	val = clk_readl(reg);
	if (setting) {
		val |= mask;
		val &=
~clear;
	} else
		val &= ~mask;
	clk_writel(val, reg);
	return 0;
}

static struct clk_ops tegra_plld_ops = {
	.init			= tegra12_pll_clk_init,
	.enable			= tegra12_pll_clk_enable,
	.disable		= tegra12_pll_clk_disable,
	.set_rate		= tegra12_pll_clk_set_rate,
	.clk_cfg_ex		= tegra12_plld_clk_cfg_ex,
};

/*
 * Dynamic ramp PLLs:
 *  PLLC2 and PLLC3 (PLLCX)
 *  PLLX and PLLC (PLLXC)
 *
 * When scaling PLLC and PLLX, dynamic ramp is allowed for any transition
 * that changes NDIV only. As a matter of policy we will make sure that
 * switching between output rates above VCO minimum is always dynamic.
 * The pre-requisite for the above guarantee is the following configuration
 * convention:
 * - pll configured with fixed MDIV
 * - when output rate is above VCO minimum PDIV = 0 (p-value = 1)
 * Switching between output rates below VCO minimum may or may not be
 * dynamic, and switching across VCO minimum is never dynamic.
 *
 * PLLC2 and PLLC3 in addition to dynamic ramp mechanism have also
 * glitchless output dividers. However dynamic ramp without overshoot is
 * guaranteed only when output divisor is less or equal 8.
 *
 * Of course, dynamic ramp is applied provided PLL is already enabled.
 */

/*
 * Common configuration policy for dynamic ramp PLLs:
 * - always set fixed M-value based on the reference rate
 * - always set P-value value 1:1 for output rates above VCO minimum, and
 *   choose minimum necessary P-value for output rates below VCO minimum
 * - calculate N-value based on selected M and P
 */
static int pll_dyn_ramp_cfg(struct clk *c, struct clk_pll_freq_table *cfg,
	unsigned long rate, unsigned long input_rate, u32 *pdiv)
{
	u32 p;

	if (!rate)
		return -EINVAL;

	/* Smallest P that lifts the VCO to at least vco_min */
	p = DIV_ROUND_UP(c->u.pll.vco_min, rate);
	p = c->u.pll.round_p_to_pdiv(p, pdiv);
	if (IS_ERR_VALUE(p))
		return -EINVAL;

	cfg->m = PLL_FIXED_MDIV(c, input_rate);
	cfg->p = p;
	cfg->output_rate = rate * cfg->p;
	if (cfg->output_rate > c->u.pll.vco_max)
		cfg->output_rate = c->u.pll.vco_max;
	cfg->n = cfg->output_rate * cfg->m / input_rate;

	/* can use PLLCX N-divider field layout for all dynamic ramp PLLs */
	if (cfg->n > (PLLCX_BASE_DIVN_MASK >> PLL_BASE_DIVN_SHIFT))
		return -EINVAL;

	return 0;
}

/*
 * Fill @cfg for @rate: from the frequency table when the entry exists
 * (which must already obey the fixed-MDIV convention), otherwise by
 * computing an out-of-table configuration via pll_dyn_ramp_cfg().
 */
static int pll_dyn_ramp_find_cfg(struct clk *c, struct clk_pll_freq_table *cfg,
	unsigned long rate, unsigned long input_rate, u32 *pdiv)
{
	const struct clk_pll_freq_table *sel;

	/* Check if the target rate is tabulated */
	for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
		if (sel->input_rate == input_rate && sel->output_rate == rate) {
			u32 p = c->u.pll.round_p_to_pdiv(sel->p, pdiv);
			BUG_ON(IS_ERR_VALUE(p));
			BUG_ON(sel->m != PLL_FIXED_MDIV(c, input_rate));
			*cfg = *sel;
			return 0;
		}
	}

	/* Configure out-of-table rate */
	if (pll_dyn_ramp_cfg(c, cfg, rate, input_rate, pdiv)) {
		pr_err("%s: Failed to set %s out-of-table rate %lu\n",
		       __func__, c->name, rate);
		return -EINVAL;
	}
	return 0;
}

/*
 * Set or clear a PLL's IDDQ (power gating) bit with a delayed write,
 * using the PLLX-specific accessors when needed.
 */
static inline void pll_do_iddq(struct clk *c, u32 offs, u32 iddq_bit, bool set)
{
	u32 val;
	if (c->flags & PLLX)
		val = clk_readlx(c->reg + offs);
	else
		val = clk_readl(c->reg + offs);

	if (set)
		val |= iddq_bit;
	else
		val &= ~iddq_bit;

	if (c->flags & PLLX)
		clk_writelx_delay(val, c->reg + offs);
	else
		clk_writel_delay(val, c->reg + offs);
}

/* PDIV -> p-value mapping for PLLCX output divider */
static u8 pllcx_p[PLLCX_PDIV_MAX + 1] = {
/* PDIV: 0, 1, 2, 3, 4, 5, 6,  7 */
/* p: */ 1, 2, 3, 4, 6, 8, 12, 16 };

/*
 * Round @p up to the nearest supported PLLCX p-value; store the matching
 * PDIV code via @pdiv if non-NULL.  Odd PDIV entries (the DIV3 values)
 * are skipped.  Returns the rounded p, or -EINVAL (as u32) if @p is 0 or
 * exceeds the table.
 */
static u32 pllcx_round_p_to_pdiv(u32 p, u32 *pdiv)
{
	int i;

	if (p) {
		for (i = 0; i <= PLLCX_PDIV_MAX; i++) {
			/* Do not use DIV3 p values - mapped to even PDIV */
			if (i && ((i & 0x1) == 0))
				continue;

			if (p <= pllcx_p[i]) {
				if (pdiv)
					*pdiv = i;
				return pllcx_p[i];
			}
		}
	}
	return -EINVAL;
}

/*
 * Select low/high range loop-filter coefficients depending on whether N
 * is below the reference-rate-specific threshold.
 */
static void pllcx_update_dynamic_koef(struct clk *c, unsigned long input_rate,
					u32 n)
{
	u32 val, n_threshold;

	switch (input_rate) {
	case 12000000:
		n_threshold = 70;
		break;
	case 13000000:
	case 26000000:
		n_threshold = 71;
		break;
	case 16800000:
		n_threshold = 55;
		break;
	case 19200000:
		n_threshold = 48;
		break;
	default:
		pr_err("%s: Unexpected reference rate %lu\n",
			__func__, input_rate);
		BUG();
		return;
	}

	val = clk_readl(c->reg + PLL_MISC(c));
	val &= ~(PLLCX_MISC_SDM_DIV_MASK | PLLCX_MISC_FILT_DIV_MASK);
	val |= n <= n_threshold ?
		PLLCX_MISC_DIV_LOW_RANGE : PLLCX_MISC_DIV_HIGH_RANGE;
	clk_writel(val, c->reg + PLL_MISC(c));
}

/* Pulse the strobe bit to latch shadowed PLLCX divider settings */
static void pllcx_strobe(struct clk *c)
{
	u32 reg = c->reg + PLL_MISC(c);
	u32 val = clk_readl(reg);

	val |= PLLCX_MISC_STROBE;
	pll_writel_delay(val, reg);

	val &= ~PLLCX_MISC_STROBE;
	clk_writel(val, reg);
}

/* Load default values into all PLLCX misc registers (IDDQ set if off) */
static void pllcx_set_defaults(struct clk *c, unsigned long input_rate, u32 n)
{
	u32 misc1val = PLLCX_MISC1_DEFAULT_VALUE;
	if (c->state != ON)
		misc1val |= PLLCX_MISC1_IDDQ;

	clk_writel(PLLCX_MISC_DEFAULT_VALUE, c->reg + PLL_MISC(c));
	clk_writel(misc1val, c->reg + PLL_MISCN(c, 1));
	clk_writel(PLLCX_MISC2_DEFAULT_VALUE, c->reg + PLL_MISCN(c, 2));
	clk_writel(PLLCX_MISC3_DEFAULT_VALUE, c->reg + PLL_MISCN(c, 3));
	pllcx_update_dynamic_koef(c, input_rate, n);
}

static void tegra12_pllcx_clk_init(struct clk *c)
{
	unsigned long input_rate = clk_get_rate(c->parent);
	u32 m, n, p, val;

	/* clip vco_min to exact multiple of input rate to avoid crossover
	   by rounding */
	c->u.pll.vco_min =
		DIV_ROUND_UP(c->u.pll.vco_min, input_rate) * input_rate;
	c->min_rate = DIV_ROUND_UP(c->u.pll.vco_min,
		pllcx_p[PLLCX_PDIV_MAX]);

	val = clk_readl(c->reg + PLL_BASE);
	c->state = (val & PLL_BASE_ENABLE) ? ON : OFF;

	/*
	 * PLLCX is not a boot PLL, it should be left disabled by boot-loader,
	 * and no enabled module clocks should use it as a source during clock
	 * init.
	 */
	BUG_ON(c->state == ON && !tegra_platform_is_linsim()
			&& !is_tegra_hypervisor_mode());
	/*
	 * Most of PLLCX register fields are shadowed, and can not be read
	 * directly from PLL h/w. Hence, actual PLLCX boot state is unknown.
	 * Initialize PLL to default state: disabled, reset; shadow registers
	 * loaded with default parameters; dividers are preset for half of
	 * minimum VCO rate (the latter assured that shadowed divider settings
	 * are within supported range).
	 */
	m = PLL_FIXED_MDIV(c, input_rate);
	n = m * c->u.pll.vco_min / input_rate;
	p = pllcx_p[1];
	val = (m << PLL_BASE_DIVM_SHIFT) | (n << PLL_BASE_DIVN_SHIFT) |
		(1 << PLL_BASE_DIVP_SHIFT);
	clk_writel(val, c->reg + PLL_BASE);	/* PLL disabled */

	pllcx_set_defaults(c, input_rate, n);

	c->mul = n;
	c->div = m * p;
}

/* Power up (clear IDDQ), enable, release reset, strobe, and wait for
 * both frequency and phase lock. */
static int tegra12_pllcx_clk_enable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	pll_do_iddq(c, PLL_MISCN(c, 1), PLLCX_MISC1_IDDQ, false);

	val = clk_readl(c->reg + PLL_BASE);
	val &= ~PLL_BASE_BYPASS;
	val |= PLL_BASE_ENABLE;
	pll_writel_delay(val, c->reg + PLL_BASE);

	val = clk_readl(c->reg + PLL_MISC(c));
	val &= ~PLLCX_MISC_RESET;
	pll_writel_delay(val, c->reg + PLL_MISC(c));

	pllcx_strobe(c);
	tegra12_pll_clk_wait_for_lock(c, c->reg + PLL_BASE,
			PLL_BASE_LOCK | PLLCX_BASE_PHASE_LOCK);
	return 0;
}

/* Disable, assert reset, and power down (set IDDQ) — reverse of enable */
static void tegra12_pllcx_clk_disable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	val = clk_readl(c->reg);
	val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
	clk_writel(val, c->reg);

	val = clk_readl(c->reg + PLL_MISC(c));
	val |= PLLCX_MISC_RESET;
	pll_writel_delay(val, c->reg + PLL_MISC(c));

	pll_do_iddq(c, PLL_MISCN(c, 1), PLLCX_MISC1_IDDQ, true);
}

static int
tegra12_pllcx_clk_set_rate(struct clk *c, unsigned long rate)
{
	u32 val, pdiv;
	unsigned long input_rate;
	struct clk_pll_freq_table cfg, old_cfg;
	const struct clk_pll_freq_table *sel = &cfg;

	pr_debug("%s: %s %lu\n", __func__, c->name, rate);
	if (tegra_platform_is_qt())
		return 0;

	input_rate = clk_get_rate(c->parent);

	if (pll_dyn_ramp_find_cfg(c, &cfg, rate, input_rate, &pdiv))
		return -EINVAL;

	c->mul = sel->n;
	c->div = sel->m * sel->p;

	val = clk_readl(c->reg + PLL_BASE);
	PLL_BASE_PARSE(PLLCX, old_cfg, val);
	old_cfg.p = pllcx_p[old_cfg.p];

	/* M is fixed by convention — only N and P may differ */
	BUG_ON(old_cfg.m != sel->m);
	if ((sel->n == old_cfg.n) && (sel->p == old_cfg.p))
		return 0;

#if PLLCX_USE_DYN_RAMP
	if (c->state == ON && ((sel->n == old_cfg.n) ||
			PLLCX_IS_DYN(sel->p, old_cfg.p))) {
		/*
		 * Dynamic ramp if PLL is enabled, and M divider is unchanged:
		 * - Change P divider 1st if intermediate rate is below either
		 *   old or new rate.
		 * - Change N divider with DFS strobe - target rate is either
		 *   final new rate or below old rate
		 * - If divider has been changed, exit without waiting for
		 *   lock. Otherwise, wait for lock and change divider.
		 */
		if (sel->p > old_cfg.p) {
			val &= ~PLLCX_BASE_DIVP_MASK;
			val |= pdiv << PLL_BASE_DIVP_SHIFT;
			clk_writel(val, c->reg + PLL_BASE);
		}

		if (sel->n != old_cfg.n) {
			pllcx_update_dynamic_koef(c, input_rate, sel->n);
			val &= ~PLLCX_BASE_DIVN_MASK;
			val |= sel->n << PLL_BASE_DIVN_SHIFT;
			pll_writel_delay(val, c->reg + PLL_BASE);

			pllcx_strobe(c);
			tegra12_pll_clk_wait_for_lock(c, c->reg + PLL_BASE,
					PLL_BASE_LOCK | PLLCX_BASE_PHASE_LOCK);
		}

		if (sel->p < old_cfg.p) {
			val &= ~PLLCX_BASE_DIVP_MASK;
			val |= pdiv << PLL_BASE_DIVP_SHIFT;
			clk_writel(val, c->reg + PLL_BASE);
		}
		return 0;
	}
#endif

	/* Fallback: full re-program with the PLL disabled */
	val &= ~(PLLCX_BASE_DIVN_MASK | PLLCX_BASE_DIVP_MASK);
	val |= (sel->n << PLL_BASE_DIVN_SHIFT) |
		(pdiv << PLL_BASE_DIVP_SHIFT);

	if (c->state == ON) {
		tegra12_pllcx_clk_disable(c);
		val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
	}
	pllcx_update_dynamic_koef(c, input_rate, sel->n);
	clk_writel(val, c->reg + PLL_BASE);
	if (c->state == ON)
		tegra12_pllcx_clk_enable(c);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * Resume path: if h/w shows the PLL disabled, restore the input divider
 * and defaults, re-apply the pre-suspend rate and re-enable, briefly
 * forcing c->state to OFF so set_rate/enable take the full reprogram
 * path; final state sync happens in tegra_clk_resume later.
 */
static void tegra12_pllcx_clk_resume_enable(struct clk *c)
{
	unsigned long rate = clk_get_rate_all_locked(c->parent);
	u32 val = clk_readl(c->reg + PLL_BASE);
	enum clk_state state = c->state;

	if (val & PLL_BASE_ENABLE)
		return;		/* already resumed */

	/* Restore input divider */
	val &= ~PLLCX_BASE_DIVM_MASK;
	val |= PLL_FIXED_MDIV(c, rate) << PLL_BASE_DIVM_SHIFT;
	clk_writel(val, c->reg + PLL_BASE);

	/* temporarily sync h/w and s/w states, final sync happens
	   in tegra_clk_resume later */
	c->state = OFF;
	pllcx_set_defaults(c, rate, c->mul);

	rate = clk_get_rate_all_locked(c);
	tegra12_pllcx_clk_set_rate(c, rate);
	tegra12_pllcx_clk_enable(c);
	c->state = state;
}
#endif

static struct clk_ops tegra_pllcx_ops = {
	.init			= tegra12_pllcx_clk_init,
	.enable			= tegra12_pllcx_clk_enable,
	.disable		= tegra12_pllcx_clk_disable,
	.set_rate		= tegra12_pllcx_clk_set_rate,
};

/* non-monotonic mapping below is not a typo */
static u8 pllxc_p[PLLXC_PDIV_MAX + 1] = {
/* PDIV: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2,
3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32 };

/*
 * pllxc_round_p_to_pdiv - map post-divider value @p to its PDIV field
 * encoding (p - 1 within the s/w range). Returns @p on success, or
 * -EINVAL (as u32; callers test with IS_ERR_VALUE) for p == 0 or p
 * above PLLXC_SW_PDIV_MAX + 1.
 */
static u32 pllxc_round_p_to_pdiv(u32 p, u32 *pdiv)
{
	if (!p || (p > PLLXC_SW_PDIV_MAX + 1))
		return -EINVAL;

	if (pdiv)
		*pdiv = p - 1;
	return p;
}

/*
 * pllxc_get_dyn_steps - look up dynamic-ramp step A/B coefficients for
 * the reference clock @input_rate; BUG()s on an unsupported rate.
 */
static void pllxc_get_dyn_steps(struct clk *c, unsigned long input_rate,
				u32 *step_a, u32 *step_b)
{
	switch (input_rate) {
	case 12000000:
	case 13000000:
	case 26000000:
		*step_a = 0x2B;
		*step_b = 0x0B;
		return;
	case 16800000:
		*step_a = 0x1A;
		*step_b = 0x09;
		return;
	case 19200000:
		*step_a = 0x12;
		*step_b = 0x08;
		return;
	default:
		pr_err("%s: Unexpected reference rate %lu\n",
			__func__, input_rate);
		BUG();
	}
}

/*
 * pllx_set_defaults - program PLLX dynamic-ramp steps, lock enable and
 * IDDQ defaults (PLLX registers are accessed via clk_readlx/clk_writelx).
 */
static void pllx_set_defaults(struct clk *c, unsigned long input_rate)
{
	u32 val;
	u32 step_a, step_b;

	/* Only s/w dyn ramp control is supported */
	val = clk_readlx(PLLX_HW_CTRL_CFG);
	BUG_ON(!(val & PLLX_HW_CTRL_CFG_SWCTRL) && !tegra_platform_is_linsim());

	pllxc_get_dyn_steps(c, input_rate, &step_a, &step_b);
	val = step_a << PLLX_MISC2_DYNRAMP_STEPA_SHIFT;
	val |= step_b << PLLX_MISC2_DYNRAMP_STEPB_SHIFT;

	/* Get ready dyn ramp state machine, disable lock override */
	clk_writelx(val, c->reg + PLL_MISCN(c, 2));

	/* Enable outputs to CPUs and configure lock */
	val = 0;
#if USE_PLL_LOCK_BITS
	val |= PLL_MISC_LOCK_ENABLE(c);
#endif
	clk_writelx(val, c->reg + PLL_MISC(c));

	/* Check/set IDDQ: must already be out of IDDQ if the PLL is on */
	val = clk_readlx(c->reg + PLL_MISCN(c, 3));
	if (c->state == ON) {
		BUG_ON(val & PLLX_MISC3_IDDQ && !tegra_platform_is_linsim());
	} else {
		val |= PLLX_MISC3_IDDQ;
		clk_writelx(val, c->reg + PLL_MISCN(c, 3));
	}
}

/*
 * pllc_set_defaults - program PLLC dynamic-ramp steps, lock override /
 * lock enable and IDDQ defaults.
 */
static void pllc_set_defaults(struct clk *c, unsigned long input_rate)
{
	u32 val;
	u32 step_a, step_b;

	/* Get ready dyn ramp state machine */
	pllxc_get_dyn_steps(c, input_rate, &step_a, &step_b);
	val = step_a << PLLC_MISC1_DYNRAMP_STEPA_SHIFT;
	val |= step_b << PLLC_MISC1_DYNRAMP_STEPB_SHIFT;
	clk_writel(val, c->reg + PLL_MISCN(c, 1));

	/* Configure lock and check/set IDDQ */
	val = clk_readl(c->reg + PLL_BASE);
	val &= ~PLLC_BASE_LOCK_OVERRIDE;
	clk_writel(val, c->reg + PLL_BASE);

	val = clk_readl(c->reg + PLL_MISC(c));
#if USE_PLL_LOCK_BITS
	val |= PLLC_MISC_LOCK_ENABLE;
#else
	val &= ~PLLC_MISC_LOCK_ENABLE;
#endif
	clk_writel(val, c->reg + PLL_MISC(c));

	if (c->state == ON) {
		BUG_ON(val & PLLC_MISC_IDDQ && !tegra_platform_is_linsim());
	} else {
		val |= PLLC_MISC_IDDQ;
		clk_writel(val, c->reg + PLL_MISC(c));
	}
}

/*
 * tegra12_pllxc_clk_init - derive s/w state (state, div, mul) from the
 * h/w base register and program family defaults; shared by PLLX and
 * PLLC (distinguished by the PLLX flag and the *x register accessors).
 */
static void tegra12_pllxc_clk_init(struct clk *c)
{
	unsigned long input_rate = clk_get_rate(c->parent);
	u32 m, p, val;

	/* clip vco_min to exact multiple of input rate to avoid crossover
	   by rounding */
	c->u.pll.vco_min =
		DIV_ROUND_UP(c->u.pll.vco_min, input_rate) * input_rate;
	c->min_rate =
		DIV_ROUND_UP(c->u.pll.vco_min, pllxc_p[PLLXC_SW_PDIV_MAX]);

	if (c->flags & PLLX)
		val = clk_readlx(c->reg + PLL_BASE);
	else
		val = clk_readl(c->reg + PLL_BASE);

	c->state = (val & PLL_BASE_ENABLE) ? ON : OFF;

	m = (val & PLLXC_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
	p = (val & PLLXC_BASE_DIVP_MASK) >> PLL_BASE_DIVP_SHIFT;
	BUG_ON(p > PLLXC_PDIV_MAX);
	p = pllxc_p[p];

	c->div = m * p;
	c->mul = (val & PLLXC_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;

	if (c->flags & PLLX)
		pllx_set_defaults(c, input_rate);
	else
		pllc_set_defaults(c, input_rate);
}

/*
 * tegra12_pllxc_clk_enable - take the PLL out of IDDQ, set ENABLE and
 * wait for lock.
 */
static int tegra12_pllxc_clk_enable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	if (c->flags & PLLX) {
		pll_do_iddq(c, PLL_MISCN(c, 3), PLLX_MISC3_IDDQ, false);
		val = clk_readlx(c->reg + PLL_BASE);
		val |= PLL_BASE_ENABLE;
		clk_writelx(val, c->reg + PLL_BASE);
	} else {
		pll_do_iddq(c, PLL_MISC(c), PLLC_MISC_IDDQ, false);
		val = clk_readl(c->reg + PLL_BASE);
		val |= PLL_BASE_ENABLE;
		clk_writel(val, c->reg + PLL_BASE);
	}

	tegra12_pll_clk_wait_for_lock(c, c->reg + PLL_BASE, PLL_BASE_LOCK);
	return 0;
}

/*
 * tegra12_pllxc_clk_disable - clear ENABLE, then put the PLL back into
 * IDDQ.
 */
static void tegra12_pllxc_clk_disable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	if (c->flags & PLLX) {
		val = clk_readlx(c->reg + PLL_BASE);
		val &= ~PLL_BASE_ENABLE;
		clk_writelx(val, c->reg + PLL_BASE);
	} else {
		val = clk_readl(c->reg + PLL_BASE);
		val &= ~PLL_BASE_ENABLE;
		clk_writel(val, c->reg + PLL_BASE);
	}

	if (c->flags & PLLX)
		pll_do_iddq(c, PLL_MISCN(c, 3), PLLX_MISC3_IDDQ, true);
	else
		pll_do_iddq(c, PLL_MISC(c), PLLC_MISC_IDDQ, true);
}

/*
 * Dynamic N-divider ramp sequence shared by PLLC/PLLX set_rate; relies
 * on local variables c, sel and val of the caller. The PLLX variant
 * only differs in using the *x register accessors.
 */
#define PLLXC_DYN_RAMP(pll_misc, reg)					\
	do {								\
		u32 misc = clk_readl((reg));				\
									\
		misc &= ~pll_misc##_NDIV_NEW_MASK;			\
		misc |= sel->n << pll_misc##_NDIV_NEW_SHIFT;		\
		pll_writel_delay(misc, (reg));				\
									\
		misc |= pll_misc##_EN_DYNRAMP;				\
		clk_writel(misc, (reg));				\
		tegra12_pll_clk_wait_for_lock(c, (reg),			\
			pll_misc##_DYNRAMP_DONE);			\
									\
		val &= ~PLLXC_BASE_DIVN_MASK;				\
		val |= sel->n << PLL_BASE_DIVN_SHIFT;			\
		pll_writel_delay(val, c->reg + PLL_BASE);		\
									\
		misc &= ~pll_misc##_EN_DYNRAMP;				\
		pll_writel_delay(misc, (reg));				\
	} while (0)

#define PLLX_DYN_RAMP(pll_misc, reg)					\
	do {								\
		u32 misc = clk_readlx((reg));				\
									\
		misc &= ~pll_misc##_NDIV_NEW_MASK;			\
		misc |= sel->n << pll_misc##_NDIV_NEW_SHIFT;		\
		pll_writelx_delay(misc, (reg));				\
									\
		misc |= pll_misc##_EN_DYNRAMP;				\
		clk_writelx(misc, (reg));				\
		tegra12_pll_clk_wait_for_lock(c, (reg),			\
			pll_misc##_DYNRAMP_DONE);			\
									\
		val &= ~PLLXC_BASE_DIVN_MASK;				\
		val |= sel->n << PLL_BASE_DIVN_SHIFT;			\
		pll_writelx_delay(val, c->reg + PLL_BASE);		\
									\
		misc &= ~pll_misc##_EN_DYNRAMP;				\
		pll_writelx_delay(misc, (reg));				\
	} while (0)

/*
 * tegra12_pllxc_clk_set_rate - set a new M/N/P configuration, using the
 * h/w dynamic N ramp when only N changes, or an ENABLE pulse otherwise.
 */
static int tegra12_pllxc_clk_set_rate(struct clk *c, unsigned long rate)
{
	u32 val, pdiv;
	unsigned long input_rate;
	struct clk_pll_freq_table cfg, old_cfg;
	const struct clk_pll_freq_table *sel = &cfg;

	pr_debug("%s: %s %lu\n", __func__, c->name, rate);
	if (tegra_platform_is_qt())
		return 0;

	input_rate = clk_get_rate(c->parent);

	if (pll_dyn_ramp_find_cfg(c, &cfg, rate, input_rate, &pdiv))
		return -EINVAL;

	c->mul = sel->n;
	c->div = sel->m * sel->p;

	if (c->flags & PLLX)
		val = clk_readlx(c->reg + PLL_BASE);
	else
		val = clk_readl(c->reg + PLL_BASE);

	PLL_BASE_PARSE(PLLXC, old_cfg, val);
	old_cfg.p = pllxc_p[old_cfg.p];

	if ((sel->m == old_cfg.m) && (sel->n == old_cfg.n) &&
	    (sel->p == old_cfg.p))
		return 0;	/* nothing to change */

#if PLLXC_USE_DYN_RAMP
	/*
	 * Dynamic ramp can be used if M, P dividers are unchanged
	 * (covers superset of conventional dynamic ramps)
	 */
	if ((c->state == ON) && (sel->m == old_cfg.m) &&
	    (sel->p == old_cfg.p)) {

		if (c->flags & PLLX) {
			u32 reg = c->reg + PLL_MISCN(c, 2);
			PLLX_DYN_RAMP(PLLX_MISC2, reg);
		} else {
			u32 reg = c->reg + PLL_MISCN(c, 1);
			PLLXC_DYN_RAMP(PLLC_MISC1, reg);
		}

		return 0;
	}
#endif
	if (c->state == ON) {
		/* Use "ENABLE" pulse without placing PLL into IDDQ */
		val &= ~PLL_BASE_ENABLE;
		if (c->flags & PLLX)
			pll_writelx_delay(val, c->reg + PLL_BASE);
		else
			pll_writel_delay(val, c->reg + PLL_BASE);
	}

	val &= ~(PLLXC_BASE_DIVM_MASK |
		 PLLXC_BASE_DIVN_MASK | PLLXC_BASE_DIVP_MASK);
	val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
		(sel->n << PLL_BASE_DIVN_SHIFT) |
		(pdiv << PLL_BASE_DIVP_SHIFT);

	if (c->flags & PLLX)
		clk_writelx(val, c->reg + PLL_BASE);
	else
		clk_writel(val, c->reg + PLL_BASE);

	if (c->state == ON) {
		val |= PLL_BASE_ENABLE;
		if (c->flags & PLLX)
			clk_writelx(val, c->reg + PLL_BASE);
		else
			clk_writel(val, c->reg + PLL_BASE);
		tegra12_pll_clk_wait_for_lock(c, c->reg + PLL_BASE,
					      PLL_BASE_LOCK);
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * tegra12_pllxc_clk_resume_enable - re-apply defaults, rate and enable
 * state after suspend (same pattern as the PLLCX resume hook).
 */
static void tegra12_pllxc_clk_resume_enable(struct clk *c)
{
	unsigned long rate = clk_get_rate_all_locked(c->parent);
	enum clk_state state = c->state;

	if (c->flags & PLLX) {
		if (clk_readlx(c->reg + PLL_BASE) & PLL_BASE_ENABLE)
			return;		/* already resumed */
	} else {
		if (clk_readl(c->reg + PLL_BASE) & PLL_BASE_ENABLE)
			return;		/* already resumed */
	}

	/* temporarily sync h/w and s/w states, final sync happens in
	   tegra_clk_resume later */
	c->state = OFF;
	if (c->flags & PLLX)
		pllx_set_defaults(c, rate);
	else
		pllc_set_defaults(c, rate);

	rate = clk_get_rate_all_locked(c);
	tegra12_pllxc_clk_set_rate(c, rate);
	tegra12_pllxc_clk_enable(c);
	c->state = state;
}
#endif

static struct clk_ops tegra_pllxc_ops = {
	.init = tegra12_pllxc_clk_init,
	.enable = tegra12_pllxc_clk_enable,
	.disable = tegra12_pllxc_clk_disable,
	.set_rate = tegra12_pllxc_clk_set_rate,
};

/* FIXME: pllm suspend/resume */

/* non-monotonic
mapping below is not a typo */
static u8 pllm_p[PLLM_PDIV_MAX + 1] = {
/* PDIV: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32 };

/*
 * pllm_round_p_to_pdiv - map post-divider value @p to its PDIV encoding
 * (p - 1 within the s/w range); -EINVAL (as u32) for out-of-range p.
 */
static u32 pllm_round_p_to_pdiv(u32 p, u32 *pdiv)
{
	if (!p || (p > PLLM_SW_PDIV_MAX + 1))
		return -EINVAL;

	if (pdiv)
		*pdiv = p - 1;
	return p;
}

/*
 * pllm_set_defaults - program PLLM lock override/disable and IDDQ
 * defaults in the MISC register.
 */
static void pllm_set_defaults(struct clk *c, unsigned long input_rate)
{
	u32 val = clk_readl(c->reg + PLL_MISC(c));

	val &= ~PLLM_MISC_LOCK_OVERRIDE;
#if USE_PLL_LOCK_BITS
	val &= ~PLLM_MISC_LOCK_DISABLE;
#else
	val |= PLLM_MISC_LOCK_DISABLE;
#endif

	if (c->state != ON)
		val |= PLLM_MISC_IDDQ;
	else
		BUG_ON(val & PLLM_MISC_IDDQ && !tegra_platform_is_linsim());

	clk_writel(val, c->reg + PLL_MISC(c));
}

/*
 * tegra12_pllm_clk_init - derive PLLM state and dividers either from
 * the PMC override registers (when PLLM override is active) or from
 * the PLL base register, then apply defaults. PLLM is the memory PLL;
 * its M divider must match the fixed MDIV on silicon.
 */
static void tegra12_pllm_clk_init(struct clk *c)
{
	unsigned long input_rate = clk_get_rate(c->parent);
	u32 m, p, val;

	/* clip vco_min to exact multiple of input rate to avoid crossover
	   by rounding */
	c->u.pll.vco_min =
		DIV_ROUND_UP(c->u.pll.vco_min, input_rate) * input_rate;
	c->min_rate =
		DIV_ROUND_UP(c->u.pll.vco_min, pllm_p[PLLM_SW_PDIV_MAX]);

	val = pmc_readl(PMC_PLLP_WB0_OVERRIDE);
	if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE) {
		c->state = (val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE) ?
			ON : OFF;

		/* Tegra12 has bad default value of PMC_PLLM_WB0_OVERRIDE.
		 * If bootloader does not initialize PLLM, kernel has to
		 * initialize the register with sane value. */
		if (c->state == OFF) {
			val = pmc_readl(PMC_PLLM_WB0_OVERRIDE);
			m = (val & PLLM_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
			if (m != PLL_FIXED_MDIV(c, input_rate)) {
				/* Copy DIVM and DIVN from PLLM_BASE */
				pr_info("%s: Fixing DIVM and DIVN\n", __func__);
				val = clk_readl(c->reg + PLL_BASE);
				val &= (PLLM_BASE_DIVM_MASK |
					PLLM_BASE_DIVN_MASK);
				pmc_writel(val, PMC_PLLM_WB0_OVERRIDE);
			}
		}

		val = pmc_readl(PMC_PLLM_WB0_OVERRIDE_2);
		p = (val & PMC_PLLM_WB0_OVERRIDE_2_DIVP_MASK) >>
			PMC_PLLM_WB0_OVERRIDE_2_DIVP_SHIFT;

		val = pmc_readl(PMC_PLLM_WB0_OVERRIDE);
	} else {
		val = clk_readl(c->reg + PLL_BASE);
		c->state = (val & PLL_BASE_ENABLE) ? ON : OFF;
		p = (val & PLLM_BASE_DIVP_MASK) >> PLL_BASE_DIVP_SHIFT;
	}

	m = (val & PLLM_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
	BUG_ON(m != PLL_FIXED_MDIV(c, input_rate)
			&& tegra_platform_is_silicon());
	c->div = m * pllm_p[p];
	c->mul = (val & PLLM_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;

	pllm_set_defaults(c, input_rate);
}

/*
 * tegra12_pllm_clk_enable - take PLLM out of IDDQ and enable it via both
 * the base register and the PMC override enable.
 */
static int tegra12_pllm_clk_enable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	pll_do_iddq(c, PLL_MISC(c), PLLM_MISC_IDDQ, false);

	/* Just enable both base and override - one would work */
	val = clk_readl(c->reg + PLL_BASE);
	val |= PLL_BASE_ENABLE;
	clk_writel(val, c->reg + PLL_BASE);

	val = pmc_readl(PMC_PLLP_WB0_OVERRIDE);
	val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
	pmc_writel(val, PMC_PLLP_WB0_OVERRIDE);
	/* NOTE(review): read-back presumably completes the posted PMC
	   write before waiting for lock — confirm against PMC docs */
	val = pmc_readl(PMC_PLLP_WB0_OVERRIDE);

	tegra12_pll_clk_wait_for_lock(c, c->reg + PLL_BASE, PLL_BASE_LOCK);
	return 0;
}

/*
 * tegra12_pllm_clk_disable - disable PLLM via both the PMC override and
 * the base register, then put it into IDDQ.
 */
static void tegra12_pllm_clk_disable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	/* Just disable both base and override - one would work */
	val = pmc_readl(PMC_PLLP_WB0_OVERRIDE);
	val &= ~PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
	pmc_writel(val, PMC_PLLP_WB0_OVERRIDE);
	val = pmc_readl(PMC_PLLP_WB0_OVERRIDE);

	val = clk_readl(c->reg + PLL_BASE);
	val &= ~PLL_BASE_ENABLE;
	clk_writel(val, c->reg + PLL_BASE);

	pll_do_iddq(c, PLL_MISC(c), PLLM_MISC_IDDQ, true);
}

/*
 * tegra12_pllm_clk_set_rate - program PLLM dividers. The memory PLL
 * rate cannot be changed while the PLL is running; when ON, only a
 * request for the current rate is accepted.
 */
static int tegra12_pllm_clk_set_rate(struct clk *c, unsigned long rate)
{
	u32 val, pdiv;
	unsigned long input_rate;
	struct clk_pll_freq_table cfg;
	const struct clk_pll_freq_table *sel = &cfg;

	pr_debug("%s: %s %lu\n", __func__, c->name, rate);

	if (c->state == ON) {
		if (rate != clk_get_rate_locked(c)) {
			pr_err("%s: Can not change memory %s rate in flight\n",
			       __func__, c->name);
			return -EINVAL;
		}
		return 0;
	}

	input_rate = clk_get_rate(c->parent);

	if (pll_dyn_ramp_find_cfg(c, &cfg, rate, input_rate, &pdiv))
		return -EINVAL;

	c->mul = sel->n;
	c->div = sel->m * sel->p;

	val = pmc_readl(PMC_PLLP_WB0_OVERRIDE);
	if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE) {
		/* PMC override active: program dividers via PMC regs */
		val = pmc_readl(PMC_PLLM_WB0_OVERRIDE_2);
		val &= ~PMC_PLLM_WB0_OVERRIDE_2_DIVP_MASK;
		val |= pdiv << PMC_PLLM_WB0_OVERRIDE_2_DIVP_SHIFT;
		pmc_writel(val, PMC_PLLM_WB0_OVERRIDE_2);

		val = pmc_readl(PMC_PLLM_WB0_OVERRIDE);
		val &= ~(PLLM_BASE_DIVM_MASK | PLLM_BASE_DIVN_MASK);
		val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
			(sel->n << PLL_BASE_DIVN_SHIFT);
		pmc_writel(val, PMC_PLLM_WB0_OVERRIDE);
	} else {
		val = clk_readl(c->reg + PLL_BASE);
		val &= ~(PLLM_BASE_DIVM_MASK | PLLM_BASE_DIVN_MASK |
			 PLLM_BASE_DIVP_MASK);
		val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
			(sel->n << PLL_BASE_DIVN_SHIFT) |
			(pdiv << PLL_BASE_DIVP_SHIFT);
		clk_writel(val, c->reg + PLL_BASE);
	}

	return 0;
}

static struct clk_ops tegra_pllm_ops = {
	.init = tegra12_pllm_clk_init,
	.enable = tegra12_pllm_clk_enable,
	.disable = tegra12_pllm_clk_disable,
	.set_rate = tegra12_pllm_clk_set_rate,
};

static u8 pllss_p[PLLSS_PDIV_MAX + 1] = {
/* PDIV: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32 };

/*
 * pllss_round_p_to_pdiv - map post-divider value @p to its PDIV encoding
 * (p - 1 within the s/w range); -EINVAL (as u32) for out-of-range p.
 */
static u32 pllss_round_p_to_pdiv(u32 p, u32 *pdiv)
{
	if (!p || (p > PLLSS_SW_PDIV_MAX + 1))
		return -EINVAL;

	if (pdiv)
		*pdiv = p - 1;
	return p;
}

/*
 * pllss_set_defaults - program PLLSS misc, spread-spectrum, lock and
 * IDDQ defaults; "pll_dp" gets its own dedicated SS settings.
 */
static void pllss_set_defaults(struct clk *c, unsigned long input_rate)
{
	u32 val;

	clk_writel(PLLSS_MISC_DEFAULT_VALUE, c->reg + PLL_MISC(c));
	if (strcmp(c->name, "pll_dp") == 0) {
/* pll_dp: dedicated spread-spectrum configuration */
		clk_writel(PLLDP_SS_CFG_0_DEFAULT_VALUE,
				c->reg + PLLSS_CFG(c));
		clk_writel(PLLDP_SS_CTRL1_0_DEFAULT_VALUE,
				c->reg + PLLSS_CTRL1(c));
		clk_writel(PLLDP_SS_CTRL2_0_DEFAULT_VALUE,
				c->reg + PLLSS_CTRL2(c));
	} else {
		/* common PLLSS spread-spectrum defaults */
		clk_writel(PLLSS_CFG_DEFAULT_VALUE, c->reg + PLLSS_CFG(c));
		clk_writel(PLLSS_CTRL1_DEFAULT_VALUE, c->reg + PLLSS_CTRL1(c));
		clk_writel(PLLSS_CTRL2_DEFAULT_VALUE, c->reg + PLLSS_CTRL2(c));
	}

	/* charge pump gain (KCP) comes from per-clock default */
	val = clk_readl(c->reg + PLL_MISC(c));
	val &= ~(PLLSS_MISC_KCP_MASK);
	val |= (c->u.pll.cpcon_default << PLLSS_MISC_KCP_SHIFT);
#if USE_PLL_LOCK_BITS
	val |= PLLSS_MISC_LOCK_ENABLE;
#else
	val &= ~PLLSS_MISC_LOCK_ENABLE;
#endif
	clk_writel(val, c->reg + PLL_MISC(c));

	val = clk_readl(c->reg + PLL_BASE);
	if (c->state != ON)
		val |= PLLSS_BASE_IDDQ;
	else
		BUG_ON(val & PLLSS_BASE_IDDQ && !tegra_platform_is_linsim());
	val &= ~PLLSS_BASE_LOCK_OVERRIDE;
	clk_writel(val, c->reg + PLL_BASE);
}

/*
 * tegra12_pllss_clk_init - derive PLLSS state/dividers from h/w; if the
 * bootloader left the PLL off, replace the unsafe reset defaults with a
 * minimal-rate M/N/P configuration first.
 */
static void tegra12_pllss_clk_init(struct clk *c)
{
	unsigned long input_rate;
	u32 m, n, p_div, val;

	val = clk_readl(c->reg + PLL_BASE);
	/* source select must be 0 (tegra_pll_ref) */
	BUG_ON(((val & PLLSS_BASE_SOURCE_MASK)
			>> PLLSS_BASE_SOURCE_SHIFT) != 0);

	input_rate = clk_get_rate(c->parent);

	/* clip vco_min to exact multiple of input rate to avoid crossover
	   by rounding */
	c->u.pll.vco_min =
		DIV_ROUND_UP(c->u.pll.vco_min, input_rate) * input_rate;
	c->min_rate =
		DIV_ROUND_UP(c->u.pll.vco_min, pllss_p[PLLSS_SW_PDIV_MAX]);

	c->state = (val & PLL_BASE_ENABLE) ? ON : OFF;
	if (c->state == OFF){
		/* Reset default value of those PLLs are not safe.
		   For example, they cause problem in LP0 resume.
		   Replace them here with the safe value. */
		m = PLL_FIXED_MDIV(c, input_rate);
		n = c->u.pll.vco_min / input_rate * m;
		p_div = PLLSS_SW_PDIV_MAX;
		val &= ~PLLSS_BASE_DIVM_MASK;
		val &= ~PLLSS_BASE_DIVN_MASK;
		val &= ~PLLSS_BASE_DIVP_MASK;
		val |= m << PLL_BASE_DIVM_SHIFT;
		val |= n << PLL_BASE_DIVN_SHIFT;
		val |= p_div << PLL_BASE_DIVP_SHIFT;
		clk_writel(val, c->reg + PLL_BASE);

		pllss_set_defaults(c, input_rate);
	} else
		pr_info("%s was initialized by BootLoader\n", c->name);

	m = (val & PLLSS_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
	n = (val & PLLSS_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;
	p_div = (val & PLLSS_BASE_DIVP_MASK) >> PLL_BASE_DIVP_SHIFT;

	c->div = m * pllss_p[p_div];
	c->mul = n;

	pr_debug("%s: val=%08x m=%d n=%d p_div=%d input_rate=%lu\n",
		 c->name, val, m, n, p_div, input_rate);
}

/*
 * tegra12_pllss_clk_enable - take PLLSS out of IDDQ, set ENABLE and wait
 * for lock.
 */
static int tegra12_pllss_clk_enable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	pll_do_iddq(c, PLL_BASE, PLLSS_BASE_IDDQ, false);
	val = clk_readl(c->reg + PLL_BASE);
	val |= PLL_BASE_ENABLE;
	clk_writel(val, c->reg + PLL_BASE);
	tegra12_pll_clk_wait_for_lock(c, c->reg + PLL_BASE, PLLSS_BASE_LOCK);
	return 0;
}

/*
 * tegra12_pllss_clk_disable - clear ENABLE, then put PLLSS into IDDQ.
 */
static void tegra12_pllss_clk_disable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	val = clk_readl(c->reg + PLL_BASE);
	val &= ~PLL_BASE_ENABLE;
	clk_writel(val, c->reg + PLL_BASE);

	pll_do_iddq(c, PLL_BASE, PLLSS_BASE_IDDQ, true);
}

/*
 * tegra12_pllss_clk_set_rate - program a new M/N/P configuration, using
 * an ENABLE pulse (no IDDQ) when the PLL is running.
 */
static int tegra12_pllss_clk_set_rate(struct clk *c, unsigned long rate)
{
	u32 val, pdiv, old_base;
	unsigned long input_rate;
	struct clk_pll_freq_table cfg, old_cfg;
	const struct clk_pll_freq_table *sel = &cfg;

	pr_debug("%s: %s %lu\n", __func__, c->name, rate);
	if (tegra_platform_is_qt())
		return 0;

	input_rate = clk_get_rate(c->parent);

	if (pll_dyn_ramp_find_cfg(c, &cfg, rate, input_rate, &pdiv))
		return -EINVAL;

	c->mul = sel->n;
	c->div = sel->m * sel->p;

	val = clk_readl(c->reg + PLL_BASE);
	PLL_BASE_PARSE(PLLSS, old_cfg, val);
	old_cfg.p = pllss_p[old_cfg.p];

	if ((sel->m == old_cfg.m) && (sel->n == old_cfg.n) &&
	    (sel->p == old_cfg.p))
		return 0;	/* already at the requested configuration */

	val = old_base = clk_readl(c->reg + PLL_BASE);
	val &= ~(PLLSS_BASE_DIVM_MASK |
		 PLLSS_BASE_DIVN_MASK | PLLSS_BASE_DIVP_MASK);
	val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
		(sel->n << PLL_BASE_DIVN_SHIFT) |
		(pdiv << PLL_BASE_DIVP_SHIFT);
	if (val == old_base)
		return 0;

	if (c->state == ON) {
		/* Use "ENABLE" pulse without placing PLL into IDDQ */
		val &= ~PLL_BASE_ENABLE;
		old_base &= ~PLL_BASE_ENABLE;
		pll_writel_delay(old_base, c->reg + PLL_BASE);
	}

	clk_writel(val, c->reg + PLL_BASE);

	if (c->state == ON) {
		val |= PLL_BASE_ENABLE;
		clk_writel(val, c->reg + PLL_BASE);
		tegra12_pll_clk_wait_for_lock(
			c, c->reg + PLL_BASE, PLLSS_BASE_LOCK);
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * tegra12_pllss_clk_resume_enable - re-apply defaults, rate and enable
 * state after suspend, restoring the pre-suspend s/w state at the end.
 */
static void tegra12_pllss_clk_resume_enable(struct clk *c)
{
	unsigned long rate = clk_get_rate_all_locked(c->parent);
	u32 val = clk_readl(c->reg + PLL_BASE);
	enum clk_state state = c->state;

	if (val & PLL_BASE_ENABLE)
		return;		/* already resumed */

	/* Restore input divider */
	val &= ~PLLSS_BASE_DIVM_MASK;
	val |= PLL_FIXED_MDIV(c, rate) << PLL_BASE_DIVM_SHIFT;
	clk_writel(val, c->reg + PLL_BASE);

	/* temporarily sync h/w and s/w states, final sync happens in
	   tegra_clk_resume later */
	c->state = OFF;
	pllss_set_defaults(c, rate);

	rate = clk_get_rate_all_locked(c);
	tegra12_pllss_clk_set_rate(c, rate);
	tegra12_pllss_clk_enable(c);
	c->state = state;
}
#endif

static struct clk_ops tegra_pllss_ops = {
	.init = tegra12_pllss_clk_init,
	.enable = tegra12_pllss_clk_enable,
	.disable = tegra12_pllss_clk_disable,
	.set_rate = tegra12_pllss_clk_set_rate,
	/* s/w policy, no set_parent, always use tegra_pll_ref */
};

/* non-monotonic mapping below is not a typo */
static u8 pllre_p[PLLRE_PDIV_MAX + 1] = {
/* PDIV: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32 };

/*
 * pllre_round_p_to_pdiv - map post-divider value @p to its PDIV encoding
 * (p - 1 within the s/w range); -EINVAL (as u32) for out-of-range p.
 */
static u32 pllre_round_p_to_pdiv(u32 p, u32 *pdiv)
{
	if (!p || (p > PLLRE_SW_PDIV_MAX + 1))
		return -EINVAL;

	if (pdiv)
		*pdiv = p - 1;
	return p;
}

/*
 * pllre_set_defaults - program PLLRE lock and IDDQ defaults
 * (continues below).
 */
static void pllre_set_defaults(struct clk *c, unsigned long
input_rate)
{
	u32 val = clk_readl(c->reg + PLL_MISC(c));

	val &= ~PLLRE_MISC_LOCK_OVERRIDE;
#if USE_PLL_LOCK_BITS
	val |= PLLRE_MISC_LOCK_ENABLE;
#else
	val &= ~PLLRE_MISC_LOCK_ENABLE;
#endif

	if (c->state != ON)
		val |= PLLRE_MISC_IDDQ;
	else
		BUG_ON(val & PLLRE_MISC_IDDQ && !tegra_platform_is_linsim());

	clk_writel(val, c->reg + PLL_MISC(c));
}

/*
 * tegra12_pllre_clk_init - derive PLLRE VCO state from h/w; if the base
 * register is completely clear, seed it with the fixed M and minimal N
 * before parsing. PLLRE has no P divider on the VCO clock itself.
 */
static void tegra12_pllre_clk_init(struct clk *c)
{
	unsigned long input_rate = clk_get_rate(c->parent);
	u32 m, val;

	/* clip vco_min to exact multiple of input rate to avoid crossover
	   by rounding */
	c->u.pll.vco_min =
		DIV_ROUND_UP(c->u.pll.vco_min, input_rate) * input_rate;
	c->min_rate = c->u.pll.vco_min;

	val = clk_readl(c->reg + PLL_BASE);
	c->state = (val & PLL_BASE_ENABLE) ? ON : OFF;

	if (!val) {
		/* overwrite h/w por state with min setting */
		m = PLL_FIXED_MDIV(c, input_rate);
		val = (m << PLL_BASE_DIVM_SHIFT) |
			(c->min_rate / input_rate << PLL_BASE_DIVN_SHIFT);
		clk_writel(val, c->reg + PLL_BASE);
	}

	m = (val & PLLRE_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
	BUG_ON(m != PLL_FIXED_MDIV(c, input_rate));

	c->div = m;
	c->mul = (val & PLLRE_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;

	pllre_set_defaults(c, input_rate);
}

/*
 * tegra12_pllre_clk_enable - take PLLRE out of IDDQ, set ENABLE and wait
 * for lock (PLLRE reports lock in the MISC register).
 */
static int tegra12_pllre_clk_enable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	pll_do_iddq(c, PLL_MISC(c), PLLRE_MISC_IDDQ, false);
	val = clk_readl(c->reg + PLL_BASE);
	val |= PLL_BASE_ENABLE;
	clk_writel(val, c->reg + PLL_BASE);
	tegra12_pll_clk_wait_for_lock(c, c->reg + PLL_MISC(c),
				      PLLRE_MISC_LOCK);
	return 0;
}

/*
 * tegra12_pllre_clk_disable - clear ENABLE, then put PLLRE into IDDQ.
 */
static void tegra12_pllre_clk_disable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	val = clk_readl(c->reg + PLL_BASE);
	val &= ~PLL_BASE_ENABLE;
	clk_writel(val, c->reg + PLL_BASE);

	pll_do_iddq(c, PLL_MISC(c), PLLRE_MISC_IDDQ, true);
}

/*
 * tegra12_pllre_clk_set_rate - program the VCO feedback loop (fixed M,
 * computed N), pulsing ENABLE when running.
 */
static int tegra12_pllre_clk_set_rate(struct clk *c, unsigned long rate)
{
	u32 val, old_base;
	unsigned long input_rate;
	struct clk_pll_freq_table cfg;

	pr_debug("%s: %s %lu\n", __func__, c->name, rate);

	if (rate < c->min_rate) {
		pr_err("%s: Failed to set %s rate %lu\n",
		       __func__, c->name, rate);
		return -EINVAL;
	}

	input_rate = clk_get_rate(c->parent);
	cfg.m = PLL_FIXED_MDIV(c, input_rate);
	cfg.n = rate * cfg.m / input_rate;

	c->mul = cfg.n;
	c->div = cfg.m;

	val = old_base = clk_readl(c->reg + PLL_BASE);
	val &= ~(PLLRE_BASE_DIVM_MASK | PLLRE_BASE_DIVN_MASK);
	val |= (cfg.m << PLL_BASE_DIVM_SHIFT) | (cfg.n << PLL_BASE_DIVN_SHIFT);
	if (val == old_base)
		return 0;

	if (c->state == ON) {
		/* Use "ENABLE" pulse without placing PLL into IDDQ */
		val &= ~PLL_BASE_ENABLE;
		old_base &= ~PLL_BASE_ENABLE;
		pll_writel_delay(old_base, c->reg + PLL_BASE);
	}

	clk_writel(val, c->reg + PLL_BASE);

	if (c->state == ON) {
		val |= PLL_BASE_ENABLE;
		clk_writel(val, c->reg + PLL_BASE);
		tegra12_pll_clk_wait_for_lock(
			c, c->reg + PLL_MISC(c), PLLRE_MISC_LOCK);
	}
	return 0;
}

static struct clk_ops tegra_pllre_ops = {
	.init = tegra12_pllre_clk_init,
	.enable = tegra12_pllre_clk_enable,
	.disable = tegra12_pllre_clk_disable,
	.set_rate = tegra12_pllre_clk_set_rate,
};

/*
 * tegra12_pllre_out_clk_init - pllre_out is a pure post-divider of the
 * PLLRE VCO; div is read from h/w, state mirrors the parent.
 */
static void tegra12_pllre_out_clk_init(struct clk *c)
{
	u32 p, val;

	val = clk_readl(c->reg);
	p = (val & PLLRE_BASE_DIVP_MASK) >> PLLRE_BASE_DIVP_SHIFT;
	BUG_ON(p > PLLRE_PDIV_MAX);
	p = pllre_p[p];

	c->div = p;
	c->mul = 1;
	c->state = c->parent->state;
}

/* enable/disable are no-ops: the gate lives on the parent VCO */
static int tegra12_pllre_out_clk_enable(struct clk *c)
{
	return 0;
}

static void tegra12_pllre_out_clk_disable(struct clk *c)
{
}

/*
 * tegra12_pllre_out_clk_set_rate - pick the smallest post-divider that
 * does not exceed @rate, under the parent clock's lock.
 */
static int tegra12_pllre_out_clk_set_rate(struct clk *c, unsigned long rate)
{
	u32 val, p, pdiv;
	unsigned long input_rate, flags;

	pr_debug("%s: %s %lu\n", __func__, c->name, rate);

	clk_lock_save(c->parent, &flags);
	input_rate = clk_get_rate_locked(c->parent);

	p = DIV_ROUND_UP(input_rate, rate);
	p = c->parent->u.pll.round_p_to_pdiv(p, &pdiv);
	if (IS_ERR_VALUE(p)) {
		pr_err("%s: Failed to set %s rate %lu\n",
		       __func__, c->name, rate);
		clk_unlock_restore(c->parent, &flags);
		return -EINVAL;
	}
	c->div = p;

	val = clk_readl(c->reg);
	val &= ~PLLRE_BASE_DIVP_MASK;
	val |= pdiv << PLLRE_BASE_DIVP_SHIFT;
	clk_writel(val, c->reg);
	clk_unlock_restore(c->parent, &flags);
	return 0;
}

static struct clk_ops tegra_pllre_out_ops = {
	.init = tegra12_pllre_out_clk_init,
	.enable = tegra12_pllre_out_clk_enable,
	.disable = tegra12_pllre_out_clk_disable,
	.set_rate = tegra12_pllre_out_clk_set_rate,
};

#ifdef CONFIG_PM_SLEEP
/* Resume both pllre_vco and pllre_out */
static void tegra12_pllre_clk_resume_enable(struct clk *c)
{
	u32 pdiv;
	u32 val = clk_readl(c->reg + PLL_BASE);
	unsigned long rate = clk_get_rate_all_locked(c->parent->parent);
	enum clk_state state = c->parent->state;

	if (val & PLL_BASE_ENABLE)
		return;		/* already resumed */

	/* temporarily sync h/w and s/w states, final sync happens in
	   tegra_clk_resume later */
	c->parent->state = OFF;
	pllre_set_defaults(c->parent, rate);

	/* restore PLLRE VCO feedback loop (m, n) */
	rate = clk_get_rate_all_locked(c->parent);
	tegra12_pllre_clk_set_rate(c->parent, rate);

	/* restore PLLRE post-divider */
	c->parent->u.pll.round_p_to_pdiv(c->div, &pdiv);
	val = clk_readl(c->reg);
	val &= ~PLLRE_BASE_DIVP_MASK;
	val |= pdiv << PLLRE_BASE_DIVP_SHIFT;
	clk_writel(val, c->reg);

	tegra12_pllre_clk_enable(c->parent);
	c->parent->state = state;
}
#endif

/* non-monotonic mapping below is not a typo */
static u8 plle_p[PLLE_CMLDIV_MAX + 1] = {
/* CMLDIV: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */   1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32 };

/*
 * select_pll_e_input - program the PLLE input mux: pll_re_vco when
 * USE_PLLE_INPUT_PLLRE is set, otherwise pll_ref.
 */
static inline void select_pll_e_input(u32 aux_reg)
{
#if USE_PLLE_INPUT_PLLRE
	aux_reg |= PLLE_AUX_PLLRE_SEL;
#else
	aux_reg &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
#endif
	clk_writel(aux_reg, PLLE_AUX);
}

/*
 * tegra12_plle_clk_init - derive PLLE state/dividers from h/w and fix up
 * the input mux when it is safe to do so (continues below).
 */
static void tegra12_plle_clk_init(struct clk *c)
{
	u32 val, p;
	struct clk *pll_ref = tegra_get_clock_by_name("pll_ref");
	struct clk *re_vco = tegra_get_clock_by_name("pll_re_vco");
	struct clk *pllp = tegra_get_clock_by_name("pllp");
#if USE_PLLE_INPUT_PLLRE
	struct clk *ref = re_vco;
#else
	struct clk *ref = pll_ref;
#endif

	val = clk_readl(c->reg + PLL_BASE);
	c->state = (val &
PLL_BASE_ENABLE) ? ON : OFF;
	c->mul = (val & PLLE_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;
	c->div = (val & PLLE_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
	p = (val & PLLE_BASE_DIVCML_MASK) >> PLLE_BASE_DIVCML_SHIFT;
	c->div *= plle_p[p];

	/* resolve the actual input mux selection; only retarget it to the
	   configured reference when the PLL is off */
	val = clk_readl(PLLE_AUX);
	c->parent = (val & PLLE_AUX_PLLRE_SEL) ? re_vco :
		(val & PLLE_AUX_PLLP_SEL) ? pllp : pll_ref;
	if (c->parent != ref) {
		if (c->state == ON) {
			WARN(1, "%s: pll_e is left enabled with %s input\n",
			     __func__, c->parent->name);
		} else {
			c->parent = ref;
			select_pll_e_input(val);
		}
	}
}

/*
 * tegra12_plle_clk_disable - disable PLLE, force it into IDDQ via s/w
 * control, and park the XUSB PLL pads in power-override/IDDQ.
 */
static void tegra12_plle_clk_disable(struct clk *c)
{
	u32 val;
	pr_debug("%s on clock %s\n", __func__, c->name);

	/* FIXME: do we need to restore other s/w controls ? */
	val = clk_readl(c->reg + PLL_BASE);
	val &= ~PLL_BASE_ENABLE;
	clk_writel(val, c->reg + PLL_BASE);

	val = clk_readl(c->reg + PLL_MISC(c));
	val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
	pll_writel_delay(val, c->reg + PLL_MISC(c));

	/* Set XUSB PLL pad pwr override and iddq */
	val = xusb_padctl_readl(XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0);
	val |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0_PLL_PWR_OVRD;
	val |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0_PLL_IDDQ;
	xusb_padctl_writel(val, XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0);
	/* NOTE(review): trailing read-back presumably flushes the posted
	   pad-control write — confirm */
	xusb_padctl_readl(XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0);
}

/*
 * tegra12_plle_clk_enable - bring PLLE up at its fixed rate: find the
 * table entry for the current input, configure lock/IDDQ/VREG, program
 * dividers with SS disabled, lock, then (optionally) enable SS and hand
 * the PLL over to h/w sequencer control and the XUSB pads.
 */
static int tegra12_plle_clk_enable(struct clk *c)
{
	u32 val;
	const struct clk_pll_freq_table *sel;
	unsigned long rate = c->u.pll.fixed_rate;
	unsigned long input_rate = clk_get_rate(c->parent);

	if (c->state == ON) {
		/* BL left plle enabled - don't change configuration */
		pr_warn("%s: pll_e is already enabled\n", __func__);
		return 0;
	}

	for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
		if (sel->input_rate == input_rate &&
		    sel->output_rate == rate)
			break;
	}

	if (sel->input_rate == 0) {
		pr_err("%s: %s input rate %lu is out-of-table\n",
		       __func__, c->name, input_rate);
		return -EINVAL;
	}

	/* setup locking configuration, s/w control of IDDQ and enable
	   modes, take pll out of IDDQ via s/w control, setup VREG */
	val = clk_readl(c->reg + PLL_BASE);
	val &= ~PLLE_BASE_LOCK_OVERRIDE;
	clk_writel(val, c->reg + PLL_BASE);

	val = clk_readl(c->reg + PLL_MISC(c));
	val |= PLLE_MISC_LOCK_ENABLE;
	val |= PLLE_MISC_IDDQ_SW_CTRL;
	val &= ~PLLE_MISC_IDDQ_SW_VALUE;
	val |= PLLE_MISC_PLLE_PTS;
	clk_writel(val, c->reg + PLL_MISC(c));
	udelay(5);

	/* configure dividers, disable SS */
	val = clk_readl(PLLE_SS_CTRL);
	val |= PLLE_SS_DISABLE;
	clk_writel(val, PLLE_SS_CTRL);

	val = clk_readl(c->reg + PLL_BASE);
	val &= ~(PLLE_BASE_DIVM_MASK | PLLE_BASE_DIVN_MASK |
		 PLLE_BASE_DIVCML_MASK);
	val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
		(sel->n << PLL_BASE_DIVN_SHIFT) |
		(sel->cpcon << PLLE_BASE_DIVCML_SHIFT);
	pll_writel_delay(val, c->reg + PLL_BASE);
	c->mul = sel->n;
	c->div = sel->m * sel->p;

	/* enable and lock pll */
	val |= PLL_BASE_ENABLE;
	clk_writel(val, c->reg + PLL_BASE);
	tegra12_pll_clk_wait_for_lock(
		c, c->reg + PLL_MISC(c), PLLE_MISC_LOCK);
#if USE_PLLE_SS
	val = clk_readl(PLLE_SS_CTRL);
	val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
	val &= ~PLLE_SS_COEFFICIENTS_MASK;
	val |= PLLE_SS_COEFFICIENTS_VAL;
	clk_writel(val, PLLE_SS_CTRL);
	val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
	pll_writel_delay(val, PLLE_SS_CTRL);
	val &= ~PLLE_SS_CNTL_INTERP_RESET;
	pll_writel_delay(val, PLLE_SS_CTRL);
#endif
#if !USE_PLLE_SWCTL
	/* switch pll under h/w control */
	val = clk_readl(c->reg + PLL_MISC(c));
	val &= ~PLLE_MISC_IDDQ_SW_CTRL;
	clk_writel(val, c->reg + PLL_MISC(c));

	val = clk_readl(PLLE_AUX);
	val |= PLLE_AUX_USE_LOCKDET | PLLE_AUX_SEQ_START_STATE;
	val &= ~(PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL);
	pll_writel_delay(val, PLLE_AUX);
	val |= PLLE_AUX_SEQ_ENABLE;
	pll_writel_delay(val, PLLE_AUX);
#endif
	/* clear XUSB PLL pad pwr override and iddq */
	val = xusb_padctl_readl(XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0);
	val &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0_PLL_PWR_OVRD;
	val &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0_PLL_IDDQ;
	xusb_padctl_writel(val, XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0);
	xusb_padctl_readl(XUSB_PADCTL_IOPHY_PLL_P0_CTL1_0);

	/* enable hw control of xusb brick pll */
	usb_plls_hw_control_enable(XUSBIO_PLL_CFG0);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * tegra12_plle_clk_resume - after suspend, only the input mux needs to
 * be restored; full enable happens via the normal enable path.
 */
static void tegra12_plle_clk_resume(struct clk *c)
{
	u32 val = clk_readl(c->reg + PLL_BASE);
	if (val & PLL_BASE_ENABLE)
		return;		/* already resumed */

	/* Restore parent */
	val = clk_readl(PLLE_AUX);
	select_pll_e_input(val);
}
#endif

static struct clk_ops tegra_plle_ops = {
	.init = tegra12_plle_clk_init,
	.enable = tegra12_plle_clk_enable,
	.disable = tegra12_plle_clk_disable,
};

/*
 * Tegra12 includes dynamic frequency lock loop (DFLL) with automatic voltage
 * control as possible CPU clock source. It is included in the Tegra12 clock
 * tree as "complex PLL" with standard Tegra clock framework APIs. However,
 * DFLL locking logic h/w access APIs are separated in the tegra_cl_dvfs.c
 * module. Hence, DFLL operations, with the exception of initialization, are
 * basically cl-dvfs wrappers.
 */

/* DFLL operations */
#ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
static void tune_cpu_trimmers(bool trim_high)
{
	tegra_soctherm_adjust_cpu_zone(trim_high);
}
#endif

/* defer real init: replace the op so it runs late in boot */
static void __init tegra12_dfll_clk_init(struct clk *c)
{
	c->ops->init = tegra12_dfll_cpu_late_init;
}

static int tegra12_dfll_clk_enable(struct clk *c)
{
	return tegra_cl_dvfs_enable(c->u.dfll.cl_dvfs);
}

static void tegra12_dfll_clk_disable(struct clk *c)
{
	tegra_cl_dvfs_disable(c->u.dfll.cl_dvfs);
}

/*
 * tegra12_dfll_clk_set_rate - forward the request to cl-dvfs; on
 * success cache the granted rate in c->rate.
 */
static int tegra12_dfll_clk_set_rate(struct clk *c, unsigned long rate)
{
	int ret = tegra_cl_dvfs_request_rate(c->u.dfll.cl_dvfs, rate);

	if (!ret)
		c->rate = tegra_cl_dvfs_request_get(c->u.dfll.cl_dvfs);

	return ret;
}

static void tegra12_dfll_clk_reset(struct clk *c, bool assert)
{
	u32 val = assert ? DFLL_BASE_RESET : 0;
	clk_writelx_delay(val, c->reg);
}

/*
 * tegra12_dfll_clk_cfg_ex - extended config hook; only the LOCK control
 * is supported (continues below).
 */
static int
tegra12_dfll_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
{
	if (p == TEGRA_CLK_DFLL_LOCK)
		return setting ?
tegra_cl_dvfs_lock(c->u.dfll.cl_dvfs) : tegra_cl_dvfs_unlock(c->u.dfll.cl_dvfs); return -EINVAL; } #ifdef CONFIG_PM_SLEEP static void tegra12_dfll_clk_resume(struct clk *c) { if (!(clk_readlx(c->reg) & DFLL_BASE_RESET)) return; /* already resumed */ if (c->state != UNINITIALIZED) { tegra_periph_reset_deassert(c); tegra_cl_dvfs_resume(c->u.dfll.cl_dvfs); } } #endif static struct clk_ops tegra_dfll_ops = { .init = tegra12_dfll_clk_init, .enable = tegra12_dfll_clk_enable, .disable = tegra12_dfll_clk_disable, .set_rate = tegra12_dfll_clk_set_rate, .reset = tegra12_dfll_clk_reset, .clk_cfg_ex = tegra12_dfll_clk_cfg_ex, }; /* DFLL sysfs interface */ static int tegra12_use_dfll_cb(const char *arg, const struct kernel_param *kp) { int ret = 0; unsigned int old_use_dfll; if (tegra_override_dfll_range != TEGRA_USE_DFLL_CDEV_CNTRL) { old_use_dfll = use_dfll; param_set_int(arg, kp); ret = tegra_clk_dfll_range_control(use_dfll); if (ret) use_dfll = old_use_dfll; return ret; } else { pr_warn("\n%s: Failed to set use_dfll\n", __func__); pr_warn("DFLL usage is under thermal cooling device control\n"); return -EACCES; } } static struct kernel_param_ops tegra12_use_dfll_ops = { .set = tegra12_use_dfll_cb, .get = param_get_int, }; module_param_cb(use_dfll, &tegra12_use_dfll_ops, &use_dfll, 0644); /* Clock divider ops (non-atomic shared register access) */ static DEFINE_SPINLOCK(pll_div_lock); static int tegra12_pll_div_clk_set_rate(struct clk *c, unsigned long rate); static void tegra12_pll_div_clk_init(struct clk *c) { if (c->flags & DIV_U71) { u32 val, divu71; if (c->parent->state == OFF) c->ops->disable(c); val = clk_readl(c->reg); val >>= c->reg_shift; c->state = (val & PLL_OUT_CLKEN) ? 
			ON : OFF;
		if (!(val & PLL_OUT_RESET_DISABLE))
			c->state = OFF;

		if (c->u.pll_div.default_rate) {
			int ret = tegra12_pll_div_clk_set_rate(
					c, c->u.pll_div.default_rate);
			if (!ret)
				return;
		}
		divu71 = (val & PLL_OUT_RATIO_MASK) >> PLL_OUT_RATIO_SHIFT;
		/* fractional U7.1 divider: rate = parent * 2 / (n + 2) */
		c->div = (divu71 + 2);
		c->mul = 2;
	} else if (c->flags & DIV_2) {
		c->state = ON;
		if (c->flags & (PLLD | PLLX)) {
			c->div = 2;
			c->mul = 1;
		} else
			BUG();
	} else if (c->flags & PLLU) {
		u32 val = clk_readl(c->reg);
		c->state = val & (0x1 << c->reg_shift) ? ON : OFF;
	} else {
		c->state = ON;
		c->div = 1;
		c->mul = 1;
	}
}

/*
 * Enable a PLL output divider. The U71 dividers share one register with
 * siblings, hence the read-modify-write under pll_div_lock.
 */
static int tegra12_pll_div_clk_enable(struct clk *c)
{
	u32 val;
	u32 new_val;
	unsigned long flags;

	pr_debug("%s: %s\n", __func__, c->name);
	if (c->flags & DIV_U71) {
		spin_lock_irqsave(&pll_div_lock, flags);
		val = clk_readl(c->reg);
		new_val = val >> c->reg_shift;
		new_val &= 0xFFFF;

		new_val |= PLL_OUT_CLKEN | PLL_OUT_RESET_DISABLE;

		val &= ~(0xFFFF << c->reg_shift);
		val |= new_val << c->reg_shift;
		clk_writel_delay(val, c->reg);
		spin_unlock_irqrestore(&pll_div_lock, flags);
		return 0;
	} else if (c->flags & DIV_2) {
		return 0;	/* fixed divider - nothing to enable */
	} else if (c->flags & PLLU) {
		/* PLLU gate bit lives in the parent's register space */
		clk_lock_save(c->parent, &flags);
		val = clk_readl(c->reg) | (0x1 << c->reg_shift);
		clk_writel_delay(val, c->reg);
		clk_unlock_restore(c->parent, &flags);
		return 0;
	}
	return -EINVAL;
}

/* Disable a PLL output divider (mirror of the enable path above). */
static void tegra12_pll_div_clk_disable(struct clk *c)
{
	u32 val;
	u32 new_val;
	unsigned long flags;

	pr_debug("%s: %s\n", __func__, c->name);
	if (c->flags & DIV_U71) {
		spin_lock_irqsave(&pll_div_lock, flags);
		val = clk_readl(c->reg);
		new_val = val >> c->reg_shift;
		new_val &= 0xFFFF;

		new_val &= ~(PLL_OUT_CLKEN | PLL_OUT_RESET_DISABLE);

		val &= ~(0xFFFF << c->reg_shift);
		val |= new_val << c->reg_shift;
		clk_writel_delay(val, c->reg);
		spin_unlock_irqrestore(&pll_div_lock, flags);
	} else if (c->flags & PLLU) {
		clk_lock_save(c->parent, &flags);
		val = clk_readl(c->reg) & (~(0x1 << c->reg_shift));
		clk_writel_delay(val, c->reg);
		clk_unlock_restore(c->parent, &flags);
	}
}

static int
tegra12_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
{
	u32 val;
	u32 new_val;
	int divider_u71;
	unsigned long parent_rate = clk_get_rate(c->parent);
	unsigned long flags;

	pr_debug("%s: %s %lu\n", __func__, c->name, rate);
	if (tegra_platform_is_qt())
		return 0;	/* no real h/w on QT simulation */
	if (c->flags & DIV_U71) {
		divider_u71 = clk_div71_get_divider(
			parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
		if (divider_u71 >= 0) {
			spin_lock_irqsave(&pll_div_lock, flags);
			val = clk_readl(c->reg);
			new_val = val >> c->reg_shift;
			new_val &= 0xFFFF;
			if (c->flags & DIV_U71_FIXED)
				new_val |= PLL_OUT_OVERRIDE;
			new_val &= ~PLL_OUT_RATIO_MASK;
			new_val |= divider_u71 << PLL_OUT_RATIO_SHIFT;

			val &= ~(0xFFFF << c->reg_shift);
			val |= new_val << c->reg_shift;
			clk_writel_delay(val, c->reg);
			c->div = divider_u71 + 2;
			c->mul = 2;
			spin_unlock_irqrestore(&pll_div_lock, flags);
			return 0;
		}
	} else if (c->flags & DIV_2)
		/* fixed /2: rate change must come from the parent PLL */
		return clk_set_rate(c->parent, rate * 2);

	return -EINVAL;
}

/*
 * Round a requested divider-output rate to what the h/w divider can
 * actually produce (rounding the divider up, i.e. the rate down).
 */
static long tegra12_pll_div_clk_round_rate(struct clk *c, unsigned long rate)
{
	int divider;
	unsigned long parent_rate = clk_get_rate(c->parent);
	pr_debug("%s: %s %lu\n", __func__, c->name, rate);

	if (c->flags & DIV_U71) {
		divider = clk_div71_get_divider(
			parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
		if (divider < 0)
			return divider;
		return DIV_ROUND_UP(parent_rate * 2, divider + 2);
	} else if (c->flags & DIV_2)
		/* no rounding - fixed DIV_2 dividers pass rate to parent PLL */
		return rate;

	return -EINVAL;
}

static struct clk_ops tegra_pll_div_ops = {
	.init			= tegra12_pll_div_clk_init,
	.enable			= tegra12_pll_div_clk_enable,
	.disable		= tegra12_pll_div_clk_disable,
	.set_rate		= tegra12_pll_div_clk_set_rate,
	.round_rate		= tegra12_pll_div_clk_round_rate,
};

/* Periph clk ops */

/* Return the source-mux field mask for a peripheral clock register. */
static inline u32 periph_clk_source_mask(struct clk *c)
{
	if (c->u.periph.src_mask)
		return c->u.periph.src_mask;
	else if (c->flags & MUX_PWM)
		return 3 << 28;
	else if (c->flags & MUX_CLK_OUT)
		return 3 << (c->u.periph.clk_num + 4);
	else if (c->flags & PLLD)
		return
			PLLD_BASE_DSI_MUX_MASK;
	else
		return 7 << 29;
}

/* Return the source-mux field shift matching periph_clk_source_mask(). */
static inline u32 periph_clk_source_shift(struct clk *c)
{
	if (c->u.periph.src_shift)
		return c->u.periph.src_shift;
	else if (c->flags & MUX_PWM)
		return 28;
	else if (c->flags & MUX_CLK_OUT)
		return c->u.periph.clk_num + 4;
	else if (c->flags & PLLD)
		return PLLD_BASE_DSI_MUX_SHIFT;
	else
		return 29;
}

/*
 * Discover a peripheral clock's h/w state: current parent (from the mux
 * field), divider settings, and enable/reset state.
 */
static void tegra12_periph_clk_init(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	const struct clk_mux_sel *mux = 0;
	const struct clk_mux_sel *sel;
	if (c->flags & MUX) {
		for (sel = c->inputs; sel->input != NULL; sel++) {
			if (((val & periph_clk_source_mask(c)) >>
			    periph_clk_source_shift(c)) == sel->value)
				mux = sel;
		}
		BUG_ON(!mux);
		c->parent = mux->input;
	} else {
		if (c->flags & PLLU) {
			/* for xusb_hs clock enforce SS div2 source */
			val &= ~periph_clk_source_mask(c);
			clk_writel_delay(val, c->reg);
		}
		c->parent = c->inputs[0].input;
	}

	/* if peripheral is left under reset - enforce safe rate */
	if (c->flags & PERIPH_NO_RESET) {
		if (tegra12_periph_is_special_reset(c)) {
			tegra_periph_clk_safe_rate_init(c);
			val = clk_readl(c->reg);
		}
	} else if (IS_PERIPH_IN_RESET(c)) {
		tegra_periph_clk_safe_rate_init(c);
		val = clk_readl(c->reg);
	}

	if (c->flags & DIV_U71) {
		u32 divu71 = val & PERIPH_CLK_SOURCE_DIVU71_MASK;
		if (c->flags & DIV_U71_IDLE) {
			val &= ~(PERIPH_CLK_SOURCE_DIVU71_MASK <<
				PERIPH_CLK_SOURCE_DIVIDLE_SHIFT);
			val |= (PERIPH_CLK_SOURCE_DIVIDLE_VAL <<
				PERIPH_CLK_SOURCE_DIVIDLE_SHIFT);
			clk_writel(val, c->reg);
		}
		c->div = divu71 + 2;
		c->mul = 2;
	} else if (c->flags & DIV_U151) {
		u32 divu151 = val & PERIPH_CLK_SOURCE_DIVU16_MASK;
		if ((c->flags & DIV_U151_UART) &&
		    (!(val & PERIPH_CLK_UART_DIV_ENB))) {
			divu151 = 0;	/* divider bypassed */
		}
		c->div = divu151 + 2;
		c->mul = 2;
	} else if (c->flags & DIV_U16) {
		u32 divu16 = val & PERIPH_CLK_SOURCE_DIVU16_MASK;
		c->div = divu16 + 1;
		c->mul = 1;
	} else {
		c->div = 1;
		c->mul = 1;
	}

	if (c->flags & PERIPH_NO_ENB) {
		/* no gate of its own - mirror the parent's state */
		c->state = c->parent->state;
		return;
	}

	c->state = ON;

	if (!(clk_readl(PERIPH_CLK_TO_ENB_REG(c)) &
	    PERIPH_CLK_TO_BIT(c)))
		c->state = OFF;

	if (!(c->flags & PERIPH_NO_RESET))
		if (clk_readl(PERIPH_CLK_TO_RST_REG(c)) &
		    PERIPH_CLK_TO_BIT(c))
			c->state = OFF;
}

/*
 * Enable a peripheral clock gate. Reference counted: only the first
 * enabler touches h/w, and the reset is released if it is still held.
 */
static int tegra12_periph_clk_enable(struct clk *c)
{
	unsigned long flags;
	pr_debug("%s on clock %s\n", __func__, c->name);
	if (c->flags & PERIPH_NO_ENB)
		return 0;

	spin_lock_irqsave(&periph_refcount_lock, flags);

	tegra_periph_clk_enable_refcount[c->u.periph.clk_num]++;
	if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] > 1) {
		spin_unlock_irqrestore(&periph_refcount_lock, flags);
		return 0;
	}

	clk_writel_delay(PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_ENB_SET_REG(c));
	if (!(c->flags & PERIPH_NO_RESET) &&
	    !(c->flags & PERIPH_MANUAL_RESET)) {
		if (clk_readl(PERIPH_CLK_TO_RST_REG(c)) &
		    PERIPH_CLK_TO_BIT(c)) {
			/* let the clock run before releasing reset */
			udelay(RESET_PROPAGATION_DELAY);
			clk_writel_delay(PERIPH_CLK_TO_BIT(c),
					 PERIPH_CLK_TO_RST_CLR_REG(c));
		}
	}
	spin_unlock_irqrestore(&periph_refcount_lock, flags);
	return 0;
}

/*
 * Drop one enable reference; gate the clock in h/w when the count
 * reaches zero.
 */
static void tegra12_periph_clk_disable(struct clk *c)
{
	unsigned long val, flags;
	pr_debug("%s on clock %s\n", __func__, c->name);
	if (c->flags & PERIPH_NO_ENB)
		return;

	spin_lock_irqsave(&periph_refcount_lock, flags);

	if (c->refcnt)
		tegra_periph_clk_enable_refcount[c->u.periph.clk_num]--;

	if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] == 0) {
		/* If peripheral is in the APB bus then read the APB bus to
		 * flush the write operation in apb bus. This will avoid the
		 * peripheral access after disabling clock*/
		if (c->flags & PERIPH_ON_APB)
			val = tegra_read_chipid();

		clk_writel_delay(
			PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_ENB_CLR_REG(c));
	}
	spin_unlock_irqrestore(&periph_refcount_lock, flags);
}

/* Assert or de-assert a peripheral module reset line. */
static void tegra12_periph_clk_reset(struct clk *c, bool assert)
{
	unsigned long val;
	pr_debug("%s %s on clock %s\n", __func__,
		 assert ?
"assert" : "deassert", c->name); if (c->flags & PERIPH_NO_ENB) return; if (!(c->flags & PERIPH_NO_RESET)) { if (assert) { /* If peripheral is in the APB bus then read the APB * bus to flush the write operation in apb bus. This * will avoid the peripheral access after disabling * clock */ if (c->flags & PERIPH_ON_APB) val = tegra_read_chipid(); clk_writel_delay(PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_RST_SET_REG(c)); } else clk_writel_delay(PERIPH_CLK_TO_BIT(c), PERIPH_CLK_TO_RST_CLR_REG(c)); } } static int tegra12_periph_clk_set_parent(struct clk *c, struct clk *p) { u32 val; const struct clk_mux_sel *sel; pr_debug("%s: %s %s\n", __func__, c->name, p->name); if (!(c->flags & MUX)) return (p == c->parent) ? 0 : (-EINVAL); for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == p) { val = clk_readl(c->reg); val &= ~periph_clk_source_mask(c); val |= (sel->value << periph_clk_source_shift(c)); if (c->refcnt) clk_enable(p); clk_writel_delay(val, c->reg); if (c->refcnt && c->parent) clk_disable(c->parent); clk_reparent(c, p); return 0; } } return -EINVAL; } static int tegra12_periph_clk_set_rate(struct clk *c, unsigned long rate) { u32 val; int divider; unsigned long parent_rate = clk_get_rate(c->parent); if (tegra_platform_is_qt()) return 0; if (c->flags & DIV_U71) { divider = clk_div71_get_divider( parent_rate, rate, c->flags, ROUND_DIVIDER_UP); if (divider >= 0) { val = clk_readl(c->reg); val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK; val |= divider; clk_writel_delay(val, c->reg); c->div = divider + 2; c->mul = 2; return 0; } } else if (c->flags & DIV_U151) { divider = clk_div151_get_divider( parent_rate, rate, c->flags, ROUND_DIVIDER_UP); if (divider >= 0) { val = clk_readl(c->reg); val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK; val |= divider; if (c->flags & DIV_U151_UART) { if (divider) val |= PERIPH_CLK_UART_DIV_ENB; else val &= ~PERIPH_CLK_UART_DIV_ENB; } clk_writel_delay(val, c->reg); c->div = divider + 2; c->mul = 2; return 0; } } else if (c->flags & DIV_U16) { 
		divider = clk_div16_get_divider(parent_rate, rate);
		if (divider >= 0) {
			val = clk_readl(c->reg);
			val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK;
			val |= divider;
			clk_writel_delay(val, c->reg);
			c->div = divider + 1;
			c->mul = 1;
			return 0;
		}
	} else if (parent_rate <= rate) {
		/* no divider at all: accept any rate at or above parent */
		c->div = 1;
		c->mul = 1;
		return 0;
	}
	return -EINVAL;
}

/*
 * Round a requested peripheral rate to what the applicable divider type
 * can produce (divider rounded up, rate rounded down).
 */
static long tegra12_periph_clk_round_rate(struct clk *c,
	unsigned long rate)
{
	int divider;
	unsigned long parent_rate = clk_get_rate(c->parent);
	pr_debug("%s: %s %lu\n", __func__, c->name, rate);

	if (c->flags & DIV_U71) {
		divider = clk_div71_get_divider(
			parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
		if (divider < 0)
			return divider;

		return DIV_ROUND_UP(parent_rate * 2, divider + 2);
	} else if (c->flags & DIV_U151) {
		divider = clk_div151_get_divider(
			parent_rate, rate, c->flags, ROUND_DIVIDER_UP);
		if (divider < 0)
			return divider;

		return DIV_ROUND_UP(parent_rate * 2, divider + 2);
	} else if (c->flags & DIV_U16) {
		divider = clk_div16_get_divider(parent_rate, rate);
		if (divider < 0)
			return divider;
		return DIV_ROUND_UP(parent_rate, divider + 1);
	}
	return -EINVAL;
}

static struct clk_ops tegra_periph_clk_ops = {
	.init			= &tegra12_periph_clk_init,
	.enable			= &tegra12_periph_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
	.set_parent		= &tegra12_periph_clk_set_parent,
	.set_rate		= &tegra12_periph_clk_set_rate,
	.round_rate		= &tegra12_periph_clk_round_rate,
	.reset			= &tegra12_periph_clk_reset,
};

/* 1x shared bus ops */
/* Round a rate against one fixed source PLL of the 1x bus. */
static long _1x_round_updown(struct clk *c, struct clk *src,
			     unsigned long rate, bool up)
{
	return fixed_src_bus_round_updown(c, src, c->flags, rate, up);
}

/*
 * Round a 1x shared-bus rate, picking between the low and high source
 * PLLs around c->u.periph.threshold, and record the selection in
 * c->u.periph.pll_selected.
 */
static long tegra12_1xbus_round_updown(struct clk *c, unsigned long rate,
					    bool up)
{
	unsigned long pll_low_rate, pll_high_rate;

	rate = max(rate, c->min_rate);

	pll_low_rate = _1x_round_updown(c, c->u.periph.pll_low, rate, up);
	if (rate <= c->u.periph.threshold) {
		c->u.periph.pll_selected = c->u.periph.pll_low;
		return pll_low_rate;
	}

	pll_high_rate = _1x_round_updown(c, c->u.periph.pll_high, rate,
					 up);
	if (pll_high_rate <= c->u.periph.threshold) {
		c->u.periph.pll_selected = c->u.periph.pll_low;
		return pll_low_rate;  /* prevent oscillation across threshold */
	}

	if (up) {
		/* rounding up: both plls may hit max, and round down */
		if (pll_high_rate < rate) {
			if (pll_low_rate < pll_high_rate) {
				c->u.periph.pll_selected = c->u.periph.pll_high;
				return pll_high_rate;
			}
		} else {
			if ((pll_low_rate < rate) ||
			    (pll_low_rate > pll_high_rate)) {
				c->u.periph.pll_selected = c->u.periph.pll_high;
				return pll_high_rate;
			}
		}
	} else if (pll_low_rate < pll_high_rate) {
		/* rounding down: to get here both plls able to round down */
		c->u.periph.pll_selected = c->u.periph.pll_high;
		return pll_high_rate;
	}
	c->u.periph.pll_selected = c->u.periph.pll_low;
	return pll_low_rate;
}

/* Standard round_rate wrapper: round up. */
static long tegra12_1xbus_round_rate(struct clk *c, unsigned long rate)
{
	return tegra12_1xbus_round_updown(c, rate, true);
}

/* Set 1x bus rate via the peripheral divider. */
static int tegra12_1xbus_set_rate(struct clk *c, unsigned long rate)
{
	/* Compensate rate truncating during rounding */
	return tegra12_periph_clk_set_rate(c, rate + 1);
}

/*
 * Shared-bus update handler for the 1x bus: aggregate client requests,
 * then switch parent PLL and divider to reach the new rate without
 * exceeding the bus maximum.
 */
static int tegra12_clk_1xbus_update(struct clk *c)
{
	int ret;
	struct clk *new_parent;
	unsigned long rate, old_rate;

	if (detach_shared_bus)
		return 0;

	rate = tegra12_clk_shared_bus_update(c, NULL, NULL, NULL);

	old_rate = clk_get_rate_locked(c);
	pr_debug("\n1xbus %s: rate %lu on parent %s: new request %lu\n",
		 c->name, old_rate, c->parent->name, rate);
	if (rate == old_rate)
		return 0;

	if (!c->u.periph.min_div_low || !c->u.periph.min_div_high) {
		/* lazily compute the smallest safe divider per source PLL */
		unsigned long r, m = c->max_rate;
		r = clk_get_rate(c->u.periph.pll_low);
		c->u.periph.min_div_low = DIV_ROUND_UP(r, m) * c->mul;
		r = clk_get_rate(c->u.periph.pll_high);
		c->u.periph.min_div_high = DIV_ROUND_UP(r, m) * c->mul;
	}

	new_parent = c->u.periph.pll_selected;

	/*
	 * The transition procedure below is guaranteed to switch to the target
	 * parent/rate without violation of max clock limits.
	 * It would attempt
	 * to switch without dip in bus rate if it is possible, but this cannot
	 * be guaranteed (example: switch from 408 MHz : 1 to 624 MHz : 2 with
	 * maximum bus limit 408 MHz will be executed as 408 => 204 => 312 MHz,
	 * and there is no way to avoid rate dip in this case).
	 */
	if (new_parent != c->parent) {
		int interim_div = 0;
		/* Switching to pll_high may over-clock bus if current divider
		   is too small - increase divider to safe value */
		if ((new_parent == c->u.periph.pll_high) &&
		    (c->div < c->u.periph.min_div_high))
			interim_div = c->u.periph.min_div_high;

		/* Switching to pll_low may dip down rate if current divider
		   is too big - decrease divider as much as we can */
		if ((new_parent == c->u.periph.pll_low) &&
		    (c->div > c->u.periph.min_div_low) &&
		    (c->div > c->u.periph.min_div_high))
			interim_div = c->u.periph.min_div_low;

		if (interim_div) {
			u64 interim_rate = old_rate * c->div;
			do_div(interim_rate, interim_div);
			ret = clk_set_rate_locked(c, interim_rate);
			if (ret) {
				pr_err("Failed to set %s rate to %lu\n",
				       c->name, (unsigned long)interim_rate);
				return ret;
			}
			pr_debug("1xbus %s: rate %lu on parent %s\n", c->name,
				 clk_get_rate_locked(c), c->parent->name);
		}

		ret = clk_set_parent_locked(c, new_parent);
		if (ret) {
			pr_err("Failed to set %s parent %s\n",
			       c->name, new_parent->name);
			return ret;
		}

		old_rate = clk_get_rate_locked(c);
		pr_debug("1xbus %s: rate %lu on parent %s\n", c->name,
			 old_rate, c->parent->name);
		if (rate == old_rate)
			return 0;
	}

	ret = clk_set_rate_locked(c, rate);
	if (ret) {
		pr_err("Failed to set %s rate to %lu\n", c->name, rate);
		return ret;
	}
	pr_debug("1xbus %s: rate %lu on parent %s\n", c->name,
		 clk_get_rate_locked(c), c->parent->name);
	return 0;
}

static struct clk_ops tegra_1xbus_clk_ops = {
	.init			= &tegra12_periph_clk_init,
	.enable			= &tegra12_periph_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
	.set_parent		= &tegra12_periph_clk_set_parent,
	.set_rate		= &tegra12_1xbus_set_rate,
	.round_rate		= &tegra12_1xbus_round_rate,
	.round_rate_updown	=
				  &tegra12_1xbus_round_updown,
	.reset			= &tegra12_periph_clk_reset,
	.shared_bus_update	= &tegra12_clk_1xbus_update,
};

/* msenc clock propagation WAR for bug 1005168 */
/*
 * Enable msenc, then pulse the level-2 clock-gate override bit so the
 * clock propagates (see bug reference above).
 */
static int tegra12_msenc_clk_enable(struct clk *c)
{
	int ret = tegra12_periph_clk_enable(c);
	if (ret)
		return ret;

	clk_writel(0, LVL2_CLK_GATE_OVRE);
	clk_writel(0x00400000, LVL2_CLK_GATE_OVRE);
	udelay(1);
	clk_writel(0, LVL2_CLK_GATE_OVRE);
	return 0;
}

static struct clk_ops tegra_msenc_clk_ops = {
	.init			= &tegra12_periph_clk_init,
	.enable			= &tegra12_msenc_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
	.set_parent		= &tegra12_periph_clk_set_parent,
	.set_rate		= &tegra12_periph_clk_set_rate,
	.round_rate		= &tegra12_periph_clk_round_rate,
	.reset			= &tegra12_periph_clk_reset,
};

/* Periph extended clock configuration ops */
/* Extended configuration: select the VI sensor input. */
static int tegra12_vi_clk_cfg_ex(struct clk *c,
				 enum tegra_clk_ex_param p, u32 setting)
{
	if (p == TEGRA_CLK_VI_INP_SEL) {
		u32 val = clk_readl(c->reg);
		val &= ~PERIPH_CLK_VI_SEL_EX_MASK;
		val |= (setting << PERIPH_CLK_VI_SEL_EX_SHIFT) &
			PERIPH_CLK_VI_SEL_EX_MASK;
		clk_writel(val, c->reg);
		return 0;
	}
	return -EINVAL;
}

static struct clk_ops tegra_vi_clk_ops = {
	.init			= &tegra12_periph_clk_init,
	.enable			= &tegra12_periph_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
	.set_parent		= &tegra12_periph_clk_set_parent,
	.set_rate		= &tegra12_periph_clk_set_rate,
	.round_rate		= &tegra12_periph_clk_round_rate,
	.clk_cfg_ex		= &tegra12_vi_clk_cfg_ex,
	.reset			= &tegra12_periph_clk_reset,
};

/* Extended configuration: select the SOR clock source field. */
static int tegra12_sor_clk_cfg_ex(struct clk *c,
				  enum tegra_clk_ex_param p, u32 setting)
{
	if (p == TEGRA_CLK_SOR_CLK_SEL) {
		u32 val = clk_readl(c->reg);
		val &= ~PERIPH_CLK_SOR_CLK_SEL_MASK;
		val |= (setting << PERIPH_CLK_SOR_CLK_SEL_SHIFT) &
			PERIPH_CLK_SOR_CLK_SEL_MASK;
		clk_writel(val, c->reg);
		return 0;
	}
	return -EINVAL;
}

static struct clk_ops tegra_sor_clk_ops = {
	.init			= &tegra12_periph_clk_init,
	.enable			= &tegra12_periph_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
	.set_parent		=
				  &tegra12_periph_clk_set_parent,
	.set_rate		= &tegra12_periph_clk_set_rate,
	.round_rate		= &tegra12_periph_clk_round_rate,
	.clk_cfg_ex		= &tegra12_sor_clk_cfg_ex,
	.reset			= &tegra12_periph_clk_reset,
};

/* Extended configuration: invert the DTV input polarity. */
static int tegra12_dtv_clk_cfg_ex(struct clk *c,
				  enum tegra_clk_ex_param p, u32 setting)
{
	if (p == TEGRA_CLK_DTV_INVERT) {
		u32 val = clk_readl(c->reg);
		if (setting)
			val |= PERIPH_CLK_DTV_POLARITY_INV;
		else
			val &= ~PERIPH_CLK_DTV_POLARITY_INV;
		clk_writel(val, c->reg);
		return 0;
	}
	return -EINVAL;
}

static struct clk_ops tegra_dtv_clk_ops = {
	.init			= &tegra12_periph_clk_init,
	.enable			= &tegra12_periph_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
	.set_parent		= &tegra12_periph_clk_set_parent,
	.set_rate		= &tegra12_periph_clk_set_rate,
	.round_rate		= &tegra12_periph_clk_round_rate,
	.clk_cfg_ex		= &tegra12_dtv_clk_cfg_ex,
	.reset			= &tegra12_periph_clk_reset,
};

static struct clk_ops tegra_dsi_clk_ops = {
	.init			= &tegra12_periph_clk_init,
	.enable			= &tegra12_periph_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
	.set_rate		= &tegra12_periph_clk_set_rate,
	.round_rate		= &tegra12_periph_clk_round_rate,
	.reset			= &tegra12_periph_clk_reset,
};

/* pciex clock support only reset function */
/* pciex always tracks its parent's state; no gate of its own. */
static void tegra12_pciex_clk_init(struct clk *c)
{
	c->state = c->parent->state;
}

static int tegra12_pciex_clk_enable(struct clk *c)
{
	return 0;	/* no-op: no gate */
}

static void tegra12_pciex_clk_disable(struct clk *c)
{
	/* no-op: no gate */
}

/*
 * Accept only the two supported PCIe link rates; record them as fixed
 * mul/div ratios of the 100 MHz PLLE parent.
 */
static int tegra12_pciex_clk_set_rate(struct clk *c, unsigned long rate)
{
	unsigned long parent_rate = clk_get_rate(c->parent);

	/*
	 * the only supported pcie configurations:
	 * Gen1: plle = 100MHz, link at 250MHz
	 * Gen2: plle = 100MHz, link at 500MHz
	 */
	if (parent_rate == 100000000) {
		if (rate == 500000000) {
			c->mul = 5;
			c->div = 1;
			return 0;
		} else if (rate == 250000000) {
			c->mul = 5;
			c->div = 2;
			return 0;
		}
	}
	return -EINVAL;
}

static struct clk_ops tegra_pciex_clk_ops = {
	.init			= tegra12_pciex_clk_init,
	.enable			= tegra12_pciex_clk_enable,
	.disable		= tegra12_pciex_clk_disable,
	.set_rate		=
				  tegra12_pciex_clk_set_rate,
	.reset			= tegra12_periph_clk_reset,
};

/* Output clock ops */
/* clk_out registers live in the PMC and are shared - serialize access. */
static DEFINE_SPINLOCK(clk_out_lock);

/* Read back extern clock-out state and parent selection from the PMC. */
static void tegra12_clk_out_init(struct clk *c)
{
	const struct clk_mux_sel *mux = 0;
	const struct clk_mux_sel *sel;
	u32 val = pmc_readl(c->reg);

	c->state = (val & (0x1 << c->u.periph.clk_num)) ? ON : OFF;
	c->mul = 1;
	c->div = 1;

	for (sel = c->inputs; sel->input != NULL; sel++) {
		if (((val & periph_clk_source_mask(c)) >>
		    periph_clk_source_shift(c)) == sel->value)
			mux = sel;
	}
	BUG_ON(!mux);
	c->parent = mux->input;
}

/* Gate the clock-out pin on via its PMC enable bit. */
static int tegra12_clk_out_enable(struct clk *c)
{
	u32 val;
	unsigned long flags;

	pr_debug("%s on clock %s\n", __func__, c->name);

	spin_lock_irqsave(&clk_out_lock, flags);
	val = pmc_readl(c->reg);
	val |= (0x1 << c->u.periph.clk_num);
	pmc_writel(val, c->reg);
	pmc_readl(c->reg);	/* read back to flush the write */
	spin_unlock_irqrestore(&clk_out_lock, flags);

	return 0;
}

/* Gate the clock-out pin off via its PMC enable bit. */
static void tegra12_clk_out_disable(struct clk *c)
{
	u32 val;
	unsigned long flags;

	pr_debug("%s on clock %s\n", __func__, c->name);

	spin_lock_irqsave(&clk_out_lock, flags);
	val = pmc_readl(c->reg);
	val &= ~(0x1 << c->u.periph.clk_num);
	pmc_writel(val, c->reg);
	pmc_readl(c->reg);	/* read back to flush the write */
	spin_unlock_irqrestore(&clk_out_lock, flags);
}

/*
 * Reparent a clock-out pin: enable new parent first, switch the PMC
 * mux, then drop the old parent.
 */
static int tegra12_clk_out_set_parent(struct clk *c, struct clk *p)
{
	u32 val;
	unsigned long flags;
	const struct clk_mux_sel *sel;

	pr_debug("%s: %s %s\n", __func__, c->name, p->name);

	for (sel = c->inputs; sel->input != NULL; sel++) {
		if (sel->input == p) {
			if (c->refcnt)
				clk_enable(p);

			spin_lock_irqsave(&clk_out_lock, flags);
			val = pmc_readl(c->reg);
			val &= ~periph_clk_source_mask(c);
			val |= (sel->value << periph_clk_source_shift(c));
			pmc_writel(val, c->reg);
			pmc_readl(c->reg);	/* flush */
			spin_unlock_irqrestore(&clk_out_lock, flags);

			if (c->refcnt && c->parent)
				clk_disable(c->parent);

			clk_reparent(c, p);
			return 0;
		}
	}
	return -EINVAL;
}

static struct clk_ops tegra_clk_out_ops = {
	.init			= &tegra12_clk_out_init,
	.enable			= &tegra12_clk_out_enable,
	.disable		= &tegra12_clk_out_disable,
	.set_parent		= &tegra12_clk_out_set_parent,
};

/* External memory controller clock ops */
/* Standard peripheral init plus DRAM-type discovery. */
static void tegra12_emc_clk_init(struct clk *c)
{
	tegra12_periph_clk_init(c);
	tegra_emc_dram_type_init(c);
}

/*
 * Round an EMC rate to the nearest entry of the EMC DVFS table;
 * fall back to max_rate if the table lookup fails.
 */
static long tegra12_emc_clk_round_updown(struct clk *c, unsigned long rate,
					 bool up)
{
	unsigned long new_rate = max(rate, c->min_rate);

	new_rate = tegra_emc_round_rate_updown(new_rate, up);
	if (IS_ERR_VALUE(new_rate))
		new_rate = c->max_rate;

	return new_rate;
}

static long tegra12_emc_clk_round_rate(struct clk *c, unsigned long rate)
{
	return tegra12_emc_clk_round_updown(c, rate, true);
}

/*
 * Glitch-free EMC rate change: shadowed registers are latched together
 * with the clock switch, so parent and divider may change at once.
 */
static int tegra12_emc_clk_set_rate(struct clk *c, unsigned long rate)
{
	int ret;
	u32 div_value;
	struct clk *p;

	if (tegra_platform_is_qt())
		return 0;	/* no real h/w on QT simulation */

	/* The tegra12x memory controller has an interlock with the clock
	 * block that allows memory shadowed registers to be updated,
	 * and then transfer them to the main registers at the same
	 * time as the clock update without glitches. During clock change
	 * operation both clock parent and divider may change simultaneously
	 * to achieve requested rate.
	 */
	p = tegra_emc_predict_parent(rate, &div_value);
	div_value += 2;		/* emc has fractional DIV_U71 divider */
	if (IS_ERR_OR_NULL(p)) {
		pr_err("%s: Failed to predict emc parent for rate %lu\n",
		       __func__, rate);
		return -EINVAL;
	}
	if (p == c->parent) {
		if (div_value == c->div)
			return 0;	/* already at requested settings */
	} else if (c->refcnt)
		clk_enable(p);

	ret = tegra_emc_set_rate(rate);
	if (ret < 0)
		return ret;

	if (p != c->parent) {
		if(c->refcnt && c->parent)
			clk_disable(c->parent);
		clk_reparent(c, p);
	}
	c->div = div_value;
	c->mul = 2;
	return 0;
}

/*
 * Shared-bus update for EMC: aggregate client requests and, if the
 * target parent PLL must be re-locked, first move EMC to a backup rate.
 */
static int tegra12_clk_emc_bus_update(struct clk *bus)
{
	struct clk *p = NULL;
	unsigned long rate, old_rate, parent_rate, backup_rate;

	if (detach_shared_bus)
		return 0;

	rate = tegra12_clk_shared_bus_update(bus, NULL, NULL, NULL);

	old_rate = clk_get_rate_locked(bus);
	if (rate == old_rate)
		return 0;

	if (!tegra_emc_is_parent_ready(rate, &p, &parent_rate, &backup_rate)) {
		if (bus->parent == p) {
			/* need backup to re-lock current parent */
			int ret;
			if (IS_ERR_VALUE(backup_rate)) {
				pr_err("%s: No backup for %s rate %lu\n",
				       __func__, bus->name, rate);
				return -EINVAL;
			}

			/* set volatge for backup rate if going up */
			if (backup_rate > old_rate) {
				ret = tegra_dvfs_set_rate(bus, backup_rate);
				if (ret) {
					pr_err("%s: dvfs failed on %s rate %lu\n",
					       __func__, bus->name,
					       backup_rate);
					return -EINVAL;
				}
			}

			trace_clock_set_rate(bus->name, backup_rate, 0);
			ret = bus->ops->set_rate(bus, backup_rate);
			if (ret) {
				pr_err("%s: Failed to backup %s for rate %lu\n",
				       __func__, bus->name, rate);
				return -EINVAL;
			}
			clk_rate_change_notify(bus, backup_rate);
		}
		if (p->refcnt) {
			pr_err("%s: %s has other than emc child\n",
			       __func__, p->name);
			return -EINVAL;
		}

		if (clk_set_rate(p, parent_rate)) {
			pr_err("%s: Failed to set %s rate %lu\n",
			       __func__, p->name, parent_rate);
			return -EINVAL;
		}
	}

	return clk_set_rate_locked(bus, rate);
}

static struct clk_ops tegra_emc_clk_ops = {
	.init			= &tegra12_emc_clk_init,
	.enable			= &tegra12_periph_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
	.set_rate		=
				  &tegra12_emc_clk_set_rate,
	.round_rate		= &tegra12_emc_clk_round_rate,
	.round_rate_updown	= &tegra12_emc_clk_round_updown,
	.reset			= &tegra12_periph_clk_reset,
	.shared_bus_update	= &tegra12_clk_emc_bus_update,
};

/* Update the MC child divider from the EMC/MC same-rate register bit. */
void tegra_mc_divider_update(struct clk *emc)
{
	emc->child_bus->div = (clk_readl(emc->reg) &
			       PERIPH_CLK_SOURCE_EMC_MC_SAME) ? 1 : 2;
}

/* Init the MC clock: state from enable bit, divider derived from EMC. */
static void tegra12_mc_clk_init(struct clk *c)
{
	c->state = ON;
	if (!(clk_readl(PERIPH_CLK_TO_ENB_REG(c)) & PERIPH_CLK_TO_BIT(c)))
		c->state = OFF;

	c->parent->child_bus = c;
	tegra_mc_divider_update(c->parent);
	c->mul = 1;
}

static struct clk_ops tegra_mc_clk_ops = {
	.init			= &tegra12_mc_clk_init,
	.enable			= &tegra12_periph_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
};

/* Clock doubler ops (non-atomic shared register access) */
static DEFINE_SPINLOCK(doubler_lock);

/* Read back doubler state: bypass bit clear means x2 is active. */
static void tegra12_clk_double_init(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	c->mul = val & (0x1 << c->reg_shift) ? 1 : 2;
	c->div = 1;
	c->state = ON;
	if (!(clk_readl(PERIPH_CLK_TO_ENB_REG(c)) & PERIPH_CLK_TO_BIT(c)))
		c->state = OFF;
};

/*
 * Doubler supports exactly two rates: 1x or 2x the parent; program the
 * bypass bit accordingly under doubler_lock.
 */
static int tegra12_clk_double_set_rate(struct clk *c, unsigned long rate)
{
	u32 val;
	unsigned long parent_rate = clk_get_rate(c->parent);
	unsigned long flags;

	if (rate == parent_rate) {
		spin_lock_irqsave(&doubler_lock, flags);
		val = clk_readl(c->reg) | (0x1 << c->reg_shift);
		clk_writel(val, c->reg);
		c->mul = 1;
		c->div = 1;
		spin_unlock_irqrestore(&doubler_lock, flags);
		return 0;
	} else if (rate == 2 * parent_rate) {
		spin_lock_irqsave(&doubler_lock, flags);
		val = clk_readl(c->reg) & (~(0x1 << c->reg_shift));
		clk_writel(val, c->reg);
		c->mul = 2;
		c->div = 1;
		spin_unlock_irqrestore(&doubler_lock, flags);
		return 0;
	}
	return -EINVAL;
}

static struct clk_ops tegra_clk_double_ops = {
	.init			= &tegra12_clk_double_init,
	.enable			= &tegra12_periph_clk_enable,
	.disable		= &tegra12_periph_clk_disable,
	.set_rate		= &tegra12_clk_double_set_rate,
};

/* Audio sync clock ops */
static int tegra12_sync_source_set_rate(struct clk *c, unsigned long
					rate)
{
	/* sync source is externally driven - just record the rate */
	c->rate = rate;
	return 0;
}

static struct clk_ops tegra_sync_source_ops = {
	.set_rate		= &tegra12_sync_source_set_rate,
};

/* Read back audio sync state and source selection from h/w. */
static void tegra12_audio_sync_clk_init(struct clk *c)
{
	int source;
	const struct clk_mux_sel *sel;
	u32 val = clk_readl(c->reg);
	c->state = (val & AUDIO_SYNC_DISABLE_BIT) ? OFF : ON;
	source = val & AUDIO_SYNC_SOURCE_MASK;
	for (sel = c->inputs; sel->input != NULL; sel++)
		if (sel->value == source)
			break;
	BUG_ON(sel->input == NULL);
	c->parent = sel->input;
}

/* Clear the disable bit to enable the audio sync clock. */
static int tegra12_audio_sync_clk_enable(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	clk_writel((val & (~AUDIO_SYNC_DISABLE_BIT)), c->reg);
	return 0;
}

/* Set the disable bit to gate the audio sync clock. */
static void tegra12_audio_sync_clk_disable(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	clk_writel((val | AUDIO_SYNC_DISABLE_BIT), c->reg);
}

/*
 * Reparent the audio sync clock: enable the new source before the mux
 * write, release the old one after.
 */
static int tegra12_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
{
	u32 val;
	const struct clk_mux_sel *sel;
	for (sel = c->inputs; sel->input != NULL; sel++) {
		if (sel->input == p) {
			val = clk_readl(c->reg);
			val &= ~AUDIO_SYNC_SOURCE_MASK;
			val |= sel->value;

			if (c->refcnt)
				clk_enable(p);

			clk_writel(val, c->reg);

			if (c->refcnt && c->parent)
				clk_disable(c->parent);

			clk_reparent(c, p);
			return 0;
		}
	}

	return -EINVAL;
}

static struct clk_ops tegra_audio_sync_clk_ops = {
	.init			= tegra12_audio_sync_clk_init,
	.enable			= tegra12_audio_sync_clk_enable,
	.disable		= tegra12_audio_sync_clk_disable,
	.set_parent		= tegra12_audio_sync_clk_set_parent,
};

/* cml0 (pcie), and cml1 (sata) clock ops */
/* CML clocks: state tracked by a single per-clock enable bit. */
static void tegra12_cml_clk_init(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	c->state = val & (0x1 << c->u.periph.clk_num) ?
		ON : OFF;
}

/* Set the CML enable bit. */
static int tegra12_cml_clk_enable(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	val |= (0x1 << c->u.periph.clk_num);
	clk_writel(val, c->reg);
	return 0;
}

/* Clear the CML enable bit. */
static void tegra12_cml_clk_disable(struct clk *c)
{
	u32 val = clk_readl(c->reg);
	val &= ~(0x1 << c->u.periph.clk_num);
	clk_writel(val, c->reg);
}

static struct clk_ops tegra_cml_clk_ops = {
	.init			= &tegra12_cml_clk_init,
	.enable			= &tegra12_cml_clk_enable,
	.disable		= &tegra12_cml_clk_disable,
};

/* cbus ops */
/*
 * Some clocks require dynamic re-locking of source PLL in order to
 * achieve frequency scaling granularity that matches characterized
 * core voltage steps. The cbus clock creates a shared bus that
 * provides a virtual root for such clocks to hide and synchronize
 * parent PLL re-locking as well as backup operations.
 */

static void tegra12_clk_cbus_init(struct clk *c)
{
	c->state = OFF;
	c->set = true;
}

static int tegra12_clk_cbus_enable(struct clk *c)
{
	return 0;	/* virtual clock - nothing to enable */
}

/*
 * Round a cbus rate to a DVFS table frequency, honoring voltage caps;
 * falls back to simple min_rate clamping when no DVFS table exists.
 */
static long tegra12_clk_cbus_round_updown(struct clk *c, unsigned long rate,
					  bool up)
{
	int i;
	const int *millivolts;

	if (!c->dvfs) {
		if (!c->min_rate)
			c->min_rate = c->parent->min_rate;
		rate = max(rate, c->min_rate);
		return rate;
	}

	/* update min now, since no dvfs table was available during init
	   (skip placeholder entries set to 1 kHz) */
	if (!c->min_rate) {
		for (i = 0; i < c->dvfs->num_freqs; i++) {
			if (c->dvfs->freqs[i] > 1 * c->dvfs->freqs_mult) {
				c->min_rate = c->dvfs->freqs[i];
				break;
			}
		}
		BUG_ON(!c->min_rate);
	}
	rate = max(rate, c->min_rate);

	millivolts = tegra_dvfs_get_millivolts_pll(c->dvfs);
	for (i = 0; ; i++) {
		unsigned long f = c->dvfs->freqs[i];
		int mv = millivolts[i];
		if ((f >= rate) || (mv >= c->dvfs->max_millivolts) ||
		    ((i + 1) >= c->dvfs->num_freqs)) {
			if (!up && i && (f > rate))
				i--;	/* step back when rounding down */
			break;
		}
	}
	return c->dvfs->freqs[i];
}

static long tegra12_clk_cbus_round_rate(struct clk *c, unsigned long rate)
{
	return tegra12_clk_cbus_round_updown(c, rate, true);
}

/* Move one cbus client to parent p with divider div, over/under-clock safe. */
static int cbus_switch_one(struct clk *c, struct clk *p, u32 div,
			   bool abort)
{
	int ret = 0;

	/*
	 * Glitch-free re-parent sequence: raise the divider first, switch
	 * parent, then lower the divider — the client never overclocks.
	 */
	/* set new divider if it is bigger than the current one */
	if (c->div < c->mul * div) {
		ret = clk_set_div(c, div);
		if (ret) {
			pr_err("%s: failed to set %s clock divider %u: %d\n",
			       __func__, c->name, div, ret);
			if (abort)
				return ret;
		}
	}

	if (c->parent != p) {
		ret = clk_set_parent(c, p);
		if (ret) {
			pr_err("%s: failed to set %s clock parent %s: %d\n",
			       __func__, c->name, p->name, ret);
			if (abort)
				return ret;
		}
	}

	/* set new divider if it is smaller than the current one */
	if (c->div > c->mul * div) {
		ret = clk_set_div(c, div);
		if (ret)
			pr_err("%s: failed to set %s clock divider %u: %d\n",
			       __func__, c->name, div, ret);
	}

	return ret;
}

/*
 * Move every enabled client still sourced from the cbus parent onto the
 * backup PLL so the parent can be re-locked; aborts on first failure.
 */
static int cbus_backup(struct clk *c)
{
	int ret;
	struct clk *user;

	list_for_each_entry(user, &c->shared_bus_list,
			u.shared_bus_user.node) {
		struct clk *client = user->u.shared_bus_user.client;
		if (client && (client->state == ON) &&
		    (client->parent == c->parent)) {
			ret = cbus_switch_one(client,
					      c->shared_bus_backup.input,
					      c->shared_bus_backup.value *
					      user->div, true);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/*
 * Record the dvfs rate requirement for the cbus if any in-use client
 * is riding the bus parent.
 */
static int cbus_dvfs_set_rate(struct clk *c, unsigned long rate)
{
	int ret;
	struct clk *user;

	list_for_each_entry(user, &c->shared_bus_list,
			u.shared_bus_user.node) {
		struct clk *client = user->u.shared_bus_user.client;
		if (client && client->refcnt && (client->parent == c->parent)) {
			ret = tegra_dvfs_set_rate(c, rate);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/* Switch all clients back from the backup PLL to the re-locked parent. */
static void cbus_restore(struct clk *c)
{
	struct clk *user;

	list_for_each_entry(user, &c->shared_bus_list,
			u.shared_bus_user.node) {
		if (user->u.shared_bus_user.client)
			cbus_switch_one(user->u.shared_bus_user.client,
					c->parent, c->div * user->div, false);
	}
}

/*
 * Choose the backup divider so that the backup PLL rate, divided down,
 * stays within 25% of the requested/current cbus rate.
 */
static int get_next_backup_div(struct clk *c, unsigned long rate)
{
	u32 div = c->div;
	unsigned long backup_rate = clk_get_rate(c->shared_bus_backup.input);

	rate = max(rate, clk_get_rate_locked(c));
	rate = rate - (rate >> 2);	/* 25% margin for backup rate */
	if ((u64)rate * div < backup_rate)
		div =
DIV_ROUND_UP(backup_rate, rate); BUG_ON(!div); return div; } static int tegra12_clk_cbus_set_rate(struct clk *c, unsigned long rate) { int ret; bool dramp; if (rate == 0) return 0; if (tegra_platform_is_qt()) return 0; ret = clk_enable(c->parent); if (ret) { pr_err("%s: failed to enable %s clock: %d\n", __func__, c->name, ret); return ret; } dramp = tegra12_is_dyn_ramp(c->parent, rate * c->div, false); if (!dramp) { c->shared_bus_backup.value = get_next_backup_div(c, rate); ret = cbus_backup(c); if (ret) goto out; } ret = clk_set_rate(c->parent, rate * c->div); if (ret) { pr_err("%s: failed to set %s clock rate %lu: %d\n", __func__, c->name, rate, ret); goto out; } /* Safe voltage setting is taken care of by cbus clock dvfs; the call * below only records requirements for each enabled client. */ if (dramp) ret = cbus_dvfs_set_rate(c, rate); cbus_restore(c); out: clk_disable(c->parent); return ret; } static inline void cbus_move_enabled_user( struct clk *user, struct clk *dst, struct clk *src) { clk_enable(dst); list_move_tail(&user->u.shared_bus_user.node, &dst->shared_bus_list); clk_disable(src); clk_reparent(user, dst); } #ifdef CONFIG_TEGRA_DYNAMIC_CBUS static int tegra12_clk_cbus_update(struct clk *bus) { int ret, mv; struct clk *slow = NULL; struct clk *top = NULL; unsigned long rate; unsigned long old_rate; unsigned long ceiling; if (detach_shared_bus) return 0; rate = tegra12_clk_shared_bus_update(bus, &top, &slow, &ceiling); /* use dvfs table of the slowest enabled client as cbus dvfs table */ if (bus->dvfs && slow && (slow != bus->u.cbus.slow_user)) { int i; unsigned long *dest = &bus->dvfs->freqs[0]; unsigned long *src = &slow->u.shared_bus_user.client->dvfs->freqs[0]; if (slow->div > 1) for (i = 0; i < bus->dvfs->num_freqs; i++) dest[i] = src[i] * slow->div; else memcpy(dest, src, sizeof(*dest) * bus->dvfs->num_freqs); } /* update bus state variables and rate */ bus->u.cbus.slow_user = slow; bus->u.cbus.top_user = top; rate = 
tegra12_clk_cap_shared_bus(bus, rate, ceiling); mv = tegra_dvfs_predict_millivolts(bus, rate); if (IS_ERR_VALUE(mv)) return -EINVAL; if (bus->dvfs) { mv -= bus->dvfs->cur_millivolts; if (bus->refcnt && (mv > 0)) { ret = tegra_dvfs_set_rate(bus, rate); if (ret) return ret; } } old_rate = clk_get_rate_locked(bus); if (IS_ENABLED(CONFIG_TEGRA_MIGRATE_CBUS_USERS) || (old_rate != rate)) { ret = bus->ops->set_rate(bus, rate); if (ret) return ret; } if (bus->dvfs) { if (bus->refcnt && (mv <= 0)) { ret = tegra_dvfs_set_rate(bus, rate); if (ret) return ret; } } clk_rate_change_notify(bus, rate); return 0; }; #else static int tegra12_clk_cbus_update(struct clk *bus) { unsigned long rate, old_rate; if (detach_shared_bus) return 0; rate = tegra12_clk_shared_bus_update(bus, NULL, NULL, NULL); old_rate = clk_get_rate_locked(bus); if (rate == old_rate) return 0; return clk_set_rate_locked(bus, rate); } #endif static int tegra12_clk_cbus_migrate_users(struct clk *user) { #ifdef CONFIG_TEGRA_MIGRATE_CBUS_USERS struct clk *src_bus, *dst_bus, *top_user, *c; struct list_head *pos, *n; if (!user->u.shared_bus_user.client || !user->inputs) return 0; /* Dual cbus on Tegra12 */ src_bus = user->inputs[0].input; dst_bus = user->inputs[1].input; if (!src_bus->u.cbus.top_user && !dst_bus->u.cbus.top_user) return 0; /* Make sure top user on the source bus is requesting highest rate */ if (!src_bus->u.cbus.top_user || (dst_bus->u.cbus.top_user && bus_user_request_is_lower(src_bus->u.cbus.top_user, dst_bus->u.cbus.top_user))) swap(src_bus, dst_bus); /* If top user is the slow one on its own (source) bus, do nothing */ top_user = src_bus->u.cbus.top_user; BUG_ON(!top_user->u.shared_bus_user.client); if (!bus_user_is_slower(src_bus->u.cbus.slow_user, top_user)) return 0; /* If source bus top user is slower than all users on destination bus, move top user; otherwise move all users slower than the top one */ if (!dst_bus->u.cbus.slow_user || !bus_user_is_slower(dst_bus->u.cbus.slow_user, top_user)) 
{ cbus_move_enabled_user(top_user, dst_bus, src_bus); } else { list_for_each_safe(pos, n, &src_bus->shared_bus_list) { c = list_entry(pos, struct clk, u.shared_bus_user.node); if (c->u.shared_bus_user.enabled && c->u.shared_bus_user.client && bus_user_is_slower(c, top_user)) cbus_move_enabled_user(c, dst_bus, src_bus); } } /* Update destination bus 1st (move clients), then source */ tegra_clk_shared_bus_update(dst_bus); tegra_clk_shared_bus_update(src_bus); #endif return 0; } static struct clk_ops tegra_clk_cbus_ops = { .init = tegra12_clk_cbus_init, .enable = tegra12_clk_cbus_enable, .set_rate = tegra12_clk_cbus_set_rate, .round_rate = tegra12_clk_cbus_round_rate, .round_rate_updown = tegra12_clk_cbus_round_updown, .shared_bus_update = tegra12_clk_cbus_update, }; /* shared bus ops */ /* * Some clocks may have multiple downstream users that need to request a * higher clock rate. Shared bus clocks provide a unique shared_bus_user * clock to each user. The frequency of the bus is set to the highest * enabled shared_bus_user clock, with a minimum value set by the * shared bus. * * Optionally shared bus may support users migration. Since shared bus and * its * children (users) have reversed rate relations: user rates determine * bus rate, * switching user from one parent/bus to another may change rates * of both parents. Therefore we need a cross-bus lock on top of individual * user and bus locks. For now, limit bus switch support to cbus only if * CONFIG_TEGRA_MIGRATE_CBUS_USERS is set. 
*/ static unsigned long tegra12_clk_shared_bus_update(struct clk *bus, struct clk **bus_top, struct clk **bus_slow, unsigned long *rate_cap) { struct clk *c; struct clk *slow = NULL; struct clk *top = NULL; unsigned long override_rate = 0; unsigned long top_rate = 0; unsigned long rate = bus->min_rate; unsigned long bw = 0; unsigned long iso_bw = 0; unsigned long ceiling = bus->max_rate; unsigned long ceiling_but_iso = bus->max_rate; u32 usage_flags = 0; bool rate_set = false; list_for_each_entry(c, &bus->shared_bus_list, u.shared_bus_user.node) { bool cap_user = (c->u.shared_bus_user.mode == SHARED_CEILING) || (c->u.shared_bus_user.mode == SHARED_CEILING_BUT_ISO); /* * Ignore requests from disabled floor and bw users, and from * auto-users riding the bus. Always honor ceiling users, even * if they are disabled - we do not want to keep enabled parent * bus just because ceiling is set. Ignore SCLK/AHB/APB dividers * to propagate flat max request. */ if (c->u.shared_bus_user.enabled || cap_user) { unsigned long request_rate = c->u.shared_bus_user.rate; if (!(c->flags & DIV_BUS)) request_rate *= c->div ? 
: 1; usage_flags |= c->u.shared_bus_user.usage_flag; if (!(c->flags & BUS_RATE_LIMIT)) rate_set = true; switch (c->u.shared_bus_user.mode) { case SHARED_ISO_BW: iso_bw += request_rate; if (iso_bw > bus->max_rate) iso_bw = bus->max_rate; /* fall thru */ case SHARED_BW: bw += request_rate; if (bw > bus->max_rate) bw = bus->max_rate; break; case SHARED_CEILING_BUT_ISO: ceiling_but_iso = min(request_rate, ceiling_but_iso); break; case SHARED_CEILING: ceiling = min(request_rate, ceiling); break; case SHARED_OVERRIDE: if (override_rate == 0) override_rate = request_rate; break; case SHARED_AUTO: break; case SHARED_FLOOR: default: rate = max(request_rate, rate); if (c->u.shared_bus_user.client && request_rate) { if (top_rate < request_rate) { top_rate = request_rate; top = c; } else if ((top_rate == request_rate) && bus_user_is_slower(c, top)) { top = c; } } } if (c->u.shared_bus_user.client && (!slow || bus_user_is_slower(c, slow))) slow = c; } } if (bus->flags & PERIPH_EMC_ENB) { unsigned long iso_bw_min; bw = tegra_emc_apply_efficiency( bw, iso_bw, bus->max_rate, usage_flags, &iso_bw_min); if (bus->ops && bus->ops->round_rate) iso_bw_min = bus->ops->round_rate(bus, iso_bw_min); ceiling_but_iso = max(ceiling_but_iso, iso_bw_min); } rate = override_rate ? : max(rate, bw); ceiling = min(ceiling, ceiling_but_iso); ceiling = override_rate ? bus->max_rate : ceiling; bus->override_rate = override_rate; if (bus_top && bus_slow && rate_cap) { /* If dynamic bus dvfs table, let the caller to complete rounding and aggregation */ *bus_top = top; *bus_slow = slow; *rate_cap = ceiling; } else { /* * If satic bus dvfs table, complete rounding and aggregation. * In case when no user requested bus rate, and bus retention * is enabled, don't scale down - keep current rate. 
*/ if (!rate_set && (bus->shared_bus_flags & SHARED_BUS_RETENTION)) rate = clk_get_rate_locked(bus); rate = tegra12_clk_cap_shared_bus(bus, rate, ceiling); } return rate; }; static unsigned long tegra12_clk_cap_shared_bus(struct clk *bus, unsigned long rate, unsigned long ceiling) { if (bus->ops && bus->ops->round_rate_updown) ceiling = bus->ops->round_rate_updown(bus, ceiling, false); rate = min(rate, ceiling); if (bus->ops && bus->ops->round_rate) rate = bus->ops->round_rate(bus, rate); return rate; } static int tegra_clk_shared_bus_migrate_users(struct clk *user) { if (detach_shared_bus) return 0; /* Only cbus migration is supported */ if (user->flags & PERIPH_ON_CBUS) return tegra12_clk_cbus_migrate_users(user); return -ENOSYS; } static void tegra_clk_shared_bus_user_init(struct clk *c) { c->max_rate = c->parent->max_rate; c->u.shared_bus_user.rate = c->parent->max_rate; c->state = OFF; c->set = true; if ((c->u.shared_bus_user.mode == SHARED_CEILING) || (c->u.shared_bus_user.mode == SHARED_CEILING_BUT_ISO)) { c->state = ON; c->refcnt++; } if (c->u.shared_bus_user.client_id) { struct clk *client = tegra_get_clock_by_name(c->u.shared_bus_user.client_id); if (!client) { pr_err("%s: could not find clk %s\n", __func__, c->u.shared_bus_user.client_id); return; } if ((client->state == ON) && !(client->flags & PERIPH_NO_ENB)) pr_info("%s: %s client %s left ON\n", __func__, c->parent->name, client->name); c->u.shared_bus_user.client = client; c->u.shared_bus_user.client->flags |= c->parent->flags & PERIPH_ON_CBUS; c->flags |= c->parent->flags & PERIPH_ON_CBUS; c->div = c->u.shared_bus_user.client_div ? 
: 1; c->mul = 1; } list_add_tail(&c->u.shared_bus_user.node, &c->parent->shared_bus_list); } static int tegra_clk_shared_bus_user_set_parent(struct clk *c, struct clk *p) { int ret; const struct clk_mux_sel *sel; if (detach_shared_bus) return 0; if (c->parent == p) return 0; if (!(c->inputs && c->cross_clk_mutex && clk_cansleep(c))) return -ENOSYS; for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == p) break; } if (!sel->input) return -EINVAL; if (c->refcnt) clk_enable(p); list_move_tail(&c->u.shared_bus_user.node, &p->shared_bus_list); ret = tegra_clk_shared_bus_update(p); if (ret) { list_move_tail(&c->u.shared_bus_user.node, &c->parent->shared_bus_list); tegra_clk_shared_bus_update(c->parent); clk_disable(p); return ret; } tegra_clk_shared_bus_update(c->parent); if (c->refcnt) clk_disable(c->parent); clk_reparent(c, p); return 0; } static int tegra_clk_shared_bus_user_set_rate(struct clk *c, unsigned long rate) { int ret; c->u.shared_bus_user.rate = rate; ret = tegra_clk_shared_bus_update(c->parent); if (!ret && c->cross_clk_mutex && clk_cansleep(c)) tegra_clk_shared_bus_migrate_users(c); return ret; } static long tegra_clk_shared_bus_user_round_rate( struct clk *c, unsigned long rate) { /* * Defer rounding requests until aggregated. BW users must not be * rounded at all, others just clipped to bus range (some clients * may use round api to find limits). Ignore SCLK/AHB and AHB/APB * dividers to keep flat bus requests propagation. 
*/ if ((c->u.shared_bus_user.mode != SHARED_BW) && (c->u.shared_bus_user.mode != SHARED_ISO_BW)) { if (!(c->flags & DIV_BUS) && (c->div > 1)) rate *= c->div; if (rate > c->parent->max_rate) rate = c->parent->max_rate; else if (rate < c->parent->min_rate) rate = c->parent->min_rate; if (!(c->flags & DIV_BUS) && (c->div > 1)) rate /= c->div; } return rate; } static int tegra_clk_shared_bus_user_enable(struct clk *c) { int ret; c->u.shared_bus_user.enabled = true; ret = tegra_clk_shared_bus_update(c->parent); if (!ret && c->u.shared_bus_user.client) ret = clk_enable(c->u.shared_bus_user.client); if (!ret && c->cross_clk_mutex && clk_cansleep(c)) tegra_clk_shared_bus_migrate_users(c); return ret; } static void tegra_clk_shared_bus_user_disable(struct clk *c) { if (c->u.shared_bus_user.client) clk_disable(c->u.shared_bus_user.client); c->u.shared_bus_user.enabled = false; tegra_clk_shared_bus_update(c->parent); if (c->cross_clk_mutex && clk_cansleep(c)) tegra_clk_shared_bus_migrate_users(c); } static void tegra_clk_shared_bus_user_reset(struct clk *c, bool assert) { if (c->u.shared_bus_user.client) { if (c->u.shared_bus_user.client->ops && c->u.shared_bus_user.client->ops->reset) c->u.shared_bus_user.client->ops->reset( c->u.shared_bus_user.client, assert); } } static struct clk_ops tegra_clk_shared_bus_user_ops = { .init = tegra_clk_shared_bus_user_init, .enable = tegra_clk_shared_bus_user_enable, .disable = tegra_clk_shared_bus_user_disable, .set_parent = tegra_clk_shared_bus_user_set_parent, .set_rate = tegra_clk_shared_bus_user_set_rate, .round_rate = tegra_clk_shared_bus_user_round_rate, .reset = tegra_clk_shared_bus_user_reset, }; /* shared bus connector ops (user/bus connector to cascade shared buses) */ static int tegra12_clk_shared_connector_update(struct clk *bus) { unsigned long rate, old_rate; if (detach_shared_bus) return 0; rate = tegra12_clk_shared_bus_update(bus, NULL, NULL, NULL); old_rate = clk_get_rate_locked(bus); if (rate == old_rate) return 0; 
return clk_set_rate_locked(bus, rate); } static struct clk_ops tegra_clk_shared_connector_ops = { .init = tegra_clk_shared_bus_user_init, .enable = tegra_clk_shared_bus_user_enable, .disable = tegra_clk_shared_bus_user_disable, .set_parent = tegra_clk_shared_bus_user_set_parent, .set_rate = tegra_clk_shared_bus_user_set_rate, .round_rate = tegra_clk_shared_bus_user_round_rate, .reset = tegra_clk_shared_bus_user_reset, .shared_bus_update = tegra12_clk_shared_connector_update, }; /* coupled gate ops */ /* * Some clocks may have common enable/disable control, but run at different * rates, and have different dvfs tables. Coupled gate clock synchronize * enable/disable operations for such clocks. */ static int tegra12_clk_coupled_gate_enable(struct clk *c) { int ret; const struct clk_mux_sel *sel; BUG_ON(!c->inputs); pr_debug("%s on clock %s\n", __func__, c->name); for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == c->parent) continue; ret = clk_enable(sel->input); if (ret) { while (sel != c->inputs) { sel--; if (sel->input == c->parent) continue; clk_disable(sel->input); } return ret; } } return tegra12_periph_clk_enable(c); } static void tegra12_clk_coupled_gate_disable(struct clk *c) { const struct clk_mux_sel *sel; BUG_ON(!c->inputs); pr_debug("%s on clock %s\n", __func__, c->name); tegra12_periph_clk_disable(c); if (!c->refcnt) /* happens only on boot clean-up: don't propagate */ return; for (sel = c->inputs; sel->input != NULL; sel++) { if (sel->input == c->parent) continue; if (sel->input->set) /* enforce coupling after boot only */ clk_disable(sel->input); } } static struct clk_ops tegra_clk_coupled_gate_ops = { .init = tegra12_periph_clk_init, .enable = tegra12_clk_coupled_gate_enable, .disable = tegra12_clk_coupled_gate_disable, .reset = &tegra12_periph_clk_reset, }; /* * AHB and APB shared bus operations * APB shared bus is a user of AHB shared bus * AHB shared bus is a user of SCLK complex shared bus * SCLK/AHB and AHB/APB dividers can be 
dynamically changed. When AHB and APB * users requests are propagated to SBUS target rate, current values of the * dividers are ignored, and flat maximum request is selected as SCLK bus final * target. Then the dividers will be re-evaluated, based on AHB and APB targets. * Both AHB and APB buses are always enabled. */ static void tegra12_clk_ahb_apb_init(struct clk *c, struct clk *bus_clk) { tegra_clk_shared_bus_user_init(c); c->max_rate = bus_clk->max_rate; c->min_rate = bus_clk->min_rate; c->mul = bus_clk->mul; c->div = bus_clk->div; c->u.shared_bus_user.rate = clk_get_rate(bus_clk); c->u.shared_bus_user.enabled = true; c->parent->child_bus = c; } static void tegra12_clk_ahb_init(struct clk *c) { struct clk *bus_clk = c->parent->u.system.hclk; tegra12_clk_ahb_apb_init(c, bus_clk); } static void tegra12_clk_apb_init(struct clk *c) { struct clk *bus_clk = c->parent->parent->u.system.pclk; tegra12_clk_ahb_apb_init(c, bus_clk); } static int tegra12_clk_ahb_apb_update(struct clk *bus) { unsigned long rate; if (detach_shared_bus) return 0; rate = tegra12_clk_shared_bus_update(bus, NULL, NULL, NULL); return clk_set_rate_locked(bus, rate); } static struct clk_ops tegra_clk_ahb_ops = { .init = tegra12_clk_ahb_init, .set_rate = tegra_clk_shared_bus_user_set_rate, .round_rate = tegra_clk_shared_bus_user_round_rate, .shared_bus_update = tegra12_clk_ahb_apb_update, }; static struct clk_ops tegra_clk_apb_ops = { .init = tegra12_clk_apb_init, .set_rate = tegra_clk_shared_bus_user_set_rate, .round_rate = tegra_clk_shared_bus_user_round_rate, .shared_bus_update = tegra12_clk_ahb_apb_update, }; /* Clock definitions */ static struct clk tegra_clk_32k = { .name = "clk_32k", .rate = 32768, .ops = NULL, .max_rate = 32768, }; static struct clk tegra_clk_m = { .name = "clk_m", .flags = ENABLE_ON_INIT, .ops = &tegra_clk_m_ops, .max_rate = 48000000, }; static struct clk tegra_clk_m_div2 = { .name = "clk_m_div2", .ops = &tegra_clk_m_div_ops, .parent = &tegra_clk_m, .mul = 1, .div = 2, 
.state = ON, .max_rate = 24000000, }; static struct clk tegra_clk_m_div4 = { .name = "clk_m_div4", .ops = &tegra_clk_m_div_ops, .parent = &tegra_clk_m, .mul = 1, .div = 4, .state = ON, .max_rate = 12000000, }; static struct clk tegra_pll_ref = { .name = "pll_ref", .flags = ENABLE_ON_INIT, .ops = &tegra_pll_ref_ops, .parent = &tegra_clk_m, .max_rate = 26000000, }; static struct clk_pll_freq_table tegra_pll_c_freq_table[] = { { 12000000, 600000000, 100, 1, 2}, { 13000000, 600000000, 92, 1, 2}, /* actual: 598.0 MHz */ { 16800000, 600000000, 71, 1, 2}, /* actual: 596.4 MHz */ { 19200000, 600000000, 62, 1, 2}, /* actual: 595.2 MHz */ { 26000000, 600000000, 92, 2, 2}, /* actual: 598.0 MHz */ { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_c = { .name = "pll_c", .ops = &tegra_pllxc_ops, .reg = 0x80, .parent = &tegra_pll_ref, .max_rate = 1400000000, .u.pll = { .input_min = 12000000, .input_max = 800000000, .cf_min = 12000000, .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */ .vco_min = 600000000, .vco_max = 1400000000, .freq_table = tegra_pll_c_freq_table, .lock_delay = 300, .misc1 = 0x88 - 0x80, .round_p_to_pdiv = pllxc_round_p_to_pdiv, }, }; static struct clk tegra_pll_c_out1 = { .name = "pll_c_out1", .ops = &tegra_pll_div_ops, #ifdef CONFIG_TEGRA_DUAL_CBUS .flags = DIV_U71, #else .flags = DIV_U71 | PERIPH_ON_CBUS, #endif .parent = &tegra_pll_c, .reg = 0x84, .reg_shift = 0, .max_rate = 700000000, }; static struct clk_pll_freq_table tegra_pll_cx_freq_table[] = { { 12000000, 600000000, 100, 1, 2}, { 13000000, 600000000, 92, 1, 2}, /* actual: 598.0 MHz */ { 16800000, 600000000, 71, 1, 2}, /* actual: 596.4 MHz */ { 19200000, 600000000, 62, 1, 2}, /* actual: 595.2 MHz */ { 26000000, 600000000, 92, 2, 2}, /* actual: 598.0 MHz */ { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_c2 = { .name = "pll_c2", .ops = &tegra_pllcx_ops, .flags = PLL_ALT_MISC_REG, .reg = 0x4e8, .parent = &tegra_pll_ref, .max_rate = 1200000000, .u.pll = { .input_min = 12000000, 
.input_max = 48000000, .cf_min = 12000000, .cf_max = 19200000, .vco_min = 650000000, .vco_max = 1300000000, .freq_table = tegra_pll_cx_freq_table, .lock_delay = 360, .misc1 = 0x4f0 - 0x4e8, .round_p_to_pdiv = pllcx_round_p_to_pdiv, }, }; static struct clk tegra_pll_c3 = { .name = "pll_c3", .ops = &tegra_pllcx_ops, .flags = PLL_ALT_MISC_REG, .reg = 0x4fc, .parent = &tegra_pll_ref, .max_rate = 1200000000, .u.pll = { .input_min = 12000000, .input_max = 48000000, .cf_min = 12000000, .cf_max = 19200000, .vco_min = 650000000, .vco_max = 1300000000, .freq_table = tegra_pll_cx_freq_table, .lock_delay = 360, .misc1 = 0x504 - 0x4fc, .round_p_to_pdiv = pllcx_round_p_to_pdiv, }, }; static struct clk_pll_freq_table tegra_pll_m_freq_table[] = { { 12000000, 800000000, 66, 1, 1}, /* actual: 792.0 MHz */ { 13000000, 800000000, 61, 1, 1}, /* actual: 793.0 MHz */ { 16800000, 800000000, 47, 1, 1}, /* actual: 789.6 MHz */ { 19200000, 800000000, 41, 1, 1}, /* actual: 787.2 MHz */ { 26000000, 800000000, 61, 2, 1}, /* actual: 793.0 MHz */ { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_m = { .name = "pll_m", .flags = PLLM, .ops = &tegra_pllm_ops, .reg = 0x90, .parent = &tegra_pll_ref, .max_rate = 1200000000, .u.pll = { .input_min = 12000000, .input_max = 500000000, .cf_min = 12000000, .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */ .vco_min = 500000000, .vco_max = 1200000000, .freq_table = tegra_pll_m_freq_table, .lock_delay = 300, .misc1 = 0x98 - 0x90, .round_p_to_pdiv = pllm_round_p_to_pdiv, }, }; static struct clk tegra_pll_m_out1 = { .name = "pll_m_out1", .ops = &tegra_pll_div_ops, .flags = DIV_U71 | DIV_U71_INT, .parent = &tegra_pll_m, .reg = 0x94, .reg_shift = 0, .max_rate = 1200000000, }; static struct clk_pll_freq_table tegra_pll_p_freq_table[] = { { 12000000, 408000000, 816, 12, 2, 8}, { 13000000, 408000000, 816, 13, 2, 8}, { 16800000, 408000000, 680, 14, 2, 8}, { 19200000, 408000000, 680, 16, 2, 8}, { 26000000, 408000000, 816, 26, 2, 8}, { 0, 0, 0, 0, 0, 0 }, }; 
static struct clk tegra_pll_p = { .name = "pll_p", .flags = ENABLE_ON_INIT | PLL_FIXED | PLL_HAS_CPCON, .ops = &tegra_pllp_ops, .reg = 0xa0, .parent = &tegra_pll_ref, .max_rate = 432000000, .u.pll = { .input_min = 2000000, .input_max = 31000000, .cf_min = 1000000, .cf_max = 6000000, .vco_min = 200000000, .vco_max = 700000000, .freq_table = tegra_pll_p_freq_table, .lock_delay = 300, }, }; static struct clk tegra_pll_p_out1 = { .name = "pll_p_out1", .ops = &tegra_pll_div_ops, .flags = DIV_U71 | DIV_U71_FIXED, .parent = &tegra_pll_p, .reg = 0xa4, .reg_shift = 0, .max_rate = 432000000, }; static struct clk tegra_pll_p_out2 = { .name = "pll_p_out2", .ops = &tegra_pll_div_ops, .flags = DIV_U71 | DIV_U71_FIXED | DIV_U71_INT, .parent = &tegra_pll_p, .reg = 0xa4, .reg_shift = 16, .max_rate = 432000000, }; static struct clk tegra_pll_p_out3 = { .name = "pll_p_out3", .ops = &tegra_pll_div_ops, .flags = DIV_U71 | DIV_U71_FIXED, .parent = &tegra_pll_p, .reg = 0xa8, .reg_shift = 0, .max_rate = 432000000, }; static struct clk tegra_pll_p_out4 = { .name = "pll_p_out4", .ops = &tegra_pll_div_ops, .flags = DIV_U71 | DIV_U71_FIXED, .parent = &tegra_pll_p, .reg = 0xa8, .reg_shift = 16, .max_rate = 432000000, }; static struct clk tegra_pll_p_out5 = { .name = "pll_p_out5", .ops = &tegra_pll_div_ops, .flags = DIV_U71 | DIV_U71_FIXED, .parent = &tegra_pll_p, .reg = 0x67c, .reg_shift = 16, .max_rate = 432000000, }; static struct clk_pll_freq_table tegra_pll_a_freq_table[] = { { 9600000, 282240000, 147, 5, 1, 4}, { 9600000, 368640000, 192, 5, 1, 4}, { 9600000, 240000000, 200, 8, 1, 8}, { 28800000, 282240000, 245, 25, 1, 8}, { 28800000, 368640000, 320, 25, 1, 8}, { 28800000, 240000000, 200, 24, 1, 8}, { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_a = { .name = "pll_a", .flags = PLL_HAS_CPCON, .ops = &tegra_pll_ops, .reg = 0xb0, .parent = &tegra_pll_p_out1, .max_rate = 700000000, .u.pll = { .input_min = 2000000, .input_max = 31000000, .cf_min = 1000000, .cf_max = 6000000, .vco_min = 
200000000, .vco_max = 700000000, .freq_table = tegra_pll_a_freq_table, .lock_delay = 300, }, }; static struct clk tegra_pll_a_out0 = { .name = "pll_a_out0", .ops = &tegra_pll_div_ops, .flags = DIV_U71, .parent = &tegra_pll_a, .reg = 0xb4, .reg_shift = 0, .max_rate = 100000000, }; static struct clk_pll_freq_table tegra_pll_d_freq_table[] = { { 12000000, 216000000, 864, 12, 4, 12}, { 13000000, 216000000, 864, 13, 4, 12}, { 16800000, 216000000, 720, 14, 4, 12}, { 19200000, 216000000, 720, 16, 4, 12}, { 26000000, 216000000, 864, 26, 4, 12}, { 12000000, 594000000, 99, 2, 1, 8}, { 13000000, 594000000, 594, 13, 1, 12}, { 16800000, 594000000, 495, 14, 1, 12}, { 19200000, 594000000, 495, 16, 1, 12}, { 26000000, 594000000, 594, 26, 1, 12}, { 12000000, 1000000000, 1000, 12, 1, 12}, { 13000000, 1000000000, 1000, 13, 1, 12}, { 19200000, 1000000000, 625, 12, 1, 12}, { 26000000, 1000000000, 1000, 26, 1, 12}, { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_d = { .name = "pll_d", .flags = PLL_HAS_CPCON | PLLD, .ops = &tegra_plld_ops, .reg = 0xd0, .parent = &tegra_pll_ref, .max_rate = 1500000000, .u.pll = { .input_min = 2000000, .input_max = 40000000, .cf_min = 1000000, .cf_max = 6000000, .vco_min = 500000000, .vco_max = 1500000000, .freq_table = tegra_pll_d_freq_table, .lock_delay = 1000, }, }; static struct clk tegra_pll_d_out0 = { .name = "pll_d_out0", .ops = &tegra_pll_div_ops, .flags = DIV_2 | PLLD, .parent = &tegra_pll_d, .max_rate = 750000000, }; static struct clk_pll_freq_table tegra_pll_u_freq_table[] = { { 12000000, 480000000, 960, 12, 2, 12}, { 13000000, 480000000, 960, 13, 2, 12}, { 16800000, 480000000, 400, 7, 2, 5}, { 19200000, 480000000, 200, 4, 2, 3}, { 26000000, 480000000, 960, 26, 2, 12}, { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_u = { .name = "pll_u", .flags = PLL_HAS_CPCON | PLLU, .ops = &tegra_pll_ops, .reg = 0xc0, .parent = &tegra_pll_ref, .max_rate = 480000000, .u.pll = { .input_min = 2000000, .input_max = 40000000, .cf_min = 1000000, .cf_max = 
6000000, .vco_min = 480000000, .vco_max = 960000000, .freq_table = tegra_pll_u_freq_table, .lock_delay = 1000, }, }; static struct clk tegra_pll_u_480M = { .name = "pll_u_480M", .flags = PLLU, .ops = &tegra_pll_div_ops, .reg = 0xc0, .reg_shift = 22, .parent = &tegra_pll_u, .mul = 1, .div = 1, .max_rate = 480000000, }; static struct clk tegra_pll_u_60M = { .name = "pll_u_60M", .flags = PLLU, .ops = &tegra_pll_div_ops, .reg = 0xc0, .reg_shift = 23, .parent = &tegra_pll_u, .mul = 1, .div = 8, .max_rate = 60000000, }; static struct clk tegra_pll_u_48M = { .name = "pll_u_48M", .flags = PLLU, .ops = &tegra_pll_div_ops, .reg = 0xc0, .reg_shift = 25, .parent = &tegra_pll_u, .mul = 1, .div = 10, .max_rate = 48000000, }; static struct clk tegra_pll_u_12M = { .name = "pll_u_12M", .flags = PLLU, .ops = &tegra_pll_div_ops, .reg = 0xc0, .reg_shift = 21, .parent = &tegra_pll_u, .mul = 1, .div = 40, .max_rate = 12000000, }; static struct clk_pll_freq_table tegra_pll_x_freq_table[] = { /* 1 GHz */ { 12000000, 1000000000, 83, 1, 1}, /* actual: 996.0 MHz */ { 13000000, 1000000000, 76, 1, 1}, /* actual: 988.0 MHz */ { 16800000, 1000000000, 59, 1, 1}, /* actual: 991.2 MHz */ { 19200000, 1000000000, 52, 1, 1}, /* actual: 998.4 MHz */ { 26000000, 1000000000, 76, 2, 1}, /* actual: 988.0 MHz */ { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_x = { .name = "pll_x", .flags = PLL_ALT_MISC_REG | PLLX, .ops = &tegra_pllxc_ops, #ifndef CONFIG_ARCH_TEGRA_13x_SOC .reg = 0xe0, #else .reg = 0x0, #endif .parent = &tegra_pll_ref, .max_rate = 3000000000UL, .u.pll = { .input_min = 12000000, .input_max = 800000000, .cf_min = 12000000, .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */ .vco_min = 1200000000, .vco_max = 3000000000UL, .freq_table = tegra_pll_x_freq_table, .lock_delay = 300, #ifndef CONFIG_ARCH_TEGRA_13x_SOC .misc1 = 0x510 - 0xe0, #else .misc1 = 0x8, #endif .round_p_to_pdiv = pllxc_round_p_to_pdiv, }, }; static struct clk tegra_pll_x_out0 = { .name = "pll_x_out0", .ops = 
&tegra_pll_div_ops, .flags = DIV_2 | PLLX, .parent = &tegra_pll_x, .max_rate = 1500000000UL, }; static struct clk tegra_dfll_cpu = { .name = "dfll_cpu", .flags = DFLL, .ops = &tegra_dfll_ops, #ifndef CONFIG_ARCH_TEGRA_13x_SOC .reg = 0x2f4, #else .reg = 0x80, #endif .max_rate = 3000000000UL, }; static struct clk_pll_freq_table tegra_pllc4_freq_table[] = { { 12000000, 600000000, 100, 1, 2}, { 13000000, 600000000, 92, 1, 2}, /* actual: 598.0 MHz */ { 16800000, 600000000, 71, 1, 2}, /* actual: 596.4 MHz */ { 19200000, 600000000, 62, 1, 2}, /* actual: 595.2 MHz */ { 26000000, 600000000, 92, 2, 2}, /* actual: 598.0 MHz */ { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_c4 = { .name = "pll_c4", .flags = PLL_ALT_MISC_REG, .ops = &tegra_pllss_ops, .reg = 0x5a4, .parent = &tegra_pll_ref, /* s/w policy, always tegra_pll_ref */ .max_rate = 600000000, .u.pll = { .input_min = 12000000, .input_max = 1000000000, .cf_min = 12000000, .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */ .vco_min = 600000000, .vco_max = 1200000000, .freq_table = tegra_pllc4_freq_table, .lock_delay = 300, .misc1 = 0x8, .round_p_to_pdiv = pllss_round_p_to_pdiv, .cpcon_default = 0, }, }; static struct clk_pll_freq_table tegra_plldp_freq_table[] = { { 12000000, 270000000, 90, 1, 4}, { 13000000, 270000000, 83, 1, 4}, /* actual: 269.8 MHz */ { 16800000, 270000000, 96, 1, 6}, /* actual: 268.8 MHz */ { 19200000, 270000000, 84, 1, 6}, /* actual: 268.8 MHz */ { 26000000, 270000000, 83, 2, 4}, /* actual: 269.8 MHz */ { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_dp = { .name = "pll_dp", .flags = PLL_ALT_MISC_REG, .ops = &tegra_pllss_ops, .reg = 0x590, .parent = &tegra_pll_ref, /* s/w policy, always tegra_pll_ref */ .max_rate = 600000000, .u.pll = { .input_min = 12000000, .input_max = 1000000000, .cf_min = 12000000, .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */ .vco_min = 600000000, .vco_max = 1200000000, .freq_table = tegra_plldp_freq_table, .lock_delay = 300, .misc1 = 0x8, 
.round_p_to_pdiv = pllss_round_p_to_pdiv, .cpcon_default = 0, }, }; static struct clk_pll_freq_table tegra_plld2_freq_table[] = { { 12000000, 594000000, 99, 1, 2, 3}, /*4kx2k @ 30Hz*/ { 12000000, 297000000, 99, 1, 4, 1}, /*1080p @ 60Hz*/ { 12000000, 148500000, 99, 1, 8, 1}, /* 720p @ 60Hz*/ { 12000000, 54000000, 54, 1, 6, 1}, /* 480p @ 60Hz*/ { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */ { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */ { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */ { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */ { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_d2 = { .name = "pll_d2", .flags = PLL_ALT_MISC_REG, .ops = &tegra_pllss_ops, .reg = 0x4b8, .parent = &tegra_pll_ref, /* s/w policy, always tegra_pll_ref */ .max_rate = 600000000, .u.pll = { .input_min = 12000000, .input_max = 1000000000, .cf_min = 12000000, .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */ .vco_min = 600000000, .vco_max = 1200000000, .freq_table = tegra_plld2_freq_table, .lock_delay = 300, .misc1 = 0x570 - 0x4b8, .round_p_to_pdiv = pllss_round_p_to_pdiv, .cpcon_default = 0, }, }; static struct clk tegra_pll_re_vco = { .name = "pll_re_vco", .flags = PLL_ALT_MISC_REG, .ops = &tegra_pllre_ops, .reg = 0x4c4, .parent = &tegra_pll_ref, .max_rate = 672000000, .u.pll = { .input_min = 12000000, .input_max = 1000000000, .cf_min = 12000000, .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */ .vco_min = 300000000, .vco_max = 672000000, .lock_delay = 300, .round_p_to_pdiv = pllre_round_p_to_pdiv, }, }; static struct clk tegra_pll_re_out = { .name = "pll_re_out", .ops = &tegra_pllre_out_ops, .parent = &tegra_pll_re_vco, .reg = 0x4c4, .max_rate = 672000000, }; static struct clk_pll_freq_table tegra_pll_e_freq_table[] = { /* PLLE special case: use cpcon field to store cml divider value */ { 336000000, 100000000, 100, 21, 16, 11}, { 312000000, 100000000, 200, 26, 24, 13}, { 13000000, 100000000, 200, 1, 26, 13}, { 12000000, 
100000000, 200, 1, 24, 13}, { 0, 0, 0, 0, 0, 0 }, }; static struct clk tegra_pll_e = { .name = "pll_e", .flags = PLL_ALT_MISC_REG, .ops = &tegra_plle_ops, .reg = 0xe8, .max_rate = 100000000, .u.pll = { .input_min = 12000000, .input_max = 1000000000, .cf_min = 12000000, .cf_max = 75000000, .vco_min = 1600000000, .vco_max = 2400000000U, .freq_table = tegra_pll_e_freq_table, .lock_delay = 300, .fixed_rate = 100000000, }, }; static struct clk tegra_cml0_clk = { .name = "cml0", .parent = &tegra_pll_e, .ops = &tegra_cml_clk_ops, .reg = PLLE_AUX, .max_rate = 100000000, .u.periph = { .clk_num = 0, }, }; static struct clk tegra_cml1_clk = { .name = "cml1", .parent = &tegra_pll_e, .ops = &tegra_cml_clk_ops, .reg = PLLE_AUX, .max_rate = 100000000, .u.periph = { .clk_num = 1, }, }; static struct clk tegra_pciex_clk = { .name = "pciex", .parent = &tegra_pll_e, .ops = &tegra_pciex_clk_ops, .max_rate = 500000000, .u.periph = { .clk_num = 74, }, }; /* Audio sync clocks */ #define SYNC_SOURCE(_id, _dev) \ { \ .name = #_id "_sync", \ .lookup = { \ .dev_id = #_dev , \ .con_id = "ext_audio_sync", \ }, \ .rate = 24000000, \ .max_rate = 24000000, \ .ops = &tegra_sync_source_ops \ } static struct clk tegra_sync_source_list[] = { SYNC_SOURCE(spdif_in, tegra30-spdif), SYNC_SOURCE(i2s0, tegra30-i2s.0), SYNC_SOURCE(i2s1, tegra30-i2s.1), SYNC_SOURCE(i2s2, tegra30-i2s.2), SYNC_SOURCE(i2s3, tegra30-i2s.3), SYNC_SOURCE(i2s4, tegra30-i2s.4), SYNC_SOURCE(vimclk, vimclk), }; static struct clk_mux_sel mux_d_audio_clk[] = { { .input = &tegra_pll_a_out0, .value = 0}, { .input = &tegra_pll_p, .value = 0x8000}, { .input = &tegra_clk_m, .value = 0xc000}, { .input = &tegra_sync_source_list[0], .value = 0xE000}, { .input = &tegra_sync_source_list[1], .value = 0xE001}, { .input = &tegra_sync_source_list[2], .value = 0xE002}, { .input = &tegra_sync_source_list[3], .value = 0xE003}, { .input = &tegra_sync_source_list[4], .value = 0xE004}, { .input = &tegra_sync_source_list[5], .value = 0xE005}, { .input = 
&tegra_pll_a_out0, .value = 0xE006}, { .input = &tegra_sync_source_list[6], .value = 0xE007}, { 0, 0 } }; static struct clk_mux_sel mux_audio_sync_clk[] = { { .input = &tegra_sync_source_list[0], .value = 0}, { .input = &tegra_sync_source_list[1], .value = 1}, { .input = &tegra_sync_source_list[2], .value = 2}, { .input = &tegra_sync_source_list[3], .value = 3}, { .input = &tegra_sync_source_list[4], .value = 4}, { .input = &tegra_sync_source_list[5], .value = 5}, { .input = &tegra_pll_a_out0, .value = 6}, { .input = &tegra_sync_source_list[6], .value = 7}, { 0, 0 } }; #define AUDIO_SYNC_CLK(_id, _dev, _index) \ { \ .name = #_id, \ .lookup = { \ .dev_id = #_dev, \ .con_id = "audio_sync", \ }, \ .inputs = mux_audio_sync_clk, \ .reg = 0x4A0 + (_index) * 4, \ .max_rate = 24000000, \ .ops = &tegra_audio_sync_clk_ops \ } static struct clk tegra_clk_audio_list[] = { AUDIO_SYNC_CLK(audio0, tegra30-i2s.0, 0), AUDIO_SYNC_CLK(audio1, tegra30-i2s.1, 1), AUDIO_SYNC_CLK(audio2, tegra30-i2s.2, 2), AUDIO_SYNC_CLK(audio3, tegra30-i2s.3, 3), AUDIO_SYNC_CLK(audio4, tegra30-i2s.4, 4), AUDIO_SYNC_CLK(audio, tegra30-spdif, 5), /* SPDIF */ }; #define AUDIO_SYNC_2X_CLK(_id, _dev, _index) \ { \ .name = #_id "_2x", \ .lookup = { \ .dev_id = #_dev, \ .con_id = "audio_sync_2x" \ }, \ .flags = PERIPH_NO_RESET, \ .max_rate = 48000000, \ .ops = &tegra_clk_double_ops, \ .reg = 0x49C, \ .reg_shift = 24 + (_index), \ .parent = &tegra_clk_audio_list[(_index)], \ .u.periph = { \ .clk_num = 113 + (_index), \ }, \ } static struct clk tegra_clk_audio_2x_list[] = { AUDIO_SYNC_2X_CLK(audio0, tegra30-i2s.0, 0), AUDIO_SYNC_2X_CLK(audio1, tegra30-i2s.1, 1), AUDIO_SYNC_2X_CLK(audio2, tegra30-i2s.2, 2), AUDIO_SYNC_2X_CLK(audio3, tegra30-i2s.3, 3), AUDIO_SYNC_2X_CLK(audio4, tegra30-i2s.4, 4), AUDIO_SYNC_2X_CLK(audio, tegra30-spdif, 5), /* SPDIF */ }; #define MUX_I2S_SPDIF(_id, _index) \ static struct clk_mux_sel mux_pllaout0_##_id##_2x_pllp_clkm[] = { \ {.input = &tegra_pll_a_out0, .value = 0}, \ {.input = 
&tegra_clk_audio_2x_list[(_index)], .value = 2}, \ {.input = &tegra_pll_p, .value = 4}, \ {.input = &tegra_clk_m, .value = 6}, \ { 0, 0}, \ } MUX_I2S_SPDIF(audio0, 0); MUX_I2S_SPDIF(audio1, 1); MUX_I2S_SPDIF(audio2, 2); MUX_I2S_SPDIF(audio3, 3); MUX_I2S_SPDIF(audio4, 4); MUX_I2S_SPDIF(audio, 5); /* SPDIF */ /* External clock outputs (through PMC) */ #define MUX_EXTERN_OUT(_id) \ static struct clk_mux_sel mux_clkm_clkm2_clkm4_extern##_id[] = { \ {.input = &tegra_clk_m, .value = 0}, \ {.input = &tegra_clk_m_div2, .value = 1}, \ {.input = &tegra_clk_m_div4, .value = 2}, \ {.input = NULL, .value = 3}, /* placeholder */ \ { 0, 0}, \ } MUX_EXTERN_OUT(1); MUX_EXTERN_OUT(2); MUX_EXTERN_OUT(3); static struct clk_mux_sel *mux_extern_out_list[] = { mux_clkm_clkm2_clkm4_extern1, mux_clkm_clkm2_clkm4_extern2, mux_clkm_clkm2_clkm4_extern3, }; #define CLK_OUT_CLK(_id, _max_rate) \ { \ .name = "clk_out_" #_id, \ .lookup = { \ .dev_id = "clk_out_" #_id, \ .con_id = "extern" #_id, \ }, \ .ops = &tegra_clk_out_ops, \ .reg = 0x1a8, \ .inputs = mux_clkm_clkm2_clkm4_extern##_id, \ .flags = MUX_CLK_OUT, \ .max_rate = _max_rate, \ .u.periph = { \ .clk_num = (_id - 1) * 8 + 2, \ }, \ } static struct clk tegra_clk_out_list[] = { CLK_OUT_CLK(1, 26000000), CLK_OUT_CLK(2, 40800000), CLK_OUT_CLK(3, 26000000), }; /* called after peripheral external clocks are initialized */ static void init_clk_out_mux(void) { int i; struct clk *c; /* output clock con_id is the name of peripheral external clock connected to input 3 of the output mux */ for (i = 0; i < ARRAY_SIZE(tegra_clk_out_list); i++) { c = tegra_get_clock_by_name( tegra_clk_out_list[i].lookup.con_id); if (!c) pr_err("%s: could not find clk %s\n", __func__, tegra_clk_out_list[i].lookup.con_id); mux_extern_out_list[i][3].input = c; } } static struct clk_mux_sel mux_sclk[] = { { .input = &tegra_clk_m, .value = 0}, { .input = &tegra_pll_c_out1, .value = 1}, { .input = &tegra_pll_p_out4, .value = 2}, { .input = &tegra_pll_p, .value = 3}, { .input 
= &tegra_pll_p_out2, .value = 4}, { .input = &tegra_pll_c, .value = 5}, { .input = &tegra_clk_32k, .value = 6}, { .input = &tegra_pll_m_out1, .value = 7}, { 0, 0}, }; static struct clk tegra_clk_sclk = { .name = "sclk", .inputs = mux_sclk, .reg = 0x28, .ops = &tegra_super_ops, .max_rate = 420000000, .min_rate = 12000000, }; /* Peripheral muxes */ #ifndef CONFIG_ARCH_TEGRA_13x_SOC static struct clk_mux_sel mux_cclk_g[] = { { .input = &tegra_clk_m, .value = 0}, { .input = &tegra_pll_c, .value = 1}, { .input = &tegra_clk_32k, .value = 2}, { .input = &tegra_pll_m, .value = 3}, { .input = &tegra_pll_p, .value = 4}, { .input = &tegra_pll_p_out4, .value = 5}, /* { .input = &tegra_pll_c2, .value = 6}, - no use on tegra12x */ /* { .input = &tegra_clk_c3, .value = 7}, - no use on tegra12x */ { .input = &tegra_pll_x, .value = 8}, { .input = &tegra_dfll_cpu, .value = 15}, { 0, 0}, }; static struct clk_mux_sel mux_cclk_lp[] = { { .input = &tegra_clk_m, .value = 0}, { .input = &tegra_pll_c, .value = 1}, { .input = &tegra_clk_32k, .value = 2}, { .input = &tegra_pll_m, .value = 3}, { .input = &tegra_pll_p, .value = 4}, { .input = &tegra_pll_p_out4, .value = 5}, /* { .input = &tegra_pll_c2, .value = 6}, - no use on tegra12x */ /* { .input = &tegra_clk_c3, .value = 7}, - no use on tegra12x */ { .input = &tegra_pll_x_out0, .value = 8}, { .input = &tegra_pll_x, .value = 8 | SUPER_LP_DIV2_BYPASS}, { 0, 0}, }; #else static struct clk_mux_sel mux_cclk_g[] = { { .input = &tegra_clk_m, .value = 0}, /* { .input = , .value = 1}, - testclk */ { .input = &tegra_clk_m, .value = 2}, { .input = &tegra_pll_ref, .value = 3}, { .input = &tegra_pll_m, .value = 4}, { .input = &tegra_pll_p, .value = 5}, { .input = &tegra_clk_sclk, .value = 6}, { .input = &tegra_clk_m, .value = 7}, { .input = &tegra_pll_x, .value = 8}, /* { .input = , .value = 9}, - High jitter DFLL */ /* { .input = , .value = 14}, - High jitter PLLX */ { .input = &tegra_dfll_cpu, .value = 15}, { 0, 0}, }; #endif #ifndef 
CONFIG_ARCH_TEGRA_13x_SOC static struct clk tegra_clk_cclk_g = { .name = "cclk_g", .flags = DIV_U71 | DIV_U71_INT | MUX, .inputs = mux_cclk_g, .reg = 0x368, .ops = &tegra_super_ops, .max_rate = 3000000000UL, }; static struct clk tegra_clk_cclk_lp = { .name = "cclk_lp", .flags = DIV_2 | DIV_U71 | DIV_U71_INT | MUX, .inputs = mux_cclk_lp, .reg = 0x370, .ops = &tegra_super_ops, .max_rate = 1350000000, }; #else static struct clk tegra_clk_cclk_g = { .name = "cclk_g", .flags = DIV_U71 | DIV_U71_INT | MUX, .inputs = mux_cclk_g, .reg = 0x20, .ops = &tegra13_super_cclk_ops, .max_rate = 3000000000UL, }; #endif static struct raw_notifier_head cpu_g_rate_change_nh; static struct clk tegra_clk_virtual_cpu_g = { .name = "cpu_g", .parent = &tegra_clk_cclk_g, .ops = &tegra_cpu_ops, .max_rate = 3000000000UL, .min_rate = 3187500, .u.cpu = { .main = &tegra_pll_x, #ifndef CONFIG_ARCH_TEGRA_13x_SOC .backup = &tegra_pll_p_out4, #else .backup = &tegra_pll_p, #endif .dynamic = &tegra_dfll_cpu, .mode = MODE_G, }, .rate_change_nh = &cpu_g_rate_change_nh, }; #ifndef CONFIG_ARCH_TEGRA_13x_SOC static struct clk tegra_clk_virtual_cpu_lp = { .name = "cpu_lp", .parent = &tegra_clk_cclk_lp, .ops = &tegra_cpu_ops, .max_rate = 1350000000, .min_rate = 3187500, .u.cpu = { .main = &tegra_pll_x, .backup = &tegra_pll_p_out4, .mode = MODE_LP, }, }; #endif static struct clk_mux_sel mux_cpu_cmplx[] = { { .input = &tegra_clk_virtual_cpu_g, .value = 0}, #ifndef CONFIG_ARCH_TEGRA_13x_SOC { .input = &tegra_clk_virtual_cpu_lp, .value = 1}, #endif { 0, 0}, }; static struct clk tegra_clk_cpu_cmplx = { .name = "cpu", .inputs = mux_cpu_cmplx, .ops = &tegra_cpu_cmplx_ops, .max_rate = 3000000000UL, }; static struct clk tegra_clk_cop = { .name = "cop", .parent = &tegra_clk_sclk, .ops = &tegra_cop_ops, .max_rate = 408000000, }; static struct clk tegra_clk_hclk = { .name = "hclk", .flags = DIV_BUS, .parent = &tegra_clk_sclk, .reg = 0x30, .reg_shift = 4, .ops = &tegra_bus_ops, .max_rate = 408000000, .min_rate = 12000000, 
}; static struct clk tegra_clk_pclk = { .name = "pclk", .flags = DIV_BUS, .parent = &tegra_clk_hclk, .reg = 0x30, .reg_shift = 0, .ops = &tegra_bus_ops, .max_rate = 204000000, .min_rate = 12000000, }; static struct raw_notifier_head sbus_rate_change_nh; static struct clk tegra_clk_sbus_cmplx = { .name = "sbus", .parent = &tegra_clk_sclk, .ops = &tegra_sbus_cmplx_ops, .u.system = { .pclk = &tegra_clk_pclk, .hclk = &tegra_clk_hclk, .sclk_low = &tegra_pll_p_out2, #ifdef CONFIG_TEGRA_PLLM_SCALED .sclk_high = &tegra_pll_c_out1, #else .sclk_high = &tegra_pll_m_out1, #endif }, .rate_change_nh = &sbus_rate_change_nh, }; static struct clk tegra_clk_ahb = { .name = "ahb.sclk", .flags = DIV_BUS, .parent = &tegra_clk_sbus_cmplx, .ops = &tegra_clk_ahb_ops, }; static struct clk tegra_clk_apb = { .name = "apb.sclk", .flags = DIV_BUS, .parent = &tegra_clk_ahb, .ops = &tegra_clk_apb_ops, }; static struct clk tegra_clk_blink = { .name = "blink", .parent = &tegra_clk_32k, .reg = 0x40, .ops = &tegra_blink_clk_ops, .max_rate = 32768, }; /* Multimedia modules muxes */ static struct clk_mux_sel mux_pllm_pllc2_c_c3_pllp_plla[] = { { .input = &tegra_pll_m, .value = 0}, { .input = &tegra_pll_c2, .value = 1}, { .input = &tegra_pll_c, .value = 2}, { .input = &tegra_pll_c3, .value = 3}, { .input = &tegra_pll_p, .value = 4}, { .input = &tegra_pll_a_out0, .value = 6}, { 0, 0}, }; static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = { { .input = &tegra_pll_m, .value = 0}, { .input = &tegra_pll_c, .value = 2}, { .input = &tegra_pll_p, .value = 4}, { .input = &tegra_pll_a_out0, .value = 6}, { 0, 0}, }; static struct clk_mux_sel mux_pllm_pllc_pllp_plla_v2[] = { { .input = &tegra_pll_m, .value = 0}, { .input = &tegra_pll_c, .value = 1}, { .input = &tegra_pll_p, .value = 2}, { .input = &tegra_pll_a_out0, .value = 3}, /* Skip C2(4) */ /* Skip C2(5) */ { 0, 0}, }; static struct clk_mux_sel mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = { { .input = &tegra_pll_m, .value = 0}, { .input = &tegra_pll_c, .value 
= 1}, { .input = &tegra_pll_p, .value = 2}, { .input = &tegra_pll_a_out0, .value = 3}, { .input = &tegra_pll_c2, .value = 4}, { .input = &tegra_pll_c3, .value = 5}, { .input = &tegra_clk_m, .value = 6}, { 0, 0}, }; static struct clk_mux_sel mux_pllm_pllc_pllp_plla_pllc4[] = { { .input = &tegra_pll_m, .value = 0}, /* Skip C2(1) */ { .input = &tegra_pll_c, .value = 2}, /* Skip C2(3) */ { .input = &tegra_pll_p, .value = 4}, { .input = &tegra_pll_a_out0, .value = 6}, { .input = &tegra_pll_c4, .value = 7}, { 0, 0}, }; static struct clk_mux_sel mux_pllm_pllc_pllp_plla_clkm_pllc4[] = { { .input = &tegra_pll_m, .value = 0}, { .input = &tegra_pll_c, .value = 1}, { .input = &tegra_pll_p, .value = 2}, { .input = &tegra_pll_a_out0, .value = 3}, /* Skip C2(4) & C3(5) */ { .input = &tegra_clk_m, .value = 6}, { .input = &tegra_pll_c4, .value = 7}, { 0, 0}, }; static struct clk_mux_sel mux_plla_pllc_pllp_clkm[] = { { .input = &tegra_pll_a_out0, .value = 0}, { .input = &tegra_pll_c, .value = 2}, { .input = &tegra_pll_p, .value = 4}, { .input = &tegra_clk_m, .value = 6}, { 0, 0}, }; /* EMC muxes */ /* FIXME: add EMC latency mux */ static struct clk_mux_sel mux_pllm_pllc_pllp_clkm[] = { { .input = &tegra_pll_m, .value = 0}, { .input = &tegra_pll_c, .value = 1}, { .input = &tegra_pll_p, .value = 2}, { .input = &tegra_clk_m, .value = 3}, { .input = &tegra_pll_m, .value = 4}, /* low jitter PLLM output */ /* { .input = &tegra_pll_c2, .value = 5}, - no use on tegra12x */ /* { .input = &tegra_pll_c3, .value = 6}, - no use on tegra12x */ { .input = &tegra_pll_c, .value = 7}, /* low jitter PLLC output */ { 0, 0}, }; /* Display subsystem muxes */ static struct clk_mux_sel mux_pllp_pllm_plld_plla_pllc_plld2_clkm[] = { {.input = &tegra_pll_p, .value = 0}, {.input = &tegra_pll_m, .value = 1}, {.input = &tegra_pll_d_out0, .value = 2}, {.input = &tegra_pll_a_out0, .value = 3}, {.input = &tegra_pll_c, .value = 4}, {.input = &tegra_pll_d2, .value = 5}, {.input = &tegra_clk_m, .value = 6}, { 0, 0}, 
}; static struct clk_mux_sel mux_plld_out0[] = { { .input = &tegra_pll_d_out0, .value = 0}, { 0, 0}, }; /* Peripheral muxes */ static struct clk_mux_sel mux_pllp_pllc_clkm[] = { {.input = &tegra_pll_p, .value = 0}, {.input = &tegra_pll_c, .value = 2}, {.input = &tegra_clk_m, .value = 6}, { 0, 0}, }; static struct clk_mux_sel mux_pllp_pllc_clkm1[] = { {.input = &tegra_pll_p, .value = 0}, {.input = &tegra_pll_c, .value = 1}, {.input = &tegra_clk_m, .value = 3}, { 0, 0}, }; static struct clk_mux_sel mux_pllp_pllc2_c_c3_pllm_clkm[] = { { .input = &tegra_pll_p, .value = 0}, { .input = &tegra_pll_c2, .value = 1}, { .input = &tegra_pll_c, .value = 2}, { .input = &tegra_pll_c3, .value = 3}, { .input = &tegra_pll_m, .value = 4}, { .input = &tegra_clk_m, .value = 6}, { 0, 0}, }; static struct clk_mux_sel mux_pllp_pllc_pllm_clkm[] = { { .input = &tegra_pll_p, .value = 0}, { .input = &tegra_pll_c, .value = 2}, { .input = &tegra_pll_m, .value = 4}, { .input = &tegra_clk_m, .value = 6}, { 0, 0}, }; static struct clk_mux_sel mux_pllp_pllc_pllm[] = { {.input = &tegra_pll_p, .value = 0}, {.input = &tegra_pll_c, .value = 2}, {.input = &tegra_pll_m, .value = 4}, { 0, 0}, }; static struct clk_mux_sel mux_pllp_clkm_clk32_plle[] = { { .input = &tegra_pll_p, .value = 0}, { .input = &tegra_clk_m, .value = 1}, { .input = &tegra_clk_32k, .value = 2}, { .input = &tegra_pll_e, .value = 3}, { 0, 0}, }; static struct clk_mux_sel mux_pllp_clkm[] = { { .input = &tegra_pll_p, .value = 0}, { .input = &tegra_clk_m, .value = 6}, { 0, 0}, }; static struct clk_mux_sel mux_pllp_pllc_clk32_clkm[] = { {.input = &tegra_pll_p, .value = 0}, {.input = &tegra_pll_c, .value = 2}, {.input = &tegra_clk_32k, .value = 4}, {.input = &tegra_clk_m, .value = 6}, { 0, 0}, }; static struct clk_mux_sel mux_pllp_pllc_clkm_clk32[] = { {.input = &tegra_pll_p, .value = 0}, {.input = &tegra_pll_c, .value = 2}, {.input = &tegra_clk_m, .value = 4}, {.input = &tegra_clk_32k, .value = 6}, { 0, 0}, }; static struct clk_mux_sel 
mux_plla_clk32_pllp_clkm_plle[] = { { .input = &tegra_pll_a_out0, .value = 0}, { .input = &tegra_clk_32k, .value = 1}, { .input = &tegra_pll_p, .value = 2}, { .input = &tegra_clk_m, .value = 3}, { .input = &tegra_pll_e, .value = 4}, { 0, 0}, }; static struct clk_mux_sel mux_clkm_pllp_pllc_pllre[] = { { .input = &tegra_clk_m, .value = 0}, { .input = &tegra_pll_p, .value = 1}, { .input = &tegra_pll_c, .value = 3}, { .input = &tegra_pll_re_out, .value = 5}, { 0, 0}, }; static struct clk_mux_sel mux_clkm_48M_pllp_480M[] = { { .input = &tegra_clk_m, .value = 0}, { .input = &tegra_pll_u_48M, .value = 2}, { .input = &tegra_pll_p, .value = 4}, { .input = &tegra_pll_u_480M, .value = 6}, { 0, 0}, }; static struct clk_mux_sel mux_clkm_pllre_clk32_480M_pllc_ref[] = { { .input = &tegra_clk_m, .value = 0}, { .input = &tegra_pll_re_out, .value = 1}, { .input = &tegra_clk_32k, .value = 2}, { .input = &tegra_pll_u_480M, .value = 3}, { .input = &tegra_pll_c, .value = 4}, { .input = &tegra_pll_ref, .value = 7}, { 0, 0}, }; static struct clk_mux_sel mux_pllp3_pllc_clkm[] = { { .input = &tegra_pll_p_out3, .value = 0}, { .input = &tegra_pll_c, .value = 1}, { .input = &tegra_clk_m, .value = 3}, { 0, 0}, }; /* Single clock source ("fake") muxes */ static struct clk_mux_sel mux_clk_m[] = { { .input = &tegra_clk_m, .value = 0}, { 0, 0}, }; static struct clk_mux_sel mux_pllp_out3[] = { { .input = &tegra_pll_p_out3, .value = 0}, { 0, 0}, }; static struct clk_mux_sel mux_clk_32k[] = { { .input = &tegra_clk_32k, .value = 0}, { 0, 0}, }; static struct clk_mux_sel mux_plld[] = { { .input = &tegra_pll_d_out0, .value = 1}, { 0, 0}, }; static struct raw_notifier_head emc_rate_change_nh; static struct clk tegra_clk_emc = { .name = "emc", .ops = &tegra_emc_clk_ops, .reg = 0x19c, .max_rate = 1200000000, .min_rate = 12750000, .inputs = mux_pllm_pllc_pllp_clkm, .flags = MUX | DIV_U71 | PERIPH_EMC_ENB, .u.periph = { .clk_num = 57, }, .rate_change_nh = &emc_rate_change_nh, }; static struct clk tegra_clk_mc 
= { .name = "mc", .ops = &tegra_mc_clk_ops, .max_rate = 533000000, .parent = &tegra_clk_emc, .flags = PERIPH_NO_RESET, .u.periph = { .clk_num = 32, }, }; static struct raw_notifier_head host1x_rate_change_nh; static struct clk tegra_clk_host1x = { .name = "host1x", .lookup = { .dev_id = "host1x", }, .ops = &tegra_1xbus_clk_ops, .reg = 0x180, .inputs = mux_pllm_pllc_pllp_plla, .flags = MUX | DIV_U71 | DIV_U71_INT, .max_rate = 500000000, .min_rate = 12000000, .u.periph = { .clk_num = 28, .pll_low = &tegra_pll_p, #ifdef CONFIG_TEGRA_PLLM_SCALED .pll_high = &tegra_pll_c, #else .pll_high = &tegra_pll_m, #endif }, .rate_change_nh = &host1x_rate_change_nh, }; static struct raw_notifier_head mselect_rate_change_nh; static struct clk tegra_clk_mselect = { .name = "mselect", .lookup = { .dev_id = "mselect", }, .ops = &tegra_1xbus_clk_ops, .reg = 0x3b4, .inputs = mux_pllp_clkm, .flags = MUX | DIV_U71 | DIV_U71_INT, .max_rate = 408000000, .min_rate = 12000000, .u.periph = { .clk_num = 99, .pll_low = &tegra_pll_p, .pll_high = &tegra_pll_p, .threshold = 408000000, }, .rate_change_nh = &mselect_rate_change_nh, }; #ifdef CONFIG_TEGRA_DUAL_CBUS static struct raw_notifier_head c2bus_rate_change_nh; static struct raw_notifier_head c3bus_rate_change_nh; static struct clk tegra_clk_c2bus = { .name = "c2bus", .parent = &tegra_pll_c2, .ops = &tegra_clk_cbus_ops, .max_rate = 700000000, .mul = 1, .div = 1, .flags = PERIPH_ON_CBUS, .shared_bus_backup = { .input = &tegra_pll_p, }, .rate_change_nh = &c2bus_rate_change_nh, }; static struct clk tegra_clk_c3bus = { .name = "c3bus", .parent = &tegra_pll_c3, .ops = &tegra_clk_cbus_ops, .max_rate = 900000000, .mul = 1, .div = 1, .flags = PERIPH_ON_CBUS, .shared_bus_backup = { .input = &tegra_pll_p, }, .rate_change_nh = &c3bus_rate_change_nh, }; #ifdef CONFIG_TEGRA_MIGRATE_CBUS_USERS static DEFINE_MUTEX(cbus_mutex); #define CROSS_CBUS_MUTEX (&cbus_mutex) #else #define CROSS_CBUS_MUTEX NULL #endif static struct clk_mux_sel mux_clk_cbus[] = { { .input 
= &tegra_clk_c2bus, .value = 0}, { .input = &tegra_clk_c3bus, .value = 1}, { 0, 0}, }; #define DUAL_CBUS_CLK(_name, _dev, _con, _parent, _id, _div, _mode)\ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ .ops = &tegra_clk_shared_bus_user_ops, \ .parent = _parent, \ .inputs = mux_clk_cbus, \ .flags = MUX, \ .u.shared_bus_user = { \ .client_id = _id, \ .client_div = _div, \ .mode = _mode, \ }, \ .cross_clk_mutex = CROSS_CBUS_MUTEX, \ } #else static struct raw_notifier_head cbus_rate_change_nh; static struct clk tegra_clk_cbus = { .name = "cbus", .parent = &tegra_pll_c, .ops = &tegra_clk_cbus_ops, .max_rate = 700000000, .mul = 1, .div = 2, .flags = PERIPH_ON_CBUS, .shared_bus_backup = { .input = &tegra_pll_p, }, .rate_change_nh = &cbus_rate_change_nh, }; #endif static struct clk_ops tegra_clk_gpu_ops = { .enable = &tegra12_periph_clk_enable, .disable = &tegra12_periph_clk_disable, .reset = &tegra12_periph_clk_reset, }; /* This is a dummy clock for gpu. The enable/disable/reset routine controls input clock of the actual gpu clock. The input clock itself has a fixed frequency. The actual gpu clock's frequency is controlled by gpu driver, not here in clock framework. However, we assoicate this dummy clock with dvfs to control voltage of gpu rail along with frequency change of actual gpu clock. So frequency here and in dvfs are based on the acutal gpu clock. 
*/ static struct clk tegra_clk_gpu = { .name = "gpu_ref", .ops = &tegra_clk_gpu_ops, .parent = &tegra_pll_ref, .u.periph = { .clk_num = 184, }, .max_rate = 48000000, .min_rate = 12000000, }; #define RATE_GRANULARITY 100000 /* 0.1 MHz */ #if defined(CONFIG_TEGRA_CLOCK_DEBUG_FUNC) static int gbus_round_pass_thru; void tegra_gbus_round_pass_thru_enable(bool enable) { if (enable) gbus_round_pass_thru = 1; else gbus_round_pass_thru = 0; } EXPORT_SYMBOL(tegra_gbus_round_pass_thru_enable); #else #define gbus_round_pass_thru 0 #endif static void tegra12_clk_gbus_init(struct clk *c) { unsigned long rate; bool enabled; pr_debug("%s on clock %s (export ops %s)\n", __func__, c->name, c->u.export_clk.ops ? "ready" : "not ready"); if (!c->u.export_clk.ops || !c->u.export_clk.ops->init) return; c->u.export_clk.ops->init(c->u.export_clk.ops->data, &rate, &enabled); c->div = clk_get_rate(c->parent) / RATE_GRANULARITY; c->mul = rate / RATE_GRANULARITY; c->state = enabled ? ON : OFF; } static int tegra12_clk_gbus_enable(struct clk *c) { pr_debug("%s on clock %s (export ops %s)\n", __func__, c->name, c->u.export_clk.ops ? "ready" : "not ready"); if (!c->u.export_clk.ops || !c->u.export_clk.ops->enable) return -ENOENT; return c->u.export_clk.ops->enable(c->u.export_clk.ops->data); } static void tegra12_clk_gbus_disable(struct clk *c) { pr_debug("%s on clock %s (export ops %s)\n", __func__, c->name, c->u.export_clk.ops ? "ready" : "not ready"); if (!c->u.export_clk.ops || !c->u.export_clk.ops->disable) return; c->u.export_clk.ops->disable(c->u.export_clk.ops->data); } static int tegra12_clk_gbus_set_rate(struct clk *c, unsigned long rate) { int ret; pr_debug("%s %lu on clock %s (export ops %s)\n", __func__, rate, c->name, c->u.export_clk.ops ? 
"ready" : "not ready"); if (!c->u.export_clk.ops || !c->u.export_clk.ops->set_rate) return -ENOENT; ret = c->u.export_clk.ops->set_rate(c->u.export_clk.ops->data, &rate); if (!ret) c->mul = rate / RATE_GRANULARITY; return ret; } static long tegra12_clk_gbus_round_updown(struct clk *c, unsigned long rate, bool up) { return gbus_round_pass_thru ? rate : tegra12_clk_cbus_round_updown(c, rate, up); } static long tegra12_clk_gbus_round_rate(struct clk *c, unsigned long rate) { return tegra12_clk_gbus_round_updown(c, rate, true); } static struct clk_ops tegra_clk_gbus_ops = { .init = tegra12_clk_gbus_init, .enable = tegra12_clk_gbus_enable, .disable = tegra12_clk_gbus_disable, .set_rate = tegra12_clk_gbus_set_rate, .round_rate = tegra12_clk_gbus_round_rate, .round_rate_updown = tegra12_clk_gbus_round_updown, .shared_bus_update = tegra12_clk_shared_connector_update, /* re-use */ }; static struct raw_notifier_head gbus_rate_change_nh; static struct clk tegra_clk_gbus = { .name = "gbus", .ops = &tegra_clk_gbus_ops, .parent = &tegra_clk_gpu, .max_rate = 1032000000, .shared_bus_flags = SHARED_BUS_RETENTION, .rate_change_nh = &gbus_rate_change_nh, }; static void tegra12_camera_mclk_init(struct clk *c) { c->state = OFF; c->set = true; if (!strcmp(c->name, "mclk")) { c->parent = tegra_get_clock_by_name("vi_sensor"); c->max_rate = c->parent->max_rate; } else if (!strcmp(c->name, "mclk2")) { c->parent = tegra_get_clock_by_name("vi_sensor2"); c->max_rate = c->parent->max_rate; } } static int tegra12_camera_mclk_set_rate(struct clk *c, unsigned long rate) { return clk_set_rate(c->parent, rate); } static struct clk_ops tegra_camera_mclk_ops = { .init = tegra12_camera_mclk_init, .enable = tegra12_periph_clk_enable, .disable = tegra12_periph_clk_disable, .set_rate = tegra12_camera_mclk_set_rate, }; static struct clk tegra_camera_mclk = { .name = "mclk", .ops = &tegra_camera_mclk_ops, .u.periph = { .clk_num = 92, /* csus */ }, .flags = PERIPH_NO_RESET, }; static struct clk 
tegra_camera_mclk2 = { .name = "mclk2", .ops = &tegra_camera_mclk_ops, .u.periph = { .clk_num = 171, /* vim2_clk */ }, .flags = PERIPH_NO_RESET, }; static struct clk tegra_clk_isp = { .name = "isp", .ops = &tegra_periph_clk_ops, .reg = 0x144, .max_rate = 700000000, .inputs = mux_pllm_pllc_pllp_plla_clkm_pllc4, .flags = MUX | DIV_U71 | PERIPH_NO_ENB | PERIPH_NO_RESET, }; static struct clk_mux_sel mux_isp[] = { { .input = &tegra_clk_isp, .value = 0}, { 0, 0}, }; static struct raw_notifier_head c4bus_rate_change_nh; static struct clk tegra_clk_c4bus = { .name = "c4bus", .parent = &tegra_pll_c4, .ops = &tegra_clk_cbus_ops, .max_rate = 700000000, .mul = 1, .div = 1, .shared_bus_backup = { .input = &tegra_pll_p, }, .rate_change_nh = &c4bus_rate_change_nh, }; #define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ .ops = &tegra_periph_clk_ops, \ .reg = _reg, \ .inputs = _inputs, \ .flags = _flags, \ .max_rate = _max, \ .u.periph = { \ .clk_num = _clk_num, \ }, \ } #define PERIPH_CLK_EX(_name, _dev, _con, _clk_num, _reg, _max, _inputs, \ _flags, _ops) \ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ .ops = _ops, \ .reg = _reg, \ .inputs = _inputs, \ .flags = _flags, \ .max_rate = _max, \ .u.periph = { \ .clk_num = _clk_num, \ }, \ } #define D_AUDIO_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ .ops = &tegra_periph_clk_ops, \ .reg = _reg, \ .inputs = _inputs, \ .flags = _flags, \ .max_rate = _max, \ .u.periph = { \ .clk_num = _clk_num, \ .src_mask = 0xE01F << 16, \ .src_shift = 16, \ }, \ } #define SHARED_CLK(_name, _dev, _con, _parent, _id, _div, _mode)\ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ .ops = &tegra_clk_shared_bus_user_ops, \ .parent = _parent, \ .u.shared_bus_user = { \ .client_id = _id, \ .client_div = _div, \ 
.mode = _mode, \ }, \ } #define SHARED_LIMIT(_name, _dev, _con, _parent, _id, _div, _mode)\ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ .ops = &tegra_clk_shared_bus_user_ops, \ .parent = _parent, \ .flags = BUS_RATE_LIMIT, \ .u.shared_bus_user = { \ .client_id = _id, \ .client_div = _div, \ .mode = _mode, \ }, \ } #define SHARED_CONNECT(_name, _dev, _con, _parent, _id, _div, _mode)\ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ .ops = &tegra_clk_shared_connector_ops, \ .parent = _parent, \ .u.shared_bus_user = { \ .client_id = _id, \ .client_div = _div, \ .mode = _mode, \ }, \ } #define SHARED_EMC_CLK(_name, _dev, _con, _parent, _id, _div, _mode, _flag)\ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ .ops = &tegra_clk_shared_bus_user_ops, \ .parent = _parent, \ .u.shared_bus_user = { \ .client_id = _id, \ .client_div = _div, \ .mode = _mode, \ .usage_flag = _flag, \ }, \ } static DEFINE_MUTEX(sbus_cross_mutex); #define SHARED_SCLK(_name, _dev, _con, _parent, _id, _div, _mode)\ { \ .name = _name, \ .lookup = { \ .dev_id = _dev, \ .con_id = _con, \ }, \ .ops = &tegra_clk_shared_bus_user_ops, \ .parent = _parent, \ .u.shared_bus_user = { \ .client_id = _id, \ .client_div = _div, \ .mode = _mode, \ }, \ .cross_clk_mutex = &sbus_cross_mutex, \ } struct clk tegra_list_clks[] = { PERIPH_CLK("apbdma", "tegra-apbdma", NULL, 34, 0, 26000000, mux_clk_m, 0), PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET | PERIPH_ON_APB), PERIPH_CLK("kbc", "tegra-kbc", NULL, 36, 0, 32768, mux_clk_32k, PERIPH_NO_RESET | PERIPH_ON_APB), PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0), PERIPH_CLK("kfuse", "kfuse-tegra", NULL, 40, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("fuse", "fuse-tegra", "fuse", 39, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("fuse_burn", "fuse-tegra", "fuse_burn", 39, 0, 26000000, mux_clk_m, PERIPH_ON_APB), 
PERIPH_CLK("apbif", "tegra30-ahub", "apbif", 107, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("i2s0", "tegra30-i2s.0", NULL, 30, 0x1d8, 24576000, mux_pllaout0_audio0_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("i2s1", "tegra30-i2s.1", NULL, 11, 0x100, 24576000, mux_pllaout0_audio1_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("i2s2", "tegra30-i2s.2", NULL, 18, 0x104, 24576000, mux_pllaout0_audio2_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("i2s3", "tegra30-i2s.3", NULL, 101, 0x3bc, 24576000, mux_pllaout0_audio3_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("i2s4", "tegra30-i2s.4", NULL, 102, 0x3c0, 24576000, mux_pllaout0_audio4_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("spdif_out", "tegra30-spdif", "spdif_out", 10, 0x108, 24576000, mux_pllaout0_audio_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("spdif_in", "tegra30-spdif", "spdif_in", 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("pwm", "tegra-pwm", NULL, 17, 0x110, 48000000, mux_pllp_pllc_clk32_clkm, MUX | DIV_U71 | PERIPH_ON_APB), D_AUDIO_CLK("d_audio", "tegra30-ahub", "d_audio", 106, 0x3d0, 48000000, mux_d_audio_clk, MUX | DIV_U71 | PERIPH_ON_APB), D_AUDIO_CLK("dam0", "tegra30-dam.0", NULL, 108, 0x3d8, 40000000, mux_d_audio_clk, MUX | DIV_U71 | PERIPH_ON_APB), D_AUDIO_CLK("dam1", "tegra30-dam.1", NULL, 109, 0x3dc, 40000000, mux_d_audio_clk, MUX | DIV_U71 | PERIPH_ON_APB), D_AUDIO_CLK("dam2", "tegra30-dam.2", NULL, 110, 0x3e0, 40000000, mux_d_audio_clk, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("adx", "adx", NULL, 154, 0x638, 24580000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("adx1", "adx1", NULL, 180, 0x670, 24580000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("amx", "amx", NULL, 153, 0x63c, 24600000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("amx1", "amx1", NULL, 185, 0x674, 24600000, mux_plla_pllc_pllp_clkm, MUX | 
DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("afc0", "tegra124-afc.0", NULL, 186, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("afc1", "tegra124-afc.1", NULL, 187, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("afc2", "tegra124-afc.2", NULL, 188, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("afc3", "tegra124-afc.3", NULL, 189, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("afc4", "tegra124-afc.4", NULL, 190, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("afc5", "tegra124-afc.5", NULL, 191, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("hda", "tegra30-hda", "hda", 125, 0x428, 108000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("hda2codec_2x", "tegra30-hda", "hda2codec", 111, 0x3e4, 48000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("hda2hdmi", "tegra30-hda", "hda2hdmi", 128, 0, 48000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("sbc1", "spi-tegra114.0", NULL, 41, 0x134, 33000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sbc2", "spi-tegra114.1", NULL, 44, 0x118, 33000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sbc3", "spi-tegra114.2", NULL, 46, 0x11c, 33000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sbc4", "spi-tegra114.3", NULL, 68, 0x1b4, 33000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sbc5", "spi-tegra114.4", NULL, 104, 0x3c8, 33000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sbc6", "spi-tegra114.5", NULL, 105, 0x3cc, 33000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sata_oob", "tegra_sata_oob", NULL, 123, 0x420, 216000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sata", "tegra_sata", NULL, 124, 0x424, 216000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sata_cold", "tegra_sata_cold", NULL, 129, 0, 48000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("vfir", "vfir", 
NULL, 7, 0x168, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 208000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 200000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 208000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x164, 200000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sdmmc1_ddr", "sdhci-tegra.0", "ddr", 14, 0x150, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sdmmc3_ddr", "sdhci-tegra.2", "ddr", 69, 0x1bc, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("sdmmc4_ddr", "sdhci-tegra.3", "ddr", 15, 0x164, 102000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("vcp", "tegra-avp", "vcp", 29, 0, 250000000, mux_clk_m, 0), PERIPH_CLK("bsea", "tegra-avp", "bsea", 62, 0, 250000000, mux_clk_m, 0), PERIPH_CLK("bsev", "tegra-aes", "bsev", 63, 0, 250000000, mux_clk_m, 0), PERIPH_CLK("cec", "tegra_cec", NULL, 136, 0, 250000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("vde", "vde", NULL, 61, 0x1c8, 600000000, mux_pllp_pllc2_c_c3_pllm_clkm, MUX | DIV_U71 | DIV_U71_INT), PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("trace", "trace", NULL, 77, 0x634, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("owr", "tegra_w1", NULL, 71, 0x1cc, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("nor", "tegra-nor", NULL, 42, 0x1d0, 127000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), PERIPH_CLK("mipi", "mipi", NULL, 50, 0x174, 60000000, mux_pllp_pllc_pllm_clkm, 
MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("i2c1", "tegra12-i2c.0", "div-clk", 12, 0x124, 136000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB), PERIPH_CLK("i2c2", "tegra12-i2c.1", "div-clk", 54, 0x198, 136000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB), PERIPH_CLK("i2c3", "tegra12-i2c.2", "div-clk", 67, 0x1b8, 136000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB), PERIPH_CLK("i2c4", "tegra12-i2c.3", "div-clk", 103, 0x3c4, 136000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB), PERIPH_CLK("i2c5", "tegra12-i2c.4", "div-clk", 47, 0x128, 136000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB), PERIPH_CLK("i2c6", "tegra12-i2c.5", "div-clk", 166, 0x65c, 58300000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB), PERIPH_CLK("mipi-cal", "mipi-cal", NULL, 56, 0, 60000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("mipi-cal-fixed", "mipi-cal-fixed", NULL, 0, 0, 108000000, mux_pllp_out3, PERIPH_NO_ENB), PERIPH_CLK("uarta", "serial-tegra.0", NULL, 6, 0x178, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB), PERIPH_CLK("uartb", "serial-tegra.1", NULL, 7, 0x17c, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB), PERIPH_CLK("uartc", "serial-tegra.2", NULL, 55, 0x1a0, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB), PERIPH_CLK("uartd", "serial-tegra.3", NULL, 65, 0x1c0, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U151 | DIV_U151_UART | PERIPH_ON_APB), PERIPH_CLK("vic03", "vic03", NULL, 178, 0x678, 900000000, mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, MUX | DIV_U71), PERIPH_CLK_EX("vi", "vi", "vi", 20, 0x148, 700000000, mux_pllm_pllc_pllp_plla_pllc4, MUX | DIV_U71 | DIV_U71_INT, &tegra_vi_clk_ops), PERIPH_CLK("vi_sensor", NULL, "vi_sensor", 164, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), PERIPH_CLK("vi_sensor2", NULL, "vi_sensor2", 165, 0x658, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), PERIPH_CLK_EX("msenc", 
"msenc", NULL, 91, 0x1f0, 600000000, mux_pllm_pllc2_c_c3_pllp_plla, MUX | DIV_U71 | DIV_U71_INT, &tegra_msenc_clk_ops), PERIPH_CLK("tsec", "tsec", NULL, 83, 0x1f4, 900000000, mux_pllp_pllc2_c_c3_pllm_clkm, MUX | DIV_U71 | DIV_U71_INT), PERIPH_CLK_EX("dtv", "dtv", NULL, 79, 0x1dc, 250000000, mux_clk_m, PERIPH_ON_APB, &tegra_dtv_clk_ops), PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 594000000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX | DIV_U71), PERIPH_CLK("disp1", "tegradc.0", NULL, 27, 0x138, 600000000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX), PERIPH_CLK("disp2", "tegradc.1", NULL, 26, 0x13c, 600000000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX), PERIPH_CLK_EX("sor0", "sor0", NULL, 182, 0x414, 540000000, mux_pllp_pllm_plld_plla_pllc_plld2_clkm, MUX | DIV_U71, &tegra_sor_clk_ops), PERIPH_CLK("dpaux", "dpaux", NULL, 181, 0, 24000000, mux_clk_m, 0), PERIPH_CLK("usbd", "tegra-udc.0", NULL, 22, 0, 480000000, mux_clk_m, 0), PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), PERIPH_CLK_EX("dsia", "tegradc.0", "dsia", 48, 0xd0, 750000000, mux_plld_out0, PLLD, &tegra_dsi_clk_ops), PERIPH_CLK_EX("dsib", "tegradc.1", "dsib", 82, 0x4b8, 750000000, mux_plld_out0, PLLD, &tegra_dsi_clk_ops), PERIPH_CLK("dsi1-fixed", "tegradc.0", "dsi-fixed", 0, 0, 108000000, mux_pllp_out3, PERIPH_NO_ENB), PERIPH_CLK("dsi2-fixed", "tegradc.1", "dsi-fixed", 0, 0, 108000000, mux_pllp_out3, PERIPH_NO_ENB), PERIPH_CLK("csi", "vi", "csi", 52, 0, 750000000, mux_plld, PLLD), PERIPH_CLK("ispa", "isp", "ispa", 23, 0, 700000000, mux_isp, PERIPH_ON_APB), PERIPH_CLK("ispb", "isp", "ispb", 3, 0, 700000000, mux_isp, PERIPH_ON_APB), PERIPH_CLK("csus", "vi", "csus", 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET), PERIPH_CLK("vim2_clk", "vi", "vim2_clk", 171, 0, 150000000, mux_clk_m, PERIPH_NO_RESET), PERIPH_CLK("cilab", "vi", "cilab", 144, 0x614, 102000000, mux_pllp_pllc_clkm, MUX | DIV_U71), 
PERIPH_CLK("cilcd", "vi", "cilcd", 145, 0x618, 102000000, mux_pllp_pllc_clkm, MUX | DIV_U71), PERIPH_CLK("cile", "vi", "cile", 146, 0x61c, 102000000, mux_pllp_pllc_clkm, MUX | DIV_U71), PERIPH_CLK("dsialp", "tegradc.0", "dsialp", 147, 0x620, 156000000, mux_pllp_pllc_clkm, MUX | DIV_U71), PERIPH_CLK("dsiblp", "tegradc.1", "dsiblp", 148, 0x624, 156000000, mux_pllp_pllc_clkm, MUX | DIV_U71), PERIPH_CLK("entropy", "entropy", NULL, 149, 0x628, 102000000, mux_pllp_clkm_clk32_plle, MUX | DIV_U71), PERIPH_CLK("hdmi_audio", "hdmi_audio", NULL, 176, 0x668, 48000000, mux_pllp_pllc_clkm1, MUX | DIV_U71 | PERIPH_NO_RESET), PERIPH_CLK("clk72mhz", "clk72mhz", NULL, 177, 0x66c, 102000000, mux_pllp3_pllc_clkm, MUX | DIV_U71 | PERIPH_NO_RESET), PERIPH_CLK("tsensor", "tegra-tsensor", NULL, 100, 0x3b8, 12000000, mux_pllp_pllc_clkm_clk32, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("actmon", "actmon", NULL, 119, 0x3e8, 216000000, mux_pllp_pllc_clk32_clkm, MUX | DIV_U71), PERIPH_CLK("extern1", "extern1", NULL, 120, 0x3ec, 216000000, mux_plla_clk32_pllp_clkm_plle, MUX | DIV_U71), PERIPH_CLK("extern2", "extern2", NULL, 121, 0x3f0, 216000000, mux_plla_clk32_pllp_clkm_plle, MUX | DIV_U71), PERIPH_CLK("extern3", "extern3", NULL, 122, 0x3f4, 216000000, mux_plla_clk32_pllp_clkm_plle, MUX | DIV_U71), PERIPH_CLK("i2cslow", "i2cslow", NULL, 81, 0x3fc, 26000000, mux_pllp_pllc_clk32_clkm, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("pcie", "tegra-pcie", "pcie", 70, 0, 250000000, mux_clk_m, 0), PERIPH_CLK("afi", "tegra-pcie", "afi", 72, 0, 250000000, mux_clk_m, 0), PERIPH_CLK("se", "se", NULL, 127, 0x42c, 600000000, mux_pllp_pllc2_c_c3_pllm_clkm, MUX | DIV_U71 | DIV_U71_INT | PERIPH_ON_APB), PERIPH_CLK("cl_dvfs_ref", "tegra_cl_dvfs", "ref", 155, 0x62c, 54000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_INT | PERIPH_ON_APB), PERIPH_CLK("cl_dvfs_soc", "tegra_cl_dvfs", "soc", 155, 0x630, 54000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_INT | PERIPH_ON_APB), PERIPH_CLK("soc_therm", "soc_therm", NULL, 78, 
0x644, 136000000, mux_pllm_pllc_pllp_plla_v2, MUX | DIV_U71 | PERIPH_ON_APB), PERIPH_CLK("dds", "dds", NULL, 150, 0, 26000000, mux_clk_m, PERIPH_ON_APB), PERIPH_CLK("dp2", "dp2", NULL, 152, 0, 26000000, mux_clk_m, PERIPH_ON_APB), SHARED_SCLK("automotive.hclk", "automotive", "hclk", &tegra_clk_ahb, NULL, 0, 0), SHARED_SCLK("automotive.pclk", "automotive", "pclk", &tegra_clk_apb, NULL, 0, 0), SHARED_SCLK("avp.sclk", "tegra-avp", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("bsea.sclk", "tegra-aes", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("usbd.sclk", "tegra-udc.0", "sclk", &tegra_clk_ahb, NULL, 0, 0), SHARED_SCLK("usb1.sclk", "tegra-ehci.0", "sclk", &tegra_clk_ahb, NULL, 0, 0), SHARED_SCLK("usb2.sclk", "tegra-ehci.1", "sclk", &tegra_clk_ahb, NULL, 0, 0), SHARED_SCLK("usb3.sclk", "tegra-ehci.2", "sclk", &tegra_clk_ahb, NULL, 0, 0), SHARED_SCLK("wake.sclk", "wake_sclk", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("automotive.sclk", "automotive", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("via.sclk", "tegra_vi.0", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("vib.sclk", "tegra_vi.1", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("ispa.sclk", "tegra_isp.0", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("ispb.sclk", "tegra_isp.1", "sclk", &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("mon.avp", "tegra_actmon", "avp", &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("cap.sclk", "cap_sclk", NULL, &tegra_clk_sbus_cmplx, NULL, 0, SHARED_CEILING), SHARED_SCLK("cap.vcore.sclk", "cap.vcore.sclk", NULL, &tegra_clk_sbus_cmplx, NULL, 0, SHARED_CEILING), SHARED_SCLK("cap.throttle.sclk", "cap_throttle", NULL, &tegra_clk_sbus_cmplx, NULL, 0, SHARED_CEILING), SHARED_SCLK("floor.sclk", "floor_sclk", NULL, &tegra_clk_sbus_cmplx, NULL, 0, 0), SHARED_SCLK("override.sclk", "override_sclk", NULL, &tegra_clk_sbus_cmplx, NULL, 0, SHARED_OVERRIDE), SHARED_SCLK("sbc1.sclk", "tegra12-spi.0", "sclk", 
&tegra_clk_apb, NULL, 0, 0), SHARED_SCLK("sbc2.sclk", "tegra12-spi.1", "sclk", &tegra_clk_apb, NULL, 0, 0), SHARED_SCLK("sbc3.sclk", "tegra12-spi.2", "sclk", &tegra_clk_apb, NULL, 0, 0), SHARED_SCLK("sbc4.sclk", "tegra12-spi.3", "sclk", &tegra_clk_apb, NULL, 0, 0), SHARED_SCLK("sbc5.sclk", "tegra12-spi.4", "sclk", &tegra_clk_apb, NULL, 0, 0), SHARED_SCLK("sbc6.sclk", "tegra12-spi.5", "sclk", &tegra_clk_apb, NULL, 0, 0), SHARED_EMC_CLK("avp.emc", "tegra-avp", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("mon_cpu.emc", "tegra_mon", "cpu_emc", &tegra_clk_emc, NULL, 0, 0, 0), #ifdef CONFIG_ARCH_TEGRA_13x_SOC SHARED_EMC_CLK("cpu.emc", "tegra-cpu", "cpu_emc", &tegra_clk_emc, NULL, 0, 0, 0), #endif SHARED_EMC_CLK("disp1.emc", "tegradc.0", "emc", &tegra_clk_emc, NULL, 0, SHARED_ISO_BW, BIT(EMC_USER_DC1)), SHARED_EMC_CLK("disp2.emc", "tegradc.1", "emc", &tegra_clk_emc, NULL, 0, SHARED_ISO_BW, BIT(EMC_USER_DC2)), SHARED_EMC_CLK("disp1.la.emc", "tegradc.0", "emc.la", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("disp2.la.emc", "tegradc.1", "emc.la", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("hdmi.emc", "hdmi", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("usbd.emc", "tegra-udc.0", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("usb1.emc", "tegra-ehci.0", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("usb2.emc", "tegra-ehci.1", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("usb3.emc", "tegra-ehci.2", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("sdmmc3.emc", "sdhci-tegra.2","emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("sdmmc4.emc", "sdhci-tegra.3","emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("mon.emc", "tegra_actmon", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("cap.emc", "cap.emc", NULL, &tegra_clk_emc, NULL, 0, SHARED_CEILING, 0), SHARED_EMC_CLK("cap.vcore.emc", "cap.vcore.emc", NULL, &tegra_clk_emc, NULL, 0, SHARED_CEILING, 0), SHARED_EMC_CLK("cap.throttle.emc", "cap_throttle", NULL, 
&tegra_clk_emc, NULL, 0, SHARED_CEILING_BUT_ISO, 0), SHARED_EMC_CLK("3d.emc", "tegra_gk20a.0", "emc", &tegra_clk_emc, NULL, 0, 0, BIT(EMC_USER_3D)), SHARED_EMC_CLK("msenc.emc", "tegra_msenc", "emc", &tegra_clk_emc, NULL, 0, SHARED_BW, BIT(EMC_USER_MSENC)), SHARED_EMC_CLK("tsec.emc", "tegra_tsec", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("via.emc", "tegra_vi.0", "emc", &tegra_clk_emc, NULL, 0, SHARED_ISO_BW, BIT(EMC_USER_VI)), SHARED_EMC_CLK("vib.emc", "tegra_vi.1", "emc", &tegra_clk_emc, NULL, 0, SHARED_ISO_BW, BIT(EMC_USER_VI2)), SHARED_EMC_CLK("ispa.emc", "tegra_isp.0", "emc", &tegra_clk_emc, NULL, 0, SHARED_ISO_BW, BIT(EMC_USER_ISP1)), SHARED_EMC_CLK("ispb.emc", "tegra_isp.1", "emc", &tegra_clk_emc, NULL, 0, SHARED_ISO_BW, BIT(EMC_USER_ISP2)), SHARED_EMC_CLK("iso.emc", "iso", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("override.emc", "override.emc", NULL, &tegra_clk_emc, NULL, 0, SHARED_OVERRIDE, 0), SHARED_EMC_CLK("vic.emc", "tegra_vic03.0", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_EMC_CLK("battery.emc", "battery_edp", "emc", &tegra_clk_emc, NULL, 0, SHARED_CEILING, 0), SHARED_EMC_CLK("pcie.emc", "tegra_pcie", "emc", &tegra_clk_emc, NULL, 0, 0, 0), SHARED_LIMIT("floor.emc", "floor.emc", NULL, &tegra_clk_emc, NULL, 0, 0), SHARED_LIMIT("floor.profile.emc", "profile.emc", "floor", &tegra_clk_emc, NULL, 0, 0), #ifdef CONFIG_TEGRA_DUAL_CBUS DUAL_CBUS_CLK("msenc.cbus", "tegra_msenc", "msenc", &tegra_clk_c2bus, "msenc", 0, 0), DUAL_CBUS_CLK("vde.cbus", "tegra-avp", "vde", &tegra_clk_c2bus, "vde", 0, 0), DUAL_CBUS_CLK("se.cbus", "tegra12-se", NULL, &tegra_clk_c2bus, "se", 0, 0), SHARED_LIMIT("cap.c2bus", "cap.c2bus", NULL, &tegra_clk_c2bus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("cap.vcore.c2bus", "cap.vcore.c2bus", NULL, &tegra_clk_c2bus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("cap.throttle.c2bus", "cap_throttle", NULL, &tegra_clk_c2bus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("floor.c2bus", "floor.c2bus", NULL, &tegra_clk_c2bus, NULL, 0, 
0), SHARED_CLK("override.c2bus", "override.c2bus", NULL, &tegra_clk_c2bus, NULL, 0, SHARED_OVERRIDE), DUAL_CBUS_CLK("vic03.cbus", "tegra_vic03.0", "vic03", &tegra_clk_c3bus, "vic03", 0, 0), DUAL_CBUS_CLK("tsec.cbus", "tegra_tsec", "tsec", &tegra_clk_c3bus, "tsec", 0, 0), SHARED_LIMIT("cap.c3bus", "cap.c3bus", NULL, &tegra_clk_c3bus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("cap.vcore.c3bus", "cap.vcore.c3bus", NULL, &tegra_clk_c3bus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("cap.throttle.c3bus", "cap_throttle", NULL, &tegra_clk_c3bus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("floor.c3bus", "floor.c3bus", NULL, &tegra_clk_c3bus, NULL, 0, 0), SHARED_CLK("override.c3bus", "override.c3bus", NULL, &tegra_clk_c3bus, NULL, 0, SHARED_OVERRIDE), #else SHARED_CLK("vic03.cbus", "tegra_vic03.0", "vic03", &tegra_clk_cbus, "vic03", 0, 0), SHARED_CLK("msenc.cbus","tegra_msenc", "msenc",&tegra_clk_cbus, "msenc", 0, 0), SHARED_CLK("tsec.cbus", "tegra_tsec", "tsec", &tegra_clk_cbus, "tsec", 0, 0), SHARED_CLK("vde.cbus", "tegra-avp", "vde", &tegra_clk_cbus, "vde", 0, 0), SHARED_CLK("se.cbus", "tegra12-se", NULL, &tegra_clk_cbus, "se", 0, 0), SHARED_LIMIT("cap.cbus", "cap.cbus", NULL, &tegra_clk_cbus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("cap.vcore.cbus", "cap.vcore.cbus", NULL, &tegra_clk_cbus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("cap.throttle.cbus", "cap_throttle", NULL, &tegra_clk_cbus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("floor.cbus", "floor.cbus", NULL, &tegra_clk_cbus, NULL, 0, 0), SHARED_CLK("override.cbus", "override.cbus", NULL, &tegra_clk_cbus, NULL, 0, SHARED_OVERRIDE), #endif SHARED_CLK("gk20a.gbus", "tegra_gk20a", "gpu", &tegra_clk_gbus, NULL, 0, 0), SHARED_LIMIT("cap.gbus", "cap.gbus", NULL, &tegra_clk_gbus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("edp.gbus", "edp.gbus", NULL, &tegra_clk_gbus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("battery.gbus", "battery_edp", "gpu", &tegra_clk_gbus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("cap.throttle.gbus", "cap_throttle", NULL, 
&tegra_clk_gbus, NULL, 0, SHARED_CEILING), SHARED_LIMIT("cap.profile.gbus", "profile.gbus", "cap", &tegra_clk_gbus, NULL, 0, SHARED_CEILING), SHARED_CLK("override.gbus", "override.gbus", NULL, &tegra_clk_gbus, NULL, 0, SHARED_OVERRIDE), SHARED_LIMIT("floor.gbus", "floor.gbus", NULL, &tegra_clk_gbus, NULL, 0, 0), SHARED_LIMIT("floor.profile.gbus", "profile.gbus", "floor", &tegra_clk_gbus, NULL, 0, 0), SHARED_CLK("automotive.host1x", "automotive", "host1x", &tegra_clk_host1x, NULL, 0, 0), SHARED_CLK("nv.host1x", "tegra_host1x", "host1x", &tegra_clk_host1x, NULL, 0, 0), SHARED_CLK("vi.host1x", "tegra_vi", "host1x", &tegra_clk_host1x, NULL, 0, 0), SHARED_LIMIT("cap.host1x", "cap.host1x", NULL, &tegra_clk_host1x, NULL, 0, SHARED_CEILING), SHARED_LIMIT("cap.vcore.host1x", "cap.vcore.host1x", NULL, &tegra_clk_host1x, NULL, 0, SHARED_CEILING), SHARED_LIMIT("floor.host1x", "floor.host1x", NULL, &tegra_clk_host1x, NULL, 0, 0), SHARED_CLK("override.host1x", "override.host1x", NULL, &tegra_clk_host1x, NULL, 0, SHARED_OVERRIDE), SHARED_CLK("cpu.mselect", "cpu", "mselect", &tegra_clk_mselect, NULL, 0, 0), SHARED_CLK("pcie.mselect", "tegra_pcie", "mselect", &tegra_clk_mselect, NULL, 0, 0), SHARED_CLK("automotive.mselect", "automotive", "mselect", &tegra_clk_mselect, NULL, 0, 0), SHARED_LIMIT("cap.vcore.mselect", "cap.vcore.mselect", NULL, &tegra_clk_mselect, NULL, 0, SHARED_CEILING), SHARED_CLK("override.mselect", "override.mselect", NULL, &tegra_clk_mselect, NULL, 0, SHARED_OVERRIDE), }; /* VI, ISP buses */ static struct clk tegra_visp_clks[] = { SHARED_CONNECT("vi.c4bus", "vi.c4bus", NULL, &tegra_clk_c4bus, "vi", 0, 0), SHARED_CONNECT("isp.c4bus", "isp.c4bus", NULL, &tegra_clk_c4bus, "isp", 0, 0), SHARED_CLK("override.c4bus", "override.c4bus", NULL, &tegra_clk_c4bus, NULL, 0, SHARED_OVERRIDE), SHARED_CLK("via.vi.c4bus", "via.vi", NULL, &tegra_visp_clks[0], NULL, 0, 0), SHARED_CLK("vib.vi.c4bus", "vib.vi", NULL, &tegra_visp_clks[0], NULL, 0, 0), SHARED_CLK("ispa.isp.c4bus", 
"ispa.isp", NULL, &tegra_visp_clks[1], "ispa", 0, 0),
	SHARED_CLK("ispb.isp.c4bus", "ispb.isp", NULL,
			&tegra_visp_clks[1], "ispb", 0, 0),
};

/* XUSB clocks */
#define XUSB_ID "tegra-xhci"

/* xusb common clock gate - enabled on init and never disabled */
static void tegra12_xusb_gate_clk_init(struct clk *c)
{
	tegra12_periph_clk_enable(c);
}

static struct clk_ops tegra_xusb_gate_clk_ops = {
	.init = tegra12_xusb_gate_clk_init,
};

/*
 * Common gate for the XUSB partitions (clk_num 143). ENABLE_ON_INIT keeps
 * it on from boot; PERIPH_NO_RESET because there is no dedicated reset for
 * the shared gate.
 */
static struct clk tegra_clk_xusb_gate = {
	.name      = "xusb_gate",
	.flags     = ENABLE_ON_INIT | PERIPH_NO_RESET,
	.ops       = &tegra_xusb_gate_clk_ops,
	.rate      = 12000000,
	.max_rate  = 48000000,
	.u.periph = {
		.clk_num   = 143,
	},
};

/*
 * XUSB source clocks: host/falcon/fs/ss/dev dividers. Note host/falcon/
 * fs/ss all share clk_num 143 (the common gate above); dev_src uses 95.
 */
static struct clk tegra_xusb_source_clks[] = {
	PERIPH_CLK("xusb_host_src", XUSB_ID, "host_src", 143, 0x600,
		112000000, mux_clkm_pllp_pllc_pllre,
		MUX | DIV_U71 | PERIPH_NO_RESET | PERIPH_ON_APB),
	PERIPH_CLK("xusb_falcon_src", XUSB_ID, "falcon_src", 143, 0x604,
		336000000, mux_clkm_pllp_pllc_pllre,
		MUX | DIV_U71 | PERIPH_NO_RESET),
	PERIPH_CLK("xusb_fs_src", XUSB_ID, "fs_src", 143, 0x608,
		48000000, mux_clkm_48M_pllp_480M,
		MUX | DIV_U71 | PERIPH_NO_RESET),
	PERIPH_CLK("xusb_ss_src", XUSB_ID, "ss_src", 143, 0x610,
		120000000, mux_clkm_pllre_clk32_480M_pllc_ref,
		MUX | DIV_U71 | PERIPH_NO_RESET),
	PERIPH_CLK("xusb_dev_src", XUSB_ID, "dev_src", 95, 0x60c,
		112000000, mux_clkm_pllp_pllc_pllre,
		MUX | DIV_U71 | PERIPH_NO_RESET | PERIPH_ON_APB),
	SHARED_EMC_CLK("xusb.emc", XUSB_ID, "emc", &tegra_clk_emc,
		NULL, 0, SHARED_BW, 0),
};

/* Fixed /2 divider fed by xusb_ss_src (tegra_xusb_source_clks[3]) */
static struct clk tegra_xusb_ss_div2 = {
	.name      = "xusb_ss_div2",
	.ops       = &tegra_clk_m_div_ops,
	.parent    = &tegra_xusb_source_clks[3],
	.mul       = 1,
	.div       = 2,
	.state     = OFF,
	.max_rate  = 61200000,
};

static struct clk_mux_sel mux_ss_div2_pllu_60M[] = {
	{ .input = &tegra_xusb_ss_div2, .value = 0},
	{ .input = &tegra_pll_u_60M, .value = 1},
	{ 0, 0},
};

/*
 * HS source: a 1-bit mux at bit 25 of the ss_src register (0x610),
 * selecting between ss_div2 and the 60 MHz PLLU tap. PERIPH_NO_ENB:
 * it has no gate of its own.
 */
static struct clk tegra_xusb_hs_src = {
	.name      = "xusb_hs_src",
	.lookup    = {
		.dev_id    = XUSB_ID,
		.con_id    = "hs_src",
	},
	.ops       = &tegra_periph_clk_ops,
	.reg       = 0x610,
	.inputs    = mux_ss_div2_pllu_60M,
	.flags     = MUX | PLLU | PERIPH_NO_ENB,
	.max_rate  = 60000000,
	.u.periph = {
		.src_mask  = 0x1 << 25,
		.src_shift = 25,
	},
};

static struct clk_mux_sel mux_xusb_host[] = {
	{ .input = &tegra_xusb_source_clks[0], .value = 0},
	{ .input = &tegra_xusb_source_clks[1], .value = 1},
	{ .input = &tegra_xusb_source_clks[2], .value = 2},
	{ .input = &tegra_xusb_hs_src, .value = 5},
	{ 0, 0},
};

static struct clk_mux_sel mux_xusb_ss[] = {
	{ .input = &tegra_xusb_source_clks[3], .value = 3},
	{ .input = &tegra_xusb_source_clks[0], .value = 0},
	{ .input = &tegra_xusb_source_clks[1], .value = 1},
	{ 0, 0},
};

static struct clk_mux_sel mux_xusb_dev[] = {
	{ .input = &tegra_xusb_source_clks[4], .value = 4},
	{ .input = &tegra_xusb_source_clks[2], .value = 2},
	{ .input = &tegra_xusb_source_clks[3], .value = 3},
	{ 0, 0},
};

/*
 * Coupled gates: these use tegra_clk_coupled_gate_ops so enabling the
 * partition clock also tracks the source clocks selected by the muxes.
 */
static struct clk tegra_xusb_coupled_clks[] = {
	PERIPH_CLK_EX("xusb_host", XUSB_ID, "host", 89, 0, 350000000,
		mux_xusb_host, 0, &tegra_clk_coupled_gate_ops),
	PERIPH_CLK_EX("xusb_ss", XUSB_ID, "ss", 156, 0, 350000000,
		mux_xusb_ss, 0, &tegra_clk_coupled_gate_ops),
	PERIPH_CLK_EX("xusb_dev", XUSB_ID, "dev", 95, 0, 120000000,
		mux_xusb_dev, 0, &tegra_clk_coupled_gate_ops),
};

#define CLK_DUPLICATE(_name, _dev, _con)	\
	{					\
		.name	= _name,		\
		.lookup	= {			\
			.dev_id	= _dev,		\
			.con_id	= _con,		\
		},				\
	}

/* Some clocks may be used by different drivers depending on the board
 * configuration. List those here to register them twice in the clock lookup
 * table under two names.
 */
struct clk_duplicate tegra_clk_duplicates[] = {
	/* legacy 8250 UART device-name aliases */
	CLK_DUPLICATE("uarta", "serial8250.0", NULL),
	CLK_DUPLICATE("uartb", "serial8250.1", NULL),
	CLK_DUPLICATE("uartc", "serial8250.2", NULL),
	CLK_DUPLICATE("uartd", "serial8250.3", NULL),
	/* USB: usbd clock shared by XUSB pad, EHCI and OTG */
	CLK_DUPLICATE("usbd", XUSB_ID, "utmip-pad"),
	CLK_DUPLICATE("usbd", "utmip-pad", NULL),
	CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL),
	CLK_DUPLICATE("usbd", "tegra-otg", NULL),
	/* display / DSI / HDMI aliases */
	CLK_DUPLICATE("disp1", "tegra_dc_dsi_vs1.0", NULL),
	CLK_DUPLICATE("disp1.emc", "tegra_dc_dsi_vs1.0", "emc"),
	CLK_DUPLICATE("disp1", "tegra_dc_dsi_vs1.1", NULL),
	CLK_DUPLICATE("disp1.emc", "tegra_dc_dsi_vs1.1", "emc"),
	CLK_DUPLICATE("hdmi", "tegradc.0", "hdmi"),
	CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"),
	CLK_DUPLICATE("dsib", "tegradc.0", "dsib"),
	CLK_DUPLICATE("dsia", "tegradc.1", "dsia"),
	CLK_DUPLICATE("dsiblp", "tegradc.0", "dsiblp"),
	CLK_DUPLICATE("dsialp", "tegradc.1", "dsialp"),
	CLK_DUPLICATE("dsia", "tegra_dc_dsi_vs1.0", "dsia"),
	CLK_DUPLICATE("dsia", "tegra_dc_dsi_vs1.1", "dsia"),
	CLK_DUPLICATE("dsialp", "tegra_dc_dsi_vs1.0", "dsialp"),
	CLK_DUPLICATE("dsialp", "tegra_dc_dsi_vs1.1", "dsialp"),
	CLK_DUPLICATE("dsi1-fixed", "tegra_dc_dsi_vs1.0", "dsi-fixed"),
	CLK_DUPLICATE("dsi1-fixed", "tegra_dc_dsi_vs1.1", "dsi-fixed"),
	/* AVP / AES */
	CLK_DUPLICATE("cop", "tegra-avp", "cop"),
	CLK_DUPLICATE("bsev", "tegra-avp", "bsev"),
	CLK_DUPLICATE("cop", "nvavp", "cop"),
	CLK_DUPLICATE("bsev", "nvavp", "bsev"),
	CLK_DUPLICATE("vde", "tegra-aes", "vde"),
	CLK_DUPLICATE("bsea", "tegra-aes", "bsea"),
	CLK_DUPLICATE("bsea", "nvavp", "bsea"),
	/* SATA / PCIe link clocks */
	CLK_DUPLICATE("cml1", "tegra_sata_cml", NULL),
	CLK_DUPLICATE("cml0", "tegra_pcie", "cml"),
	CLK_DUPLICATE("pciex", "tegra_pcie", "pciex"),
	CLK_DUPLICATE("clk_m", NULL, "apb_pclk"),
	/* I2C slave-mode aliases */
	CLK_DUPLICATE("i2c1", "tegra-i2c-slave.0", NULL),
	CLK_DUPLICATE("i2c2", "tegra-i2c-slave.1", NULL),
	CLK_DUPLICATE("i2c3", "tegra-i2c-slave.2", NULL),
	CLK_DUPLICATE("i2c4", "tegra-i2c-slave.3", NULL),
	CLK_DUPLICATE("i2c5", "tegra-i2c-slave.4", NULL),
	CLK_DUPLICATE("cl_dvfs_ref", "tegra12-i2c.4", NULL),
	CLK_DUPLICATE("cl_dvfs_soc", "tegra12-i2c.4", NULL),
	/* SPI slave-mode aliases */
	CLK_DUPLICATE("sbc1", "tegra11-spi-slave.0", NULL),
	CLK_DUPLICATE("sbc2", "tegra11-spi-slave.1", NULL),
	CLK_DUPLICATE("sbc3", "tegra11-spi-slave.2", NULL),
	CLK_DUPLICATE("sbc4", "tegra11-spi-slave.3", NULL),
	CLK_DUPLICATE("sbc5", "tegra11-spi-slave.4", NULL),
	CLK_DUPLICATE("sbc6", "tegra11-spi-slave.5", NULL),
	/* nvavp firmware interface */
	CLK_DUPLICATE("vcp", "nvavp", "vcp"),
	CLK_DUPLICATE("avp.sclk", "nvavp", "sclk"),
	CLK_DUPLICATE("avp.emc", "nvavp", "emc"),
	CLK_DUPLICATE("vde.cbus", "nvavp", "vde"),
	CLK_DUPLICATE("i2c5", "tegra_cl_dvfs", "i2c"),
	CLK_DUPLICATE("cpu_g", "tegra_cl_dvfs", "safe_dvfs"),
	CLK_DUPLICATE("actmon", "tegra_host1x", "actmon"),
	/* GPU (gk20a) */
	CLK_DUPLICATE("gpu_ref", "tegra_gk20a.0", "PLLG_ref"),
	CLK_DUPLICATE("gbus", "tegra_gk20a.0", "PLLG_out"),
	CLK_DUPLICATE("pll_p_out5", "tegra_gk20a.0", "pwr"),
	/* VI / ISP per-instance aliases */
	CLK_DUPLICATE("ispa.isp.c4bus", "tegra_isp.0", "isp"),
	CLK_DUPLICATE("ispb.isp.c4bus", "tegra_isp.1", "isp"),
	CLK_DUPLICATE("via.vi.c4bus", "tegra_vi.0", "vi"),
	CLK_DUPLICATE("vib.vi.c4bus", "tegra_vi.1", "vi"),
	CLK_DUPLICATE("csi", "tegra_vi.0", "csi"),
	CLK_DUPLICATE("csi", "tegra_vi.1", "csi"),
	CLK_DUPLICATE("csus", "tegra_vi.0", "csus"),
	CLK_DUPLICATE("csus", "tegra_vi.1", "csus"),
	CLK_DUPLICATE("vim2_clk", "tegra_vi.0", "vim2_clk"),
	CLK_DUPLICATE("vim2_clk", "tegra_vi.1", "vim2_clk"),
	CLK_DUPLICATE("cilab", "tegra_vi.0", "cilab"),
	CLK_DUPLICATE("cilab", "tegra_vi.1", "cilab"),
	CLK_DUPLICATE("cilcd", "tegra_vi.0", "cilcd"),
	CLK_DUPLICATE("cilcd", "tegra_vi.1", "cilcd"),
	CLK_DUPLICATE("cile", "tegra_vi.0", "cile"),
	CLK_DUPLICATE("cile", "tegra_vi.1", "cile"),
	/* audio (AHUB) connection-id aliases */
	CLK_DUPLICATE("i2s0", NULL, "i2s0"),
	CLK_DUPLICATE("i2s1", NULL, "i2s1"),
	CLK_DUPLICATE("i2s2", NULL, "i2s2"),
	CLK_DUPLICATE("i2s3", NULL, "i2s3"),
	CLK_DUPLICATE("i2s4", NULL, "i2s4"),
	CLK_DUPLICATE("dam0", NULL, "dam0"),
	CLK_DUPLICATE("dam1", NULL, "dam1"),
	CLK_DUPLICATE("dam2", NULL, "dam2"),
	CLK_DUPLICATE("spdif_in", NULL, "spdif_in"),
	CLK_DUPLICATE("mclk", NULL, "default_mclk"),
	CLK_DUPLICATE("amx", NULL, "amx"),
	CLK_DUPLICATE("amx1", NULL, "amx1"),
	CLK_DUPLICATE("adx", NULL, "adx"),
	CLK_DUPLICATE("adx1", NULL, "adx1"),
	CLK_DUPLICATE("afc0", NULL, "afc0"),
	CLK_DUPLICATE("afc1", NULL, "afc1"),
	CLK_DUPLICATE("afc2", NULL, "afc2"),
	CLK_DUPLICATE("afc3", NULL, "afc3"),
	CLK_DUPLICATE("afc4", NULL, "afc4"),
	CLK_DUPLICATE("afc5", NULL, "afc5"),
	CLK_DUPLICATE("amx", "tegra124-amx.0", NULL),
	CLK_DUPLICATE("amx1", "tegra124-amx.1", NULL),
	CLK_DUPLICATE("adx", "tegra124-adx.0", NULL),
	CLK_DUPLICATE("adx1", "tegra124-adx.1", NULL),
	CLK_DUPLICATE("amx", "tegra30-ahub-apbif", "amx"),
	CLK_DUPLICATE("amx1", "tegra30-ahub-apbif", "amx1"),
	CLK_DUPLICATE("adx", "tegra30-ahub-apbif", "adx"),
	CLK_DUPLICATE("adx1", "tegra30-ahub-apbif", "adx1"),
	CLK_DUPLICATE("d_audio", "tegra30-ahub-xbar", "d_audio"),
	CLK_DUPLICATE("apbif", "tegra30-ahub-apbif", "apbif"),
	CLK_DUPLICATE("afc0", "tegra30-ahub-apbif", "afc0"),
	CLK_DUPLICATE("afc1", "tegra30-ahub-apbif", "afc1"),
	CLK_DUPLICATE("afc2", "tegra30-ahub-apbif", "afc2"),
	CLK_DUPLICATE("afc3", "tegra30-ahub-apbif", "afc3"),
	CLK_DUPLICATE("afc4", "tegra30-ahub-apbif", "afc4"),
	CLK_DUPLICATE("afc5", "tegra30-ahub-apbif", "afc5"),
	CLK_DUPLICATE("cpu_g", "tegra_simon", "cpu"),
};

/* Fixed-rate and PLL clocks registered individually (not via a table) */
struct clk *tegra_ptr_clks[] = {
	&tegra_clk_32k,
	&tegra_clk_m,
	&tegra_clk_m_div2,
	&tegra_clk_m_div4,
	&tegra_pll_ref,
	&tegra_pll_m,
	&tegra_pll_m_out1,
	&tegra_pll_c,
	&tegra_pll_c_out1,
	&tegra_pll_c2,
	&tegra_pll_c3,
	&tegra_pll_p,
	&tegra_pll_p_out1,
	&tegra_pll_p_out2,
	&tegra_pll_p_out3,
	&tegra_pll_p_out4,
	&tegra_pll_p_out5,
	&tegra_pll_a,
	&tegra_pll_a_out0,
	&tegra_pll_d,
	&tegra_pll_d_out0,
	&tegra_clk_xusb_gate,
	&tegra_pll_u,
	&tegra_pll_u_480M,
	&tegra_pll_u_60M,
	&tegra_pll_u_48M,
	&tegra_pll_u_12M,
	&tegra_pll_x,
	&tegra_pll_x_out0,
	&tegra_dfll_cpu,
	&tegra_pll_d2,
	&tegra_pll_c4,
	&tegra_pll_dp,
	&tegra_pll_re_vco,
	&tegra_pll_re_out,
	&tegra_pll_e,
	&tegra_cml0_clk,
	&tegra_cml1_clk,
	&tegra_pciex_clk,
	&tegra_clk_cclk_g,
#ifndef CONFIG_ARCH_TEGRA_13x_SOC &tegra_clk_cclk_lp, #endif &tegra_clk_sclk, &tegra_clk_hclk, &tegra_clk_pclk, &tegra_clk_virtual_cpu_g, #ifndef CONFIG_ARCH_TEGRA_13x_SOC &tegra_clk_virtual_cpu_lp, #endif &tegra_clk_cpu_cmplx, &tegra_clk_blink, &tegra_clk_cop, &tegra_clk_sbus_cmplx, &tegra_clk_ahb, &tegra_clk_apb, &tegra_clk_emc, &tegra_clk_mc, &tegra_clk_host1x, &tegra_clk_mselect, #ifdef CONFIG_TEGRA_DUAL_CBUS &tegra_clk_c2bus, &tegra_clk_c3bus, #else &tegra_clk_cbus, #endif &tegra_clk_gpu, &tegra_clk_gbus, &tegra_clk_isp, &tegra_clk_c4bus, }; struct clk *tegra_ptr_camera_mclks[] = { &tegra_camera_mclk, &tegra_camera_mclk2, }; /* * Use this API only when all the clocks are not registered to the clock * subsystem. */ static struct clk *query_clk_from_list(char *clk_name) { int i; if (!clk_name) return NULL; for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++) if (!strcmp(tegra_list_clks[i].name, clk_name)) return &tegra_list_clks[i]; return NULL; } /* * Handle special clocks to check if they can be set to safe rate */ static bool tegra12_periph_is_special_reset(struct clk *c) { struct clk *temp; if (!strcmp(c->name, "isp")) { /* Make sure that ispa and ispb are in reset */ /* * Since clocks may not have been registered by this time, * so query clock structure directly from the list */ temp = query_clk_from_list("ispa"); if (!temp) return false; /* If ispa is not in reset, return false */ if (!IS_PERIPH_IN_RESET(temp)) return false; temp = query_clk_from_list("ispb"); if (!temp) return false; /* If ispb is not in reset, return false */ if (!IS_PERIPH_IN_RESET(temp)) return false; return true; } if (!strcmp(c->name, "vi_sensor") || !strcmp(c->name, "vi_sensor2")) { temp = query_clk_from_list("vi"); if (!temp) return false; /* If vi is not in reset, return false */ if (!IS_PERIPH_IN_RESET(temp)) return false; return true; } if (!strcmp(c->name, "hdmi_audio")) { temp = query_clk_from_list("hdmi"); if (!temp) return false; /* If hdmi is not in reset, return false */ if 
(!IS_PERIPH_IN_RESET(temp)) return false; return true; } return false; } /* Return true from this function if the target rate can be locked without switching pll clients to back-up source */ static bool tegra12_is_dyn_ramp( struct clk *c, unsigned long rate, bool from_vco_min) { #if PLLCX_USE_DYN_RAMP /* PLLC2, PLLC3 support dynamic ramp only when output divider <= 8 */ if ((c == &tegra_pll_c2) || (c == &tegra_pll_c3)) { struct clk_pll_freq_table cfg, old_cfg; unsigned long input_rate = clk_get_rate(c->parent); u32 val = clk_readl(c->reg + PLL_BASE); PLL_BASE_PARSE(PLLCX, old_cfg, val); old_cfg.p = pllcx_p[old_cfg.p]; if (!pll_dyn_ramp_find_cfg(c, &cfg, rate, input_rate, NULL)) { if ((cfg.n == old_cfg.n) || PLLCX_IS_DYN(cfg.p, old_cfg.p)) return true; } } #endif #if PLLXC_USE_DYN_RAMP /* PPLX, PLLC support dynamic ramp when changing NDIV only */ if ((c == &tegra_pll_x) || (c == &tegra_pll_c)) { struct clk_pll_freq_table cfg, old_cfg; unsigned long input_rate = clk_get_rate(c->parent); if (from_vco_min) { old_cfg.m = PLL_FIXED_MDIV(c, input_rate); old_cfg.p = 1; } else { if (c->flags & PLLX) u32 val = clk_readlx(c->reg + PLL_BASE); else u32 val = clk_readl(c->reg + PLL_BASE); PLL_BASE_PARSE(PLLXC, old_cfg, val); old_cfg.p = pllxc_p[old_cfg.p]; } if (!pll_dyn_ramp_find_cfg(c, &cfg, rate, input_rate, NULL)) { if ((cfg.m == old_cfg.m) && (cfg.p == old_cfg.p)) return true; } } #endif return false; } /* DFLL late init called with CPU clock lock taken */ static void __init tegra12_dfll_cpu_late_init(struct clk *c) { #ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS int ret; struct clk *cpu = &tegra_clk_virtual_cpu_g; if (!cpu || !cpu->dvfs) { pr_err("%s: CPU dvfs is not present\n", __func__); return; } tegra_dvfs_set_dfll_tune_trimmers(cpu->dvfs, tune_cpu_trimmers); /* release dfll clock source reset, init cl_dvfs control logic, and move dfll to initialized state, so it can be used as CPU source */ tegra_periph_reset_deassert(c); ret = tegra_init_cl_dvfs(); if (!ret) { c->state = OFF; 
if (tegra_platform_is_silicon()) {
			/* Device-tree/cmdline override can disable DFLL use */
			if (tegra_override_dfll_range !=
					TEGRA_USE_DFLL_CDEV_CNTRL)
				use_dfll = CONFIG_TEGRA_USE_DFLL_RANGE;
#ifdef CONFIG_ARCH_TEGRA_13x_SOC
			if (tegra_cpu_speedo_id() == 0)
				use_dfll = 0;
#endif
		}
		tegra_dvfs_set_dfll_range(cpu->dvfs, use_dfll);
		tegra_cl_dvfs_debug_init(c);
		pr_info("Tegra CPU DFLL is initialized with use_dfll = %d\n",
			use_dfll);
	}
#endif
}

/*
 * Backup pll is used as transitional CPU clock source while main pll is
 * relocking; in addition all CPU rates below backup level are sourced from
 * backup pll only. Target backup levels for each CPU mode are selected high
 * enough to avoid voltage droop when CPU clock is switched between backup and
 * main plls. Actual backup rates will be rounded to match backup source fixed
 * frequency. Backup rates are also used as stay-on-backup thresholds, and must
 * be kept the same in G and LP mode (will need to add a separate stay-on-backup
 * parameter to allow different backup rates if necessary).
 *
 * Sbus threshold must be exact factor of pll_p rate.
 */
#define CPU_G_BACKUP_RATE_TARGET 200000000
#define CPU_LP_BACKUP_RATE_TARGET 200000000

/*
 * Fix up default rates and thresholds that depend on the (board-fixed)
 * PLLP rate. Only 216/408/204 MHz are supported; anything else is a BUG().
 */
static void tegra12_pllp_init_dependencies(unsigned long pllp_rate)
{
#ifndef CONFIG_ARCH_TEGRA_13x_SOC
	u32 div;
	unsigned long backup_rate;
#endif

	switch (pllp_rate) {
	case 216000000:
		tegra_pll_p_out1.u.pll_div.default_rate = 28800000;
		tegra_pll_p_out3.u.pll_div.default_rate = 72000000;
		tegra_clk_sbus_cmplx.u.system.threshold = 108000000;
		tegra_clk_host1x.u.periph.threshold = 108000000;
		break;
	case 408000000:
		tegra_pll_p_out1.u.pll_div.default_rate = 9600000;
		tegra_pll_p_out3.u.pll_div.default_rate = 102000000;
		tegra_clk_sbus_cmplx.u.system.threshold = 204000000;
		tegra_clk_host1x.u.periph.threshold = 204000000;
		break;
	case 204000000:
		tegra_pll_p_out1.u.pll_div.default_rate = 4800000;
		tegra_pll_p_out3.u.pll_div.default_rate = 102000000;
		tegra_clk_sbus_cmplx.u.system.threshold = 204000000;
		tegra_clk_host1x.u.periph.threshold = 204000000;
		break;
	default:
		pr_err("tegra: PLLP rate: %lu is not supported\n", pllp_rate);
		BUG();
	}
	pr_info("tegra: PLLP fixed rate: %lu\n", pllp_rate);

#ifndef CONFIG_ARCH_TEGRA_13x_SOC
	/* Round backup rates to the nearest exact divider of pllp_rate */
	div = pllp_rate / CPU_G_BACKUP_RATE_TARGET;
	backup_rate = pllp_rate / div;
	tegra_clk_virtual_cpu_g.u.cpu.backup_rate = backup_rate;
	div = pllp_rate / CPU_LP_BACKUP_RATE_TARGET;
	backup_rate = pllp_rate / div;
	tegra_clk_virtual_cpu_lp.u.cpu.backup_rate = backup_rate;
#else
	tegra_clk_virtual_cpu_g.u.cpu.backup_rate = pllp_rate;
#endif
}

/* Init a single clock and register it in the clkdev lookup table */
static void tegra12_init_one_clock(struct clk *c)
{
	clk_init(c);
	INIT_LIST_HEAD(&c->shared_bus_list);
	/* Fall back to the clock name when no lookup ids were provided */
	if (!c->lookup.dev_id && !c->lookup.con_id)
		c->lookup.con_id = c->name;
	c->lookup.clk = c;
	clkdev_add(&c->lookup);
}

/* Direct access to CPU clock sources for CPU idle driver */
int tegra12_cpu_g_idle_rate_exchange(unsigned long *rate)
{
	int ret = 0;
	struct clk *dfll = tegra_clk_cpu_cmplx.parent->u.cpu.dynamic;
	unsigned long old_rate, new_rate, flags;

	if (!dfll || !tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
		return -EPERM;

	/* Clipping min to
oscillator rate is pretty much arbitrary */
	new_rate = max(*rate, tegra_clk_m.rate);

	clk_lock_save(dfll, &flags);
	old_rate = clk_get_rate_locked(dfll);
	/* Hand the caller the rate to restore on idle exit */
	*rate = old_rate;
	if (new_rate != old_rate)
		ret = clk_set_rate_locked(dfll, new_rate);
	clk_unlock_restore(dfll, &flags);
	return ret;
}

/*
 * LP cluster counterpart of the exchange above: idle rate is clipped to the
 * backup (pll_p-derived) rate instead of the oscillator rate.
 */
int tegra12_cpu_lp_idle_rate_exchange(unsigned long *rate)
{
	int ret = 0;
	struct clk *backup = tegra_clk_cpu_cmplx.parent->u.cpu.backup;
	unsigned long old_rate, flags;
	unsigned long new_rate = min(
		*rate, tegra_clk_cpu_cmplx.parent->u.cpu.backup_rate);

	clk_lock_save(backup, &flags);
	old_rate = clk_get_rate_locked(backup);
	*rate = old_rate;
	if (new_rate != old_rate)
		ret = clk_set_rate_locked(backup, new_rate);
	clk_unlock_restore(backup, &flags);
	return ret;
}

void tegra_edp_throttle_cpu_now(u8 factor)
{
	/* empty definition for tegra12 */
	return;
}

/*
 * Run-time parent policy check: may clock "c" be sourced from parent "p"?
 */
bool tegra_clk_is_parent_allowed(struct clk *c, struct clk *p)
{
	/*
	 * Most of the Tegra12 multimedia and peripheral muxes include pll_c2
	 * and pll_c3 as possible inputs. However, per clock policy these plls
	 * are allowed to be used only by handful devices aggregated on cbus.
	 * For all others, instead of enforcing policy at run-time in this
	 * function, we simply stripped out pll_c2 and pll_c3 options from the
	 * respective muxes statically.
	 */

	/*
	 * In configuration with dual cbus pll_c can be used as a scaled clock
	 * source for EMC only when pll_m is fixed, or as a general fixed rate
	 * clock source for EMC and other peripherals if pll_m is scaled. In
	 * configuration with single cbus pll_c can be used as a scaled cbus
	 * clock source only. No direct use for pll_c by super clocks.
	 */
	if ((p == &tegra_pll_c) && (c != &tegra_pll_c_out1)) {
		if (c->ops == &tegra_super_ops)
			return false;
#ifdef CONFIG_TEGRA_DUAL_CBUS
#ifndef CONFIG_TEGRA_PLLM_SCALED
		return c->flags & PERIPH_EMC_ENB;
#endif
#else
		return c->flags & PERIPH_ON_CBUS;
#endif
	}

	/*
	 * In any configuration pll_m must not be used as a clock source for
	 * cbus modules.
If pll_m is scaled it can be used as EMC source only. * Otherwise fixed rate pll_m can be used as clock source for EMC and * other peripherals. No direct use for pll_m by super clocks. */ if ((p == &tegra_pll_m) && (c != &tegra_pll_m_out1)) { if (c->ops == &tegra_super_ops) return false; if (c->flags & PERIPH_ON_CBUS) return false; #ifdef CONFIG_TEGRA_PLLM_SCALED return c->flags & PERIPH_EMC_ENB; #endif } return true; } /* Internal LA may request some clocks to be enabled on init via TRANSACTION SCRATCH register settings */ void __init tegra12x_clk_init_la(void) { struct clk *c; u32 reg = readl((void *) ((uintptr_t)misc_gp_base + MISC_GP_TRANSACTOR_SCRATCH_0)); if (!(reg & MISC_GP_TRANSACTOR_SCRATCH_LA_ENABLE)) return; c = tegra_get_clock_by_name("la"); if (WARN(!c, "%s: could not find la clk\n", __func__)) return; clk_enable(c); if (reg & MISC_GP_TRANSACTOR_SCRATCH_DDS_ENABLE) { c = tegra_get_clock_by_name("dds"); if (WARN(!c, "%s: could not find la clk\n", __func__)) return; clk_enable(c); } if (reg & MISC_GP_TRANSACTOR_SCRATCH_DP2_ENABLE) { c = tegra_get_clock_by_name("dp2"); if (WARN(!c, "%s: could not find la clk\n", __func__)) return; clk_enable(c); c = tegra_get_clock_by_name("hdmi"); if (WARN(!c, "%s: could not find la clk\n", __func__)) return; clk_enable(c); } } #ifdef CONFIG_CPU_FREQ /* * Frequency table index must be sequential starting at 0 and frequencies * must be ascending. 
 */
#define CPU_FREQ_STEP 102000 /* 102MHz cpu_g table step */
#define CPU_FREQ_TABLE_MAX_SIZE (2 * MAX_DVFS_FREQS + 1)

static struct cpufreq_frequency_table freq_table[CPU_FREQ_TABLE_MAX_SIZE];
static struct tegra_cpufreq_table_data freq_table_data;

#ifndef CONFIG_ARCH_TEGRA_13x_SOC
/*
 * Build the cpufreq table once, merging the LP CPU dvfs ladder (below LP max)
 * with the G CPU dvfs ladder (above G-cluster Vmin rate), plus backup-level
 * entries at the bottom. Returns NULL if dvfs data is missing/inconsistent.
 */
struct tegra_cpufreq_table_data *tegra_cpufreq_table_get(void)
{
	int i, j;
	bool g_vmin_done = false;
	unsigned int freq, lp_backup_freq, g_vmin_freq, g_start_freq, max_freq;
	struct clk *cpu_clk_g = tegra_get_clock_by_name("cpu_g");
	struct clk *cpu_clk_lp = tegra_get_clock_by_name("cpu_lp");

	/* Initialize once */
	if (freq_table_data.freq_table)
		return &freq_table_data;

	/* Clean table */
	for (i = 0; i < CPU_FREQ_TABLE_MAX_SIZE; i++) {
		freq_table[i].index = i;
		freq_table[i].frequency = CPUFREQ_TABLE_END;
	}

	lp_backup_freq = cpu_clk_lp->u.cpu.backup_rate / 1000;
	if (!lp_backup_freq) {
		WARN(1, "%s: cannot make cpufreq table: no LP CPU backup rate\n",
		     __func__);
		return NULL;
	}
	if (!cpu_clk_lp->dvfs) {
		WARN(1, "%s: cannot make cpufreq table: no LP CPU dvfs\n",
		     __func__);
		return NULL;
	}
	if (!cpu_clk_g->dvfs) {
		WARN(1, "%s: cannot make cpufreq table: no G CPU dvfs\n",
		     __func__);
		return NULL;
	}
	g_vmin_freq = cpu_clk_g->dvfs->freqs[0] / 1000;
	if (g_vmin_freq < lp_backup_freq) {
		WARN(1, "%s: cannot make cpufreq table: LP CPU backup rate"
			" exceeds G CPU rate at Vmin\n", __func__);
		return NULL;
	}
	/* Avoid duplicate frequency if g_vmin_freq is already part of table */
	if (g_vmin_freq == lp_backup_freq)
		g_vmin_done = true;

	/* Start with backup frequencies */
	i = 0;
	freq = lp_backup_freq;
	freq_table[i++].frequency = freq/4;
	freq_table[i++].frequency = freq/2;
	freq_table[i++].frequency = freq;

	/* Throttle low index at backup level*/
	freq_table_data.throttle_lowest_index = i - 1;

	/*
	 * Next, set table steps along LP CPU dvfs ladder, but make sure G CPU
	 * dvfs rate at minimum voltage is not missed (if it happens to be below
	 * LP maximum rate)
	 */
	max_freq = cpu_clk_lp->max_rate / 1000;
	for (j = 0; j < cpu_clk_lp->dvfs->num_freqs; j++) {
		freq = cpu_clk_lp->dvfs->freqs[j] / 1000;
		if (freq <= lp_backup_freq)
			continue;

		if (!g_vmin_done && (freq >= g_vmin_freq)) {
			g_vmin_done = true;
			if (freq > g_vmin_freq)
				freq_table[i++].frequency = g_vmin_freq;
		}
		freq_table[i++].frequency = freq;

		if (freq == max_freq)
			break;
	}

	/* Set G CPU min rate at least one table step below LP maximum */
	cpu_clk_g->min_rate = min(freq_table[i-2].frequency, g_vmin_freq)*1000;

	/* Suspend index at max LP CPU */
	freq_table_data.suspend_index = i - 1;

	/* Fill in "hole" (if any) between LP CPU maximum rate and G CPU dvfs
	   ladder rate at minimum voltage */
	if (freq < g_vmin_freq) {
		int n = (g_vmin_freq - freq) / CPU_FREQ_STEP;
		for (j = 0; j <= n; j++) {
			freq = g_vmin_freq - CPU_FREQ_STEP * (n - j);
			freq_table[i++].frequency = freq;
		}
	}

	/* Now, step along the rest of G CPU dvfs ladder */
	g_start_freq = freq;
	max_freq = cpu_clk_g->max_rate / 1000;
	for (j = 0; j < cpu_clk_g->dvfs->num_freqs; j++) {
		freq = cpu_clk_g->dvfs->freqs[j] / 1000;
		if (freq > g_start_freq)
			freq_table[i++].frequency = freq;
		if (freq == max_freq)
			break;
	}

	/* Throttle high index one step below maximum */
	BUG_ON(i >= CPU_FREQ_TABLE_MAX_SIZE);
	freq_table_data.throttle_highest_index = i - 2;
	freq_table_data.freq_table = freq_table;
	return &freq_table_data;
}
#else

#define GRANULARITY_KHZ 25500
#define GRANULARITY_END 1020000
#define CPU_THROTTLE_FREQ 408000
#define CPU_SUSPEND_FREQ 408000

/*
 * Tegra13x (single G cluster) variant: fixed 25.5 MHz grid up to 1.02 GHz,
 * then the G CPU dvfs ladder above that.
 */
struct tegra_cpufreq_table_data *tegra_cpufreq_table_get(void)
{
	int i, j;
	unsigned int freq, max_freq, cpu_min_freq;
	struct clk *cpu_clk_g = tegra_get_clock_by_name("cpu_g");

	/* Initialize once */
	if (freq_table_data.freq_table)
		return &freq_table_data;

	/* Clean table */
	for (i = 0; i < CPU_FREQ_TABLE_MAX_SIZE; i++) {
		freq_table[i].index = i;
		freq_table[i].frequency = CPUFREQ_TABLE_END;
	}

	if (!cpu_clk_g->dvfs) {
		WARN(1, "%s: cannot make cpufreq table: no CPU dvfs\n",
		     __func__);
		return NULL;
	}
	cpu_min_freq = 204000;

	cpu_clk_g->min_rate = cpu_min_freq*1000;

	i = 0;
	freq_table[i++].frequency = cpu_min_freq;
	for (j=1; j <= (GRANULARITY_END - cpu_min_freq)/GRANULARITY_KHZ; j++)
		freq_table[i++].frequency = cpu_min_freq + j*GRANULARITY_KHZ;

	/* Now, step along the rest of G CPU dvfs ladder */
	max_freq = cpu_clk_g->max_rate / 1000;
	for (j = 0; j < cpu_clk_g->dvfs->num_freqs; j++) {
		freq = cpu_clk_g->dvfs->freqs[j] / 1000;
		if (freq > GRANULARITY_END)
			freq_table[i++].frequency = freq;
		if (freq == max_freq)
			break;
	}

	/* Find indices straddling the throttle/suspend thresholds */
	freq_table_data.throttle_lowest_index = 0;
	freq_table_data.suspend_index = 0;
	for (j = 1; j < i; j++) {
		if ((freq_table[j].frequency > CPU_THROTTLE_FREQ) &&
		    (freq_table[j-1].frequency <= CPU_THROTTLE_FREQ))
			freq_table_data.throttle_lowest_index = j - 1;
		if ((freq_table[j].frequency > CPU_SUSPEND_FREQ) &&
		    (freq_table[j-1].frequency <= CPU_SUSPEND_FREQ))
			freq_table_data.suspend_index = j - 1;
	}

	/* Throttle high index one step below maximum */
	BUG_ON(i >= CPU_FREQ_TABLE_MAX_SIZE);
	freq_table_data.throttle_highest_index = i - 2;
	freq_table_data.freq_table = freq_table;
	return &freq_table_data;
}
#endif

/* EMC/CPU frequency ratio for power/performance optimization */
unsigned long tegra_emc_to_cpu_ratio(unsigned long cpu_rate)
{
	static unsigned long emc_max_rate;

	if (emc_max_rate == 0)
		emc_max_rate = clk_round_rate(
			tegra_get_clock_by_name("emc"), ULONG_MAX);

	/* Vote on memory bus frequency based on cpu frequency;
	   cpu rate is in kHz, emc rate is in Hz */
	if (cpu_rate >= 1300000)
		return emc_max_rate;	/* cpu >= 1.3GHz, emc max */
	else if (cpu_rate >= 975000)
		return 550000000;	/* cpu >= 975 MHz, emc 550 MHz */
	else if (cpu_rate >= 725000)
		return 350000000;	/* cpu >= 725 MHz, emc 350 MHz */
	else if (cpu_rate >= 500000)
		return 150000000;	/* cpu >= 500 MHz, emc 150 MHz */
	else if (cpu_rate >= 275000)
		return 50000000;	/* cpu >= 275 MHz, emc 50 MHz */
	else
		return 0;		/* emc min */
}

#ifdef CONFIG_ARCH_TEGRA_13x_SOC
/* EMC/CPU frequency operational requirement limit */
unsigned long tegra_emc_cpu_limit(unsigned
long cpu_rate)
{
	static unsigned long last_emc_rate;
	unsigned long emc_rate;

	/* Vote on memory bus frequency based on cpu frequency;
	   cpu rate is in kHz, emc rate is in Hz */
	if ((tegra_revision != TEGRA_REVISION_A01) &&
	    (tegra_revision != TEGRA_REVISION_A02))
		return 0; /* no frequency dependency for A03+ revisions */

	if (cpu_rate > 1020000)
		emc_rate = 600000000;	/* cpu > 1.02GHz, emc 600MHz */
	else
		emc_rate = 300000000;	/* 300MHz floor always */

	/* When going down, allow some time for CPU DFLL to settle */
	if (emc_rate < last_emc_rate)
		udelay(200);		/* FIXME: to be characterized */

	last_emc_rate = emc_rate;
	return emc_rate;
}
#endif

/* Scale mselect with CPU rate (half of cpu rate, capped at 102 MHz) */
int tegra_update_mselect_rate(unsigned long cpu_rate)
{
	static struct clk *mselect;	/* statics init to 0 */
	unsigned long mselect_rate;

	if (!mselect) {
		mselect = tegra_get_clock_by_name("cpu.mselect");
		if (!mselect)
			return -ENODEV;
	}

	/* Vote on mselect frequency based on cpu frequency:
	   keep mselect at half of cpu rate up to 102 MHz;
	   cpu rate is in kHz, mselect rate is in Hz */
	mselect_rate = DIV_ROUND_UP(cpu_rate, 2) * 1000;
	mselect_rate = min(mselect_rate, 102000000UL);
	return clk_set_rate(mselect, mselect_rate);
}
#endif

#ifdef CONFIG_PM_SLEEP
/*
 * Context save area for suspend/resume. NOTE: the save order in
 * tegra12_clk_suspend() and the restore order in tegra12_clk_resume()
 * are strictly positional and MUST stay in sync.
 */
static u32 clk_rst_suspend[RST_DEVICES_NUM + CLK_OUT_ENB_NUM +
			   PERIPH_CLK_SOURCE_NUM + 25];

static int tegra12_clk_suspend(void)
{
	unsigned long off;
	u32 *ctx = clk_rst_suspend;

	*ctx++ = clk_readl(OSC_CTRL) & OSC_CTRL_MASK;
	*ctx++ = clk_readl(CPU_SOFTRST_CTRL);
	*ctx++ = clk_readl(CPU_SOFTRST_CTRL1);
	*ctx++ = clk_readl(CPU_SOFTRST_CTRL2);
	*ctx++ = clk_readl(tegra_pll_p_out1.reg);
	*ctx++ = clk_readl(tegra_pll_p_out3.reg);
	*ctx++ = clk_readl(tegra_pll_p_out5.reg);
	*ctx++ = clk_readl(tegra_pll_a.reg + PLL_BASE);
	*ctx++ = clk_readl(tegra_pll_a.reg + PLL_MISC(&tegra_pll_a));
	*ctx++ = clk_readl(tegra_pll_d.reg + PLL_BASE);
	*ctx++ = clk_readl(tegra_pll_d.reg + PLL_MISC(&tegra_pll_d));
	*ctx++ = clk_readl(tegra_pll_m_out1.reg);
	*ctx++ = clk_readl(tegra_pll_a_out0.reg);
	*ctx++ = clk_readl(tegra_pll_c_out1.reg);
#ifndef CONFIG_ARCH_TEGRA_13x_SOC
	*ctx++ = clk_readl(tegra_clk_cclk_lp.reg);
	*ctx++ = clk_readl(tegra_clk_cclk_lp.reg + SUPER_CLK_DIVIDER);
#endif
	*ctx++ = clk_readl(tegra_clk_sclk.reg);
	*ctx++ = clk_readl(tegra_clk_sclk.reg + SUPER_CLK_DIVIDER);
	*ctx++ = clk_readl(tegra_clk_pclk.reg);

	/* EMC source is skipped: it is re-initialized on resume instead */
	for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_LA;
			off += 4) {
		if (off == PERIPH_CLK_SOURCE_EMC)
			continue;
		*ctx++ = clk_readl(off);
	}
	for (off = PERIPH_CLK_SOURCE_MSELECT; off <= PERIPH_CLK_SOURCE_SE;
			off+=4) {
		*ctx++ = clk_readl(off);
	}
	for (off = AUDIO_DLY_CLK; off <= AUDIO_SYNC_CLK_SPDIF; off+=4) {
		*ctx++ = clk_readl(off);
	}
	for (off = PERIPH_CLK_SOURCE_XUSB_HOST;
		off <= PERIPH_CLK_SOURCE_VIC; off += 4)
		*ctx++ = clk_readl(off);

	*ctx++ = clk_readl(RST_DEVICES_L);
	*ctx++ = clk_readl(RST_DEVICES_H);
	*ctx++ = clk_readl(RST_DEVICES_U);
	*ctx++ = clk_readl(RST_DEVICES_V);
	*ctx++ = clk_readl(RST_DEVICES_W);
	*ctx++ = clk_readl(RST_DEVICES_X);

	*ctx++ = clk_readl(CLK_OUT_ENB_L);
	*ctx++ = clk_readl(CLK_OUT_ENB_H);
	*ctx++ = clk_readl(CLK_OUT_ENB_U);
	*ctx++ = clk_readl(CLK_OUT_ENB_V);
	*ctx++ = clk_readl(CLK_OUT_ENB_W);
	*ctx++ = clk_readl(CLK_OUT_ENB_X);

	*ctx++ = clk_readlx(tegra_clk_cclk_g.reg);
	*ctx++ = clk_readlx(tegra_clk_cclk_g.reg + SUPER_CLK_DIVIDER);

	*ctx++ = clk_readl(SPARE_REG);
	*ctx++ = clk_readl(MISC_CLK_ENB);
	*ctx++ = clk_readl(CLK_MASK_ARM);

	*ctx++ = clk_get_rate_all_locked(&tegra_clk_emc);

	pr_debug("%s: suspend entries: %d, suspend array: %u\n", __func__,
		(s32)(ctx - clk_rst_suspend), (u32)ARRAY_SIZE(clk_rst_suspend));
	BUG_ON((ctx - clk_rst_suspend) > ARRAY_SIZE(clk_rst_suspend));
	return 0;
}

static void tegra12_clk_resume(void)
{
	unsigned long off, rate;
	const u32 *ctx = clk_rst_suspend;
	u32 val;
	u32 plla_base;
	u32 plld_base;
	u32 pll_p_out12, pll_p_out34;
	u32 pll_a_out0, pll_m_out1, pll_c_out1;
	struct clk *p;

	/* FIXME: OSC_CTRL already restored by warm boot code? */
	val = clk_readl(OSC_CTRL) & ~OSC_CTRL_MASK;
	val |= *ctx++;
	clk_writel(val, OSC_CTRL);
	clk_writel(*ctx++, CPU_SOFTRST_CTRL);
	clk_writel(*ctx++, CPU_SOFTRST_CTRL1);
	clk_writel(*ctx++, CPU_SOFTRST_CTRL2);

	/* Since we are going to reset devices and switch clock sources in this
	 * function, plls and secondary dividers is required to be enabled. The
	 * actual value will be restored back later. Note that boot plls: pllm,
	 * pllp, and pllu are already configured and enabled */
	val = PLL_OUT_CLKEN | PLL_OUT_RESET_DISABLE;
	val |= val << 16;
	pll_p_out12 = *ctx++;
	clk_writel(pll_p_out12 | val, tegra_pll_p_out1.reg);
	pll_p_out34 = *ctx++;
	clk_writel(pll_p_out34 | val, tegra_pll_p_out3.reg);

	/* Restore as is, GPU is rail-gated, anyway */
	clk_writel(*ctx++, tegra_pll_p_out5.reg);

	tegra12_pllss_clk_resume_enable(&tegra_pll_c4);
	tegra12_pllss_clk_resume_enable(&tegra_pll_d2);
	tegra12_pllss_clk_resume_enable(&tegra_pll_dp);
	tegra12_pllcx_clk_resume_enable(&tegra_pll_c2);
	tegra12_pllcx_clk_resume_enable(&tegra_pll_c3);
	tegra12_pllxc_clk_resume_enable(&tegra_pll_c);
	tegra12_pllxc_clk_resume_enable(&tegra_pll_x);
	tegra12_pllre_clk_resume_enable(&tegra_pll_re_out);

	plla_base = *ctx++;
	clk_writel(*ctx++, tegra_pll_a.reg + PLL_MISC(&tegra_pll_a));
	clk_writel(plla_base | PLL_BASE_ENABLE, tegra_pll_a.reg + PLL_BASE);

	plld_base = *ctx++;
	clk_writel(*ctx++, tegra_pll_d.reg + PLL_MISC(&tegra_pll_d));
	clk_writel(plld_base | PLL_BASE_ENABLE, tegra_pll_d.reg + PLL_BASE);

	/* Give the PLLs time to lock before using them downstream */
	udelay(1000);

	val = PLL_OUT_CLKEN | PLL_OUT_RESET_DISABLE;
	pll_m_out1 = *ctx++;
	clk_writel(pll_m_out1 | val, tegra_pll_m_out1.reg);
	pll_a_out0 = *ctx++;
	clk_writel(pll_a_out0 | val, tegra_pll_a_out0.reg);
	pll_c_out1 = *ctx++;
	clk_writel(pll_c_out1 | val, tegra_pll_c_out1.reg);

#ifndef CONFIG_ARCH_TEGRA_13x_SOC
	val = *ctx++;
	tegra12_super_clk_resume(&tegra_clk_cclk_lp,
		tegra_clk_virtual_cpu_lp.u.cpu.backup, val);
	clk_writel(*ctx++, tegra_clk_cclk_lp.reg + SUPER_CLK_DIVIDER);
#endif

	clk_writel(*ctx++, tegra_clk_sclk.reg);
	clk_writel(*ctx++, tegra_clk_sclk.reg + SUPER_CLK_DIVIDER);
	clk_writel(*ctx++, tegra_clk_pclk.reg);

	/* enable all clocks before configuring clock sources */
	clk_writel(CLK_OUT_ENB_L_RESET_MASK, CLK_OUT_ENB_L);
	clk_writel(CLK_OUT_ENB_H_RESET_MASK, CLK_OUT_ENB_H);
	clk_writel(CLK_OUT_ENB_U_RESET_MASK, CLK_OUT_ENB_U);
	clk_writel(CLK_OUT_ENB_V_RESET_MASK, CLK_OUT_ENB_V);
	clk_writel(CLK_OUT_ENB_W_RESET_MASK, CLK_OUT_ENB_W);
	clk_writel(CLK_OUT_ENB_X_RESET_MASK, CLK_OUT_ENB_X);
	wmb();

	for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_LA;
			off += 4) {
		if (off == PERIPH_CLK_SOURCE_EMC)
			continue;
		clk_writel(*ctx++, off);
	}
	for (off = PERIPH_CLK_SOURCE_MSELECT; off <= PERIPH_CLK_SOURCE_SE;
			off += 4) {
		clk_writel(*ctx++, off);
	}
	for (off = AUDIO_DLY_CLK; off <= AUDIO_SYNC_CLK_SPDIF; off+=4) {
		clk_writel(*ctx++, off);
	}
	for (off = PERIPH_CLK_SOURCE_XUSB_HOST;
		off <= PERIPH_CLK_SOURCE_VIC; off += 4)
		clk_writel(*ctx++, off);

	udelay(RESET_PROPAGATION_DELAY);

	clk_writel(*ctx++, RST_DEVICES_L);
	clk_writel(*ctx++, RST_DEVICES_H);
	clk_writel(*ctx++, RST_DEVICES_U);
	clk_writel(*ctx++, RST_DEVICES_V);
	clk_writel(*ctx++, RST_DEVICES_W);
	clk_writel(*ctx++, RST_DEVICES_X);
	wmb();

	clk_writel(*ctx++, CLK_OUT_ENB_L);
	clk_writel(*ctx++, CLK_OUT_ENB_H);
	clk_writel(*ctx++, CLK_OUT_ENB_U);

	/* For LP0 resume, clk to lpcpu is required to be on */
	val = *ctx++;
	val |= CLK_OUT_ENB_V_CLK_ENB_CPULP_EN;
	clk_writel(val, CLK_OUT_ENB_V);

	clk_writel(*ctx++, CLK_OUT_ENB_W);
	clk_writel(*ctx++, CLK_OUT_ENB_X);
	wmb();

	/* DFLL resume after cl_dvfs and i2c5 clocks are resumed */
	tegra12_dfll_clk_resume(&tegra_dfll_cpu);

	/* CPU G clock restored after DFLL and PLLs */
	clk_writelx(*ctx++, tegra_clk_cclk_g.reg);
	clk_writelx(*ctx++, tegra_clk_cclk_g.reg + SUPER_CLK_DIVIDER);

	clk_writel(*ctx++, SPARE_REG);
	clk_writel(*ctx++, MISC_CLK_ENB);
	clk_writel(*ctx++, CLK_MASK_ARM);

	/* Restore back the actual pll and secondary divider values */
	clk_writel(pll_p_out12, tegra_pll_p_out1.reg);
	clk_writel(pll_p_out34, tegra_pll_p_out3.reg);

	p = &tegra_pll_c4;
	if (p->state == OFF)
		tegra12_pllss_clk_disable(p);
	p = &tegra_pll_d2;
	if (p->state == OFF)
		tegra12_pllss_clk_disable(p);
	p = &tegra_pll_dp;
	if (p->state == OFF)
		tegra12_pllss_clk_disable(p);
	p = &tegra_pll_c2;
	if (p->state == OFF)
		tegra12_pllcx_clk_disable(p);
	p = &tegra_pll_c3;
	if (p->state == OFF)
		tegra12_pllcx_clk_disable(p);
	p = &tegra_pll_c;
	if (p->state == OFF)
		tegra12_pllxc_clk_disable(p);
	p = &tegra_pll_x;
	if (p->state == OFF)
		tegra12_pllxc_clk_disable(p);
	p = &tegra_pll_re_vco;
	if (p->state == OFF)
		tegra12_pllre_clk_disable(p);

	clk_writel(plla_base, tegra_pll_a.reg + PLL_BASE);
	clk_writel(plld_base, tegra_pll_d.reg + PLL_BASE);
	clk_writel(pll_m_out1, tegra_pll_m_out1.reg);
	clk_writel(pll_a_out0, tegra_pll_a_out0.reg);
	clk_writel(pll_c_out1, tegra_pll_c_out1.reg);

	/* Since EMC clock is not restored, and may not preserve parent across
	   suspend, update current state, and mark EMC DFS as out of sync */
	p = tegra_clk_emc.parent;
	tegra12_periph_clk_init(&tegra_clk_emc);

	/* Turn Off pll_m if it was OFF before suspend, and emc was not switched
	   to pll_m across suspend; re-init pll_m to sync s/w and h/w states */
	if ((tegra_pll_m.state == OFF) &&
	    (&tegra_pll_m != tegra_clk_emc.parent))
		tegra12_pllm_clk_disable(&tegra_pll_m);
	tegra12_pllm_clk_init(&tegra_pll_m);

	if (p != tegra_clk_emc.parent) {
		pr_debug("EMC parent(refcount) across suspend: %s(%d) : %s(%d)",
			p->name, p->refcnt, tegra_clk_emc.parent->name,
			tegra_clk_emc.parent->refcnt);

		/* emc switched to the new parent by low level code, but
		   ref count and s/w state need to be updated */
		clk_disable_locked(p);
		clk_enable_locked(tegra_clk_emc.parent);
	}
	rate = clk_get_rate_all_locked(&tegra_clk_emc);
	/* *ctx holds the pre-suspend EMC rate saved last in suspend */
	if (*ctx != rate) {
		tegra_dvfs_set_rate(&tegra_clk_emc, rate);
		if (p == tegra_clk_emc.parent) {
			rate = clk_get_rate_all_locked(p);
			tegra_dvfs_set_rate(p, rate);
		}
	}
	tegra_emc_timing_invalidate();

	tegra12_pll_clk_init(&tegra_pll_u); /* Re-init utmi parameters */
	tegra12_plle_clk_resume(&tegra_pll_e); /* Restore plle parent as pll_re_vco */
	tegra12_pllp_clk_resume(&tegra_pll_p); /* Fire a bug if not restored */
	tegra12_mc_holdoff_enable();
}

/*
 * NOTE(review): .save/.restore members exist only in this downstream
 * kernel's struct syscore_ops — confirm against the tree's pm headers.
 */
static struct syscore_ops tegra_clk_syscore_ops = {
	.suspend = tegra12_clk_suspend,
	.resume = tegra12_clk_resume,
	.save = tegra12_clk_suspend,
	.restore = tegra12_clk_resume,
};
#endif

/* Tegra12 CPU clock and reset control functions */
static void tegra12_wait_cpu_in_reset(u32 cpu)
{
	unsigned int reg;

	do {
		reg = readl(reg_clk_base +
			    TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
		cpu_relax();
	} while (!(reg & (1 << cpu)));	/* check CPU been reset or not */

	return;
}

static void tegra12_put_cpu_in_reset(u32 cpu)
{
	writel(CPU_RESET(cpu),
	       reg_clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
	dmb();
}

static void tegra12_cpu_out_of_reset(u32 cpu)
{
	writel(CPU_RESET(cpu),
	       reg_clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
	wmb();
}

static void tegra12_enable_cpu_clock(u32 cpu)
{
	unsigned int reg;

	writel(CPU_CLOCK(cpu),
	       reg_clk_base + TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
	/* Read-back to ensure the write has posted */
	reg = readl(reg_clk_base +
		    TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
}

/* Intentionally empty: Tegra12 does not gate per-CPU clocks here */
static void tegra12_disable_cpu_clock(u32 cpu)
{
}

static struct tegra_cpu_car_ops tegra12_cpu_car_ops = {
	.wait_for_reset = tegra12_wait_cpu_in_reset,
	.put_in_reset = tegra12_put_cpu_in_reset,
	.out_of_reset = tegra12_cpu_out_of_reset,
	.enable_clock = tegra12_enable_cpu_clock,
	.disable_clock = tegra12_disable_cpu_clock,
};

void __init tegra12_cpu_car_ops_init(void)
{
	tegra_cpu_car_ops = &tegra12_cpu_car_ops;
}

/* Register all XUSB source, divider and coupled clocks */
static void tegra12_init_xusb_clocks(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tegra_xusb_source_clks); i++)
		tegra12_init_one_clock(&tegra_xusb_source_clks[i]);

	tegra12_init_one_clock(&tegra_xusb_ss_div2);
	tegra12_init_one_clock(&tegra_xusb_hs_src);

	for (i = 0; i < ARRAY_SIZE(tegra_xusb_coupled_clks); i++)
		tegra12_init_one_clock(&tegra_xusb_coupled_clks[i]);
}

#ifdef CONFIG_TEGRA_PREINIT_CLOCKS
#define
CLK_RSTENB_DEV_V_0_AUDIO_BIT (1 << 10)
#define CLK_RSTENB_DEV_V_0_3D2_BIT (1 << 2)
#define CLK_RSTENB_DEV_L_0_HOST1X_BIT (1 << 28)
#define CLK_RSTENB_DEV_L_0_DISP1_BIT (1 << 27)
#define CLK_RSTENB_DEV_L_0_3D_BIT (1 << 24)
#define CLK_RSTENB_DEV_L_0_ISP_BIT (1 << 23)
#define CLK_RSTENB_DEV_L_0_2D_BIT (1 << 21)
#define CLK_RSTENB_DEV_L_0_VI_BIT (1 << 20)
#define CLK_RSTENB_DEV_L_0_EPP_BIT (1 << 19)
#define CLK_RSTENB_DEV_H_0_VDE_BIT (1 << 29)
#define CLK_RSTENB_DEV_H_0_MPE_BIT (1 << 28)
#define CLK_RSTENB_DEV_U_0_CSITE_BIT (1 << 9)
#define CLK_RSTENB_DEV_X_0_HDMI_AUDIO_BIT (1 << 16)

/* host1x clock source register layout */
#define HOST1X_CLK_REG_OFFSET 0x180
#define HOST1X_CLK_SRC_SHIFT 30
#define HOST1X_CLK_SRC_MASK (0x3 << HOST1X_CLK_SRC_SHIFT)
#define HOST1X_CLK_SRC_PLLM_OUT0 0
#define HOST1X_CLK_SRC_PLLC_OUT0 1
#define HOST1X_CLK_SRC_PLLP_OUT0 2
#define HOST1X_CLK_SRC_PLLA_OUT0 3
#define HOST1X_CLK_SRC_DEFAULT (\
		HOST1X_CLK_SRC_PLLP_OUT0 << HOST1X_CLK_SRC_SHIFT)
#define HOST1X_CLK_IDLE_DIV_SHIFT 8
#define HOST1X_CLK_IDLE_DIV_MASK (0xff << HOST1X_CLK_IDLE_DIV_SHIFT)
#define HOST1X_CLK_IDLE_DIV_DEFAULT (0 << HOST1X_CLK_IDLE_DIV_SHIFT)
#define HOST1X_CLK_DIV_SHIFT 0
#define HOST1X_CLK_DIV_MASK (0xff << HOST1X_CLK_DIV_SHIFT)
#define HOST1X_CLK_DIV_DEFAULT (3 << HOST1X_CLK_DIV_SHIFT)

/* common video-module clock source register layout */
#define VCLK_SRC_SHIFT 30
#define VCLK_SRC_MASK (0x3 << VCLK_SRC_SHIFT)
#define VCLK_SRC_PLLM_OUT0 0
#define VCLK_SRC_PLLC_OUT0 1
#define VCLK_SRC_PLLP_OUT0 2
#define VCLK_SRC_PLLA_OUT0 3
#define VCLK_SRC_DEFAULT (VCLK_SRC_PLLM_OUT0 << VCLK_SRC_SHIFT)
#define VCLK_IDLE_DIV_SHIFT 8
#define VCLK_IDLE_DIV_MASK (0xff << VCLK_IDLE_DIV_SHIFT)
#define VCLK_IDLE_DIV_DEFAULT (0 << VCLK_IDLE_DIV_SHIFT)
#define VCLK_DIV_SHIFT 0
#define VCLK_DIV_MASK (0xff << VCLK_DIV_SHIFT)
#define VCLK_DIV_DEFAULT (0xa << VCLK_DIV_SHIFT)

#define ISP_CLK_REG_OFFSET 0x144
#define VI_CLK_REG_OFFSET 0x148
#define VI_SENSOR_CLK_REG_OFFSET 0x1a8
#define VI_SENSOR2_CLK_REG_OFFSET 0x658
#define VI_CLK_DIV_DEFAULT (0x12 << VCLK_DIV_SHIFT)
#define G3D_CLK_REG_OFFSET 0x158
#define G2D_CLK_REG_OFFSET 0x15c
#define EPP_CLK_REG_OFFSET 0x16c
#define MPE_CLK_REG_OFFSET 0x170
#define VDE_CLK_REG_OFFSET 0x170
#define G3D2_CLK_REG_OFFSET 0x3b0
#define HDMI_AUDIO_CLK_REG_OFFSET 0x668
#define HDMI_AUDIO_CLK_DIV_DEFAULT (0x12 << VCLK_DIV_SHIFT)
#define CSITE_CLK_REG_OFFSET 0x1d4
#define CSITE_CLK_DIV_DEFAULT (0x4 << VCLK_DIV_SHIFT)

/* Set a bit in a CAR register if it is not already set */
static void __init clk_setbit(u32 reg, u32 bit)
{
	u32 val = clk_readl(reg);

	if ((val & bit) == bit)
		return;

	val |= bit;
	clk_writel(val, reg);
	udelay(2);
}

/* Clear a bit in a CAR register if it is not already clear */
static void __init clk_clrbit(u32 reg, u32 bit)
{
	u32 val = clk_readl(reg);

	if ((val & bit) == 0)
		return;

	val &= ~bit;
	clk_writel(val, reg);
	udelay(2);
}

/* Read-modify-write a masked field if it does not already hold "bits" */
static void __init clk_setbits(u32 reg, u32 bits, u32 mask)
{
	u32 val = clk_readl(reg);

	if ((val & mask) == bits)
		return;

	val &= ~mask;
	val |= bits;
	clk_writel(val, reg);
	udelay(2);
}

/*
 * Put one video module clock into its default safe configuration:
 * assert reset, gate the clock, program default source/divider, deassert
 * reset. "tag" selects the L/H/U/V/W/X register bank for "rebit".
 */
static void __init vclk_init(int tag, u32 src, u32 rebit)
{
	u32 rst, enb;

	switch (tag) {
	case 'L':
		rst = RST_DEVICES_L;
		enb = CLK_OUT_ENB_L;
		break;
	case 'H':
		rst = RST_DEVICES_H;
		enb = CLK_OUT_ENB_H;
		break;
	case 'U':
		rst = RST_DEVICES_U;
		enb = CLK_OUT_ENB_U;
		break;
	case 'V':
		rst = RST_DEVICES_V;
		enb = CLK_OUT_ENB_V;
		break;
	case 'W':
		rst = RST_DEVICES_W;
		enb = CLK_OUT_ENB_W;
		break;
	case 'X':
		rst = RST_DEVICES_X;
		enb = CLK_OUT_ENB_X;
		break;
	default:
		/* Quietly ignore. */
		return;
	}

	clk_setbit(rst, rebit);
	clk_clrbit(enb, rebit);
	clk_setbits(src, VCLK_SRC_DEFAULT, VCLK_SRC_MASK);
	clk_setbits(src, VCLK_DIV_DEFAULT, VCLK_DIV_MASK);
	clk_clrbit(rst, rebit);
}

static int __init tegra_soc_preinit_clocks(void)
{
	/*
	 * Make sure host1x clock configuration has:
	 *	HOST1X_CLK_SRC : PLLP_OUT0.
	 *	HOST1X_CLK_DIVISOR: >2 to start from safe enough frequency.
	 */
	clk_setbit(RST_DEVICES_L, CLK_RSTENB_DEV_L_0_HOST1X_BIT);
	clk_setbit(CLK_OUT_ENB_L, CLK_RSTENB_DEV_L_0_HOST1X_BIT);
	clk_setbits(HOST1X_CLK_REG_OFFSET,
		    HOST1X_CLK_DIV_DEFAULT, HOST1X_CLK_DIV_MASK);
	clk_setbits(HOST1X_CLK_REG_OFFSET,
		    HOST1X_CLK_IDLE_DIV_DEFAULT, HOST1X_CLK_IDLE_DIV_MASK);
	clk_setbits(HOST1X_CLK_REG_OFFSET,
		    HOST1X_CLK_SRC_DEFAULT, HOST1X_CLK_SRC_MASK);
	clk_clrbit(RST_DEVICES_L, CLK_RSTENB_DEV_L_0_HOST1X_BIT);

	/*
	 * Make sure vi clock configuration has:
	 *	VI_CLK_DIVISOR: 0x12
	 *	VI_SENSOR_CLK_DIVISOR: 0x12
	 *	VI_SENSOR2_CLK_DIVISOR: 0x12
	 */
	clk_setbit(RST_DEVICES_L, CLK_RSTENB_DEV_L_0_VI_BIT);
	clk_setbit(CLK_OUT_ENB_L, CLK_RSTENB_DEV_L_0_VI_BIT);
	clk_setbits(VI_CLK_REG_OFFSET, VCLK_SRC_DEFAULT, VCLK_SRC_MASK);
	clk_setbits(VI_CLK_REG_OFFSET, VI_CLK_DIV_DEFAULT, VCLK_DIV_MASK);
	clk_setbits(VI_SENSOR_CLK_REG_OFFSET, VCLK_SRC_DEFAULT, VCLK_SRC_MASK);
	clk_setbits(VI_SENSOR_CLK_REG_OFFSET,
		    VI_CLK_DIV_DEFAULT, VCLK_DIV_MASK);
	clk_setbits(VI_SENSOR2_CLK_REG_OFFSET, VCLK_SRC_DEFAULT, VCLK_SRC_MASK);
	clk_setbits(VI_SENSOR2_CLK_REG_OFFSET,
		    VI_CLK_DIV_DEFAULT, VCLK_DIV_MASK);
	clk_clrbit(RST_DEVICES_L, CLK_RSTENB_DEV_L_0_VI_BIT);

	/*
	 * Make sure hdmi_audio clock configuration has:
	 *	HDMI_AUDIO_CLK_DIVISOR: 0x12
	 */
	clk_setbit(RST_DEVICES_X, CLK_RSTENB_DEV_X_0_HDMI_AUDIO_BIT);
	clk_setbit(CLK_OUT_ENB_X, CLK_RSTENB_DEV_X_0_HDMI_AUDIO_BIT);
	clk_setbits(HDMI_AUDIO_CLK_REG_OFFSET,
		    HDMI_AUDIO_CLK_DIV_DEFAULT, VCLK_DIV_MASK);
	clk_clrbit(RST_DEVICES_X, CLK_RSTENB_DEV_X_0_HDMI_AUDIO_BIT);

	/*
	 * Make sure csite clock configuration has:
	 *	CSITE_CLK_DIVISOR: 0x4
	 */
	clk_setbit(RST_DEVICES_U, CLK_RSTENB_DEV_U_0_CSITE_BIT);
	clk_setbit(CLK_OUT_ENB_U, CLK_RSTENB_DEV_U_0_CSITE_BIT);
	clk_setbits(CSITE_CLK_REG_OFFSET, CSITE_CLK_DIV_DEFAULT, VCLK_DIV_MASK);
	clk_clrbit(RST_DEVICES_U, CLK_RSTENB_DEV_U_0_CSITE_BIT);

	/* Pre-initialize Video clocks.
*/ vclk_init('L', G3D_CLK_REG_OFFSET, CLK_RSTENB_DEV_L_0_3D_BIT); vclk_init('L', G2D_CLK_REG_OFFSET, CLK_RSTENB_DEV_L_0_2D_BIT); vclk_init('L', ISP_CLK_REG_OFFSET, CLK_RSTENB_DEV_L_0_ISP_BIT); vclk_init('L', EPP_CLK_REG_OFFSET, CLK_RSTENB_DEV_L_0_EPP_BIT); vclk_init('H', VDE_CLK_REG_OFFSET, CLK_RSTENB_DEV_H_0_VDE_BIT); vclk_init('H', MPE_CLK_REG_OFFSET, CLK_RSTENB_DEV_H_0_MPE_BIT); vclk_init('V', G3D2_CLK_REG_OFFSET, CLK_RSTENB_DEV_V_0_3D2_BIT); return 0; } #endif /* CONFIG_TEGRA_PREINIT_CLOCKS */ void __init tegra12x_init_clocks(void) { int i; struct clk *c; #ifdef CONFIG_TEGRA_PREINIT_CLOCKS tegra_soc_preinit_clocks(); #endif /* CONFIG_TEGRA_PREINIT_CLOCKS */ for (i = 0; i < ARRAY_SIZE(tegra_ptr_clks); i++) tegra12_init_one_clock(tegra_ptr_clks[i]); /* Fix bug in simulator clock routing */ if (tegra_platform_is_linsim()) { for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++) { if (!strcmp("msenc", tegra_list_clks[i].name)) { tegra_list_clks[i].u.periph.clk_num = 60; tegra_list_clks[i].reg = 0x170; tegra_list_clks[i].flags &= ~MUX8; } } } for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++) tegra12_init_one_clock(&tegra_list_clks[i]); for (i = 0; i < ARRAY_SIZE(tegra_visp_clks); i++) tegra12_init_one_clock(&tegra_visp_clks[i]); for (i = 0; i < ARRAY_SIZE(tegra_ptr_camera_mclks); i++) tegra12_init_one_clock(tegra_ptr_camera_mclks[i]); for (i = 0; i < ARRAY_SIZE(tegra_sync_source_list); i++) tegra12_init_one_clock(&tegra_sync_source_list[i]); for (i = 0; i < ARRAY_SIZE(tegra_clk_audio_list); i++) tegra12_init_one_clock(&tegra_clk_audio_list[i]); for (i = 0; i < ARRAY_SIZE(tegra_clk_audio_2x_list); i++) tegra12_init_one_clock(&tegra_clk_audio_2x_list[i]); init_clk_out_mux(); for (i = 0; i < ARRAY_SIZE(tegra_clk_out_list); i++) tegra12_init_one_clock(&tegra_clk_out_list[i]); tegra12_init_xusb_clocks(); for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) { c = tegra_get_clock_by_name(tegra_clk_duplicates[i].name); if (!c) { pr_err("%s: Unknown duplicate clock %s\n", 
__func__, tegra_clk_duplicates[i].name); continue; } tegra_clk_duplicates[i].lookup.clk = c; clkdev_add(&tegra_clk_duplicates[i].lookup); } /* Initialize to default */ tegra_init_cpu_edp_limits(0); tegra12_cpu_car_ops_init(); /* Tegra12 allows to change dividers of disabled clocks */ tegra_clk_set_disabled_div_all(); #ifdef CONFIG_PM_SLEEP register_syscore_ops(&tegra_clk_syscore_ops); #endif } static int __init tegra12x_clk_late_init(void) { clk_disable(&tegra_pll_d); clk_disable(&tegra_pll_re_vco); return 0; } late_initcall(tegra12x_clk_late_init);
gpl-2.0
SandPox/android_kernel_samsung_kyleproxx
arch/arm/mach-hawaii/pm_dbg.c
7
11042
/******************************************************************************/ /* (c) 2011 Broadcom Corporation */ /* */ /* Unless you and Broadcom execute a separate written software license */ /* agreement governing use of this software, this software is licensed to you */ /* under the terms of the GNU General Public License version 2, available at */ /* http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). */ /* */ /******************************************************************************/ #include <linux/module.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <plat/kona_pm_dbg.h> #include <plat/pwr_mgr.h> #include <plat/pi_mgr.h> #include <mach/io_map.h> #include <mach/rdb/brcm_rdb_csr.h> #include <mach/rdb/brcm_rdb_hsotg_ctrl.h> #include <mach/rdb/brcm_rdb_gpio.h> #include <mach/pm.h> #include <mach/pwr_mgr.h> #include <mach/pi_mgr.h> #include <mach/memory.h> #ifdef CONFIG_KONA_PROFILER #include <plat/profiler.h> #endif #if defined (CONFIG_MACH_HAWAII_SS_COMMON) extern void uas_jig_force_sleep(void); #endif /***************************************************************************** * SLEEP STATE DEBUG INTERFACE * *****************************************************************************/ #define GPIO_GPORS_BASE_PHYS (GPIO2_BASE_ADDR + GPIO_GPORS0_OFFSET) #define GPIO_GPORS_BASE_VIRT (KONA_GPIO2_VA + GPIO_GPORS0_OFFSET) #define GPIO_GPORC_BASE_PHYS (GPIO2_BASE_ADDR + GPIO_GPORC0_OFFSET) #define GPIO_GPORC_BASE_VIRT (KONA_GPIO2_VA + GPIO_GPORC0_OFFSET) #define LPM_TRACE_PER_CORE_BUF_SIZE SZ_1K /*should be a multiple of 4*/ #define LPM_TRACE_CORE_OFFSET(c) (LPM_TRACE_PER_CORE_BUF_SIZE*(c)) #define LPM_TRACE_ENCODE(p, v) ((((p) & LPM_TRACE_PARAM_MASK) << \ LPM_TRACE_PARAM_SHIFT) | \ ((v) & LPM_TRACE_VAL_MASK)) /* No check for gpio number to speed up the API */ void dbg_gpio_set(u32 gpio) { u32 reg, bit; reg = gpio / 32; reg = GPIO_GPORS_BASE_VIRT + reg * 4; bit = 1 << (gpio % 32); __raw_writel(bit, reg); } 
EXPORT_SYMBOL_GPL(dbg_gpio_set); /* No check for gpio number to speed up the API */ void dbg_gpio_clr(u32 gpio) { u32 reg, bit; reg = gpio / 32; reg = GPIO_GPORC_BASE_VIRT + reg * 4; bit = 1 << (gpio % 32); __raw_writel(bit, reg); } EXPORT_SYMBOL_GPL(dbg_gpio_clr); /* * Dormant mode profiling */ #if defined(DORMANT_PROFILE) && defined(CONFIG_A9_DORMANT_MODE) static u32 ns_gpio; static u32 sec_gpio; static u32 ref_gpio; void clear_ns_gpio(void) { if (dormant_profile_on) dbg_gpio_clr(ns_gpio); } /* static void dormant_profile_entry(void) { if (dormant_profile_on) { dbg_gpio_set(ns_gpio); dbg_gpio_set(sec_gpio); dbg_gpio_set(ref_gpio); } } static void dormant_profile_exit(void) { if (dormant_profile_on) { dbg_gpio_clr(ns_gpio); dbg_gpio_clr(sec_gpio); dbg_gpio_clr(ref_gpio); } } */ static void dormant_profile_config(u32 on, u32 ns, u32 sec, u32 ref) { u32 reg; /* Setup configs for C routines */ ns_gpio = ns; sec_gpio = sec; ref_gpio = ref; /* Setup configs for asm routines */ reg = ns / 32; ns_gpio_set_v = GPIO_GPORS_BASE_VIRT + reg * 4; ns_gpio_clr_v = GPIO_GPORC_BASE_VIRT + reg * 4; ns_gpio_set_p = GPIO_GPORS_BASE_PHYS + reg * 4; ns_gpio_clr_p = GPIO_GPORC_BASE_PHYS + reg * 4; ns_gpio_bit = 1 << (ns % 32); /* Setup common configs */ dormant_profile_on = on; } #else /* !DORMANT_PROFILE && !CONFIG_A9_DORMANT_MODE */ void clear_ns_gpio(void) { } static void dormant_profile_config(u32 on, u32 ns, u32 sec, u32 ref) { } #endif /* DORMANT_PROFILE && CONFIG_A9_DORMANT_MODE */ u32 dorm_profile_enable; static u32 lpm_trace_buf[CONFIG_NR_CPUS]; struct debug { int dummy; }; #define __param_check_debug(name, p, type) \ static inline struct type *__check_##name(void) { return (p); } #define param_check_debug(name, p) \ __param_check_debug(name, p, debug) static int param_set_debug(const char *val, const struct kernel_param *kp); static int param_get_debug(char *buffer, const struct kernel_param *kp); static struct kernel_param_ops param_ops_debug = { .set = param_set_debug, .get 
= param_get_debug, }; static struct debug debug; module_param_named(debug, debug, debug, S_IRUGO | S_IWUSR | S_IWGRP); /* List of supported commands */ enum { CMD_SHOW_HELP = 'h', CMD_DORMANT = 'd', CMD_DISPLAY_STATS = 's', CMD_FORCE_SLEEP = 'f', }; static void cmd_show_usage(void) { const char usage[] = "Usage:\n" "echo 'cmd string' > /sys/module/pm_dbg/parameters/debug\n" "'cmd string' is constructed as follows:\n" #ifdef DORMANT_PROFILE "start dormant profile: d p 1 <ns_gpio> <sec_gpio> <ref_gpio>\n" "stop dormant profile : d p 0 0 0 0\n" #endif "force sleep: f <state from 0 to 4>\n" "display stats: s\n" "\n"; pr_info("%s", usage); } static void cmd_display_stats(const char *p) { } static int force_sleep_state = 2; int get_force_sleep_state(void) { return force_sleep_state; } static void cmd_force_sleep(const char *p) { /* coverity[secure_coding] */ sscanf(p, "%d", &force_sleep_state); if (force_sleep_state < 0 || force_sleep_state > 4) { pr_err("Invalid state: %d\n", force_sleep_state); force_sleep_state = 2; return; } pr_info("%s: Forcing system to state: %d\n", __func__, force_sleep_state); #if defined(CONFIG_MACH_HAWAII_SS_COMMON) uas_jig_force_sleep(); #endif kona_pm_reg_pm_enter_handler(&hawaii_force_sleep); request_suspend_state(PM_SUSPEND_MEM); } #ifdef CONFIG_A9_DORMANT_MODE static void cmd_dormant_profile(const char *p) { u32 on, ns, sec, ref; /* coverity[secure_coding] */ sscanf(p, "%08x %08x %08x %08x", &on, &ns, &sec, &ref); dormant_profile_config(on, ns, sec, ref); } static void cmd_dormant(const char *p) { char cmd = *p; p++; while (*p == ' ' || *p == '\t') p++; switch (cmd) { case 'p': /* Handle dormant profile commands */ cmd_dormant_profile(p); break; } } #endif static int param_set_debug(const char *val, const struct kernel_param *kp) { const char *p; if (!val) return -EINVAL; p = &val[1]; /* First character is the command. Skip past all whitespaces * after the command to reach the arguments, if any. 
*/ while (*p == ' ' || *p == '\t') p++; switch (val[0]) { #ifdef CONFIG_A9_DORMANT_MODE case CMD_DORMANT: cmd_dormant(p); break; #endif case CMD_DISPLAY_STATS: cmd_display_stats(p); break; case CMD_FORCE_SLEEP: cmd_force_sleep(p); break; case CMD_SHOW_HELP: /* Fall-through */ default: cmd_show_usage(); break; } return 0; } static int param_get_debug(char *buffer, const struct kernel_param *kp) { cmd_show_usage(); return 0; } /***************************************************************************** * LPM MODE INSTRUMENTATION * *****************************************************************************/ void instrument_lpm(u16 trace, u16 param) { u32 pos; u32 byte_off; u32 val; /* Fn will be called with interrupts disabled, so use smp_processor_id. get_cpu can't be used here due to dormant */ int cpu = smp_processor_id(); pos = readl_relaxed(lpm_trace_buf[cpu]) + 1; byte_off = pos << 2; /*pos*4*/ BUG_ON(byte_off > (LPM_TRACE_PER_CORE_BUF_SIZE - 4)); val = LPM_TRACE_ENCODE(param, trace); writel_relaxed(val, (lpm_trace_buf[cpu] + byte_off)); if (byte_off == (LPM_TRACE_PER_CORE_BUF_SIZE - 4)) pos = 0; writel_relaxed(pos, lpm_trace_buf[cpu]); } /***************************************************************************** * INTERFACE TO TAKE REGISTER SNAPSHOT BEFORE SLEEP * *****************************************************************************/ static u32 mm_pi_id = PI_MGR_PI_ID_MM; static u32 hub_pi_id = PI_MGR_PI_ID_HUB_SWITCHABLE; static u32 hub_aon_pi_id = PI_MGR_PI_ID_HUB_AON; static u32 arm_pi_id = PI_MGR_PI_ID_ARM_CORE; static u32 arm_subsys_pi_id = PI_MGR_PI_ID_ARM_SUB_SYSTEM; static u32 modem_pi_id = PI_MGR_PI_ID_MODEM; static u32 get_pi_count(void *data) { u32 ret = 0; int id = *(int *)data; ret = pi_get_use_count(id); return ret; } #define AP_MIN_PWR_STATE \ (KONA_MEMC0_NS_VA + CSR_APPS_MIN_PWR_STATE_OFFSET) #define MODEM_MIN_PWR_STATE \ (KONA_MEMC0_NS_VA + CSR_MODEM_MIN_PWR_STATE_OFFSET) #define DSP_MIN_PWR_STATE \ (KONA_MEMC0_NS_VA + 
CSR_DSP_MIN_PWR_STATE_OFFSET) #define USB_OTG_P1CTL \ (KONA_USB_HSOTG_CTRL_VA + HSOTG_CTRL_PHY_P1CTL_OFFSET) #define HW_FREQ_CHANGE_CNTRL \ (KONA_MEMC0_NS_VA+CSR_HW_FREQ_CHANGE_CNTRL_OFFSET) #define DDR_PLL_PWRDN_BIT CSR_HW_FREQ_CHANGE_CNTRL_DDR_PLL_PWRDN_ENABLE_MASK /* SNAPSHOT TABLE: * --------------- * Table of registers to be sampled before entering low power * state for debugging. */ static struct snapshot snapshot[] = { /* * Simple register parms */ SIMPLE_PARM(AP_MIN_PWR_STATE, 0, 3), SIMPLE_PARM(MODEM_MIN_PWR_STATE, 0, 3), SIMPLE_PARM(DSP_MIN_PWR_STATE, 0, 3), SIMPLE_PARM(HW_FREQ_CHANGE_CNTRL, DDR_PLL_PWRDN_BIT, DDR_PLL_PWRDN_BIT), /* * List of clocks that prevent entry to low power state */ CLK_PARM("dig_ch0_clk"), CLK_PARM("tpiu_clk"), CLK_PARM("pti_clk"), /* * AHB register parms (needs AHB clk enabled before register read) */ AHB_REG_PARM(USB_OTG_P1CTL, 0, (1 << 30), "usb_otg_clk"), /* * PI usage counts */ USER_DEFINED_PARM(get_pi_count, &mm_pi_id, "mm"), USER_DEFINED_PARM(get_pi_count, &hub_pi_id, "hub"), USER_DEFINED_PARM(get_pi_count, &hub_aon_pi_id, "hub_aon"), USER_DEFINED_PARM(get_pi_count, &arm_pi_id, "arm"), USER_DEFINED_PARM(get_pi_count, &arm_subsys_pi_id, "arm_subsys"), USER_DEFINED_PARM(get_pi_count, &modem_pi_id, "modem"), }; /***************************************************************************** * INSTRUMENT LOW POWER STATES * *****************************************************************************/ void instrument_idle_entry(void) { /* Take snapshot of registers that can potentially prevent system from * entering low power state. 
*/ snapshot_get(); /** * Take profiling counter samples * before entering idle state */ #ifdef CONFIG_KONA_PROFILER profiler_idle_entry_cb(); #endif } void instrument_idle_exit(void) { } int __init __pmdbg_init(void) { u8 *trace_v; dma_addr_t trace_p; int i; snapshot_table_register(snapshot, ARRAY_SIZE(snapshot)); dormant_profile_config(0, 0, 0, 0); trace_v = dma_alloc_coherent(NULL, LPM_TRACE_PER_CORE_BUF_SIZE*CONFIG_NR_CPUS, &trace_p, GFP_ATOMIC | __GFP_ZERO); pr_info("%s LPM trace_v:0x%x, _p:0x%x\n", __func__, (u32)trace_v, (u32) trace_p); if (!trace_v) { pr_info("%s: LPM dma buffer alloc failed\n", __func__); return -ENOMEM; } for (i = 0; i < CONFIG_NR_CPUS; i++) lpm_trace_buf[i] = (u32)&trace_v[i*LPM_TRACE_PER_CORE_BUF_SIZE]; return 0; } arch_initcall(__pmdbg_init);
gpl-2.0
TeamWin/android_kernel_htc_a32e
kernel/sched/debug.c
7
14591
/* * kernel/sched/debug.c * * Print the CFS rbtree * * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/kallsyms.h> #include <linux/utsname.h> #include "sched.h" static DEFINE_SPINLOCK(sched_debug_lock); #define SEQ_printf(m, x...) \ do { \ if (m) \ seq_printf(m, x); \ else \ printk(x); \ } while (0) static long long nsec_high(unsigned long long nsec) { if ((long long)nsec < 0) { nsec = -nsec; do_div(nsec, 1000000); return -nsec; } do_div(nsec, 1000000); return nsec; } static unsigned long nsec_low(unsigned long long nsec) { if ((long long)nsec < 0) nsec = -nsec; return do_div(nsec, 1000000); } #define SPLIT_NS(x) nsec_high(x), nsec_low(x) #ifdef CONFIG_FAIR_GROUP_SCHED static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg) { struct sched_entity *se = tg->se[cpu]; #define P(F) \ SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F) #define PN(F) \ SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F)) if (!se) { struct sched_avg *avg = &cpu_rq(cpu)->avg; P(avg->runnable_avg_sum); P(avg->runnable_avg_period); return; } PN(se->exec_start); PN(se->vruntime); PN(se->sum_exec_runtime); #ifdef CONFIG_SCHEDSTATS PN(se->statistics.wait_start); PN(se->statistics.sleep_start); PN(se->statistics.block_start); PN(se->statistics.sleep_max); PN(se->statistics.block_max); PN(se->statistics.exec_max); PN(se->statistics.slice_max); PN(se->statistics.wait_max); PN(se->statistics.wait_sum); P(se->statistics.wait_count); #endif P(se->load.weight); #ifdef CONFIG_SMP P(se->avg.runnable_avg_sum); P(se->avg.runnable_avg_period); P(se->avg.load_avg_contrib); P(se->avg.decay_count); #endif #undef PN #undef P } #endif #ifdef CONFIG_CGROUP_SCHED static char 
group_path[PATH_MAX]; static char *task_group_path(struct task_group *tg) { if (autogroup_path(tg, group_path, PATH_MAX)) return group_path; cgroup_path(tg->css.cgroup, group_path, PATH_MAX); return group_path; } #endif static void print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) { if (rq->curr == p) SEQ_printf(m, "R"); else SEQ_printf(m, " "); SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", p->comm, p->pid, SPLIT_NS(p->se.vruntime), (long long)(p->nvcsw + p->nivcsw), p->prio); #ifdef CONFIG_SCHEDSTATS SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", SPLIT_NS(p->se.vruntime), SPLIT_NS(p->se.sum_exec_runtime), SPLIT_NS(p->se.statistics.sum_sleep_runtime)); #else SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); #endif #ifdef CONFIG_CGROUP_SCHED SEQ_printf(m, " %s", task_group_path(task_group(p))); #endif SEQ_printf(m, "\n"); if (!m) show_stack(p, NULL); } static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) { struct task_struct *g, *p; unsigned long flags; SEQ_printf(m, "\nrunnable tasks:\n" " task PID tree-key switches prio" " exec-runtime sum-exec sum-sleep\n" "------------------------------------------------------" "----------------------------------------------------\n"); read_lock_irqsave(&tasklist_lock, flags); do_each_thread(g, p) { if (!p->on_rq || task_cpu(p) != rq_cpu) continue; print_task(m, rq, p); } while_each_thread(g, p); read_unlock_irqrestore(&tasklist_lock, flags); } void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) { s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, spread, rq0_min_vruntime, spread0; struct rq *rq = cpu_rq(cpu); struct sched_entity *last; unsigned long flags; #ifdef CONFIG_FAIR_GROUP_SCHED SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg)); #else SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); #endif SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", SPLIT_NS(cfs_rq->exec_clock)); 
raw_spin_lock_irqsave(&rq->lock, flags); if (cfs_rq->rb_leftmost) MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime; last = __pick_last_entity(cfs_rq); if (last) max_vruntime = last->vruntime; min_vruntime = cfs_rq->min_vruntime; rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; raw_spin_unlock_irqrestore(&rq->lock, flags); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", SPLIT_NS(MIN_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", SPLIT_NS(min_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime", SPLIT_NS(max_vruntime)); spread = max_vruntime - MIN_vruntime; SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread)); spread0 = min_vruntime - rq0_min_vruntime; SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0", SPLIT_NS(spread0)); SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over", cfs_rq->nr_spread_over); SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); #ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_SMP SEQ_printf(m, " .%-30s: %lld\n", "runnable_load_avg", cfs_rq->runnable_load_avg); SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg", cfs_rq->blocked_load_avg); SEQ_printf(m, " .%-30s: %lld\n", "tg_load_avg", (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg)); SEQ_printf(m, " .%-30s: %lld\n", "tg_load_contrib", cfs_rq->tg_load_contrib); SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib", cfs_rq->tg_runnable_contrib); SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg", atomic_read(&cfs_rq->tg->runnable_avg)); #endif #ifdef CONFIG_CFS_BANDWIDTH SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active", cfs_rq->tg->cfs_bandwidth.timer_active); SEQ_printf(m, " .%-30s: %d\n", "throttled", cfs_rq->throttled); SEQ_printf(m, " .%-30s: %d\n", "throttle_count", cfs_rq->throttle_count); #endif print_cfs_group_stats(m, cpu, cfs_rq->tg); #endif } void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) { #ifdef CONFIG_RT_GROUP_SCHED 
SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg)); #else SEQ_printf(m, "\nrt_rq[%d]:\n", cpu); #endif #define P(x) \ SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x)) #define PN(x) \ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x)) P(rt_nr_running); P(rt_throttled); PN(rt_time); PN(rt_runtime); #undef PN #undef P } extern __read_mostly int sched_clock_running; static void print_cpu(struct seq_file *m, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; #ifdef CONFIG_X86 { unsigned int freq = cpu_khz ? : 1; SEQ_printf(m, "cpu#%d, %u.%03u MHz\n", cpu, freq / 1000, (freq % 1000)); } #else SEQ_printf(m, "cpu#%d\n", cpu); #endif #define P(x) \ do { \ if (sizeof(rq->x) == 4) \ SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \ else \ SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\ } while (0) #define PN(x) \ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x)) P(nr_running); SEQ_printf(m, " .%-30s: %lu\n", "load", rq->load.weight); P(nr_switches); P(nr_load_updates); P(nr_uninterruptible); PN(next_balance); P(curr->pid); PN(clock); P(cpu_load[0]); P(cpu_load[1]); P(cpu_load[2]); P(cpu_load[3]); P(cpu_load[4]); #ifdef CONFIG_SMP P(cpu_power); #endif #ifdef CONFIG_SCHED_HMP P(load_scale_factor); P(capacity); P(max_possible_capacity); P(efficiency); P(cur_freq); P(max_freq); #endif #ifdef CONFIG_SCHED_HMP P(nr_big_tasks); P(nr_small_tasks); #endif #undef P #undef PN #ifdef CONFIG_SCHEDSTATS #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); #define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n); P(yld_count); P(sched_count); P(sched_goidle); #ifdef CONFIG_SMP P64(avg_idle); #endif P(ttwu_count); P(ttwu_local); #undef P #undef P64 #endif spin_lock_irqsave(&sched_debug_lock, flags); print_cfs_stats(m, cpu); print_rt_stats(m, cpu); rcu_read_lock(); print_rq(m, rq, cpu); rcu_read_unlock(); spin_unlock_irqrestore(&sched_debug_lock, flags); SEQ_printf(m, "\n"); } static const char 
*sched_tunable_scaling_names[] = { "none", "logaritmic", "linear" }; static void sched_debug_header(struct seq_file *m) { u64 ktime, sched_clk, cpu_clk; unsigned long flags; local_irq_save(flags); ktime = ktime_to_ns(ktime_get()); sched_clk = sched_clock(); cpu_clk = local_clock(); local_irq_restore(flags); SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n", init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); #define P(x) \ SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x)) #define PN(x) \ SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) PN(ktime); PN(sched_clk); PN(cpu_clk); P(jiffies); #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK P(sched_clock_stable); #endif #undef PN #undef P SEQ_printf(m, "\n"); SEQ_printf(m, "sysctl_sched\n"); #define P(x) \ SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x)) #define PN(x) \ SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) PN(sysctl_sched_latency); PN(sysctl_sched_min_granularity); PN(sysctl_sched_wakeup_granularity); P(sysctl_sched_child_runs_first); P(sysctl_sched_features); #ifdef CONFIG_SCHED_HMP P(sched_mostly_idle_load); P(sched_small_task); P(sched_upmigrate); P(sched_downmigrate); P(sched_init_task_load_windows); P(sched_init_task_load_pelt); P(min_capacity); P(max_capacity); P(sched_use_pelt); P(sched_ravg_window); #endif #undef PN #undef P SEQ_printf(m, " .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling", sysctl_sched_tunable_scaling, sched_tunable_scaling_names[sysctl_sched_tunable_scaling]); SEQ_printf(m, "\n"); } static int sched_debug_show(struct seq_file *m, void *v) { int cpu = (unsigned long)(v - 2); if (cpu != -1) print_cpu(m, cpu); else sched_debug_header(m); return 0; } #ifdef CONFIG_SYSRQ_SCHED_DEBUG void sysrq_sched_debug_show(void) { int cpu; sched_debug_header(NULL); for_each_online_cpu(cpu) print_cpu(NULL, cpu); } #endif static void *sched_debug_start(struct seq_file *file, loff_t *offset) { unsigned long n = *offset; if (n == 0) return (void *) 1; 
n--; if (n > 0) n = cpumask_next(n - 1, cpu_online_mask); else n = cpumask_first(cpu_online_mask); *offset = n + 1; if (n < nr_cpu_ids) return (void *)(unsigned long)(n + 2); return NULL; } static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset) { (*offset)++; return sched_debug_start(file, offset); } static void sched_debug_stop(struct seq_file *file, void *data) { } static const struct seq_operations sched_debug_sops = { .start = sched_debug_start, .next = sched_debug_next, .stop = sched_debug_stop, .show = sched_debug_show, }; static int sched_debug_release(struct inode *inode, struct file *file) { seq_release(inode, file); return 0; } static int sched_debug_open(struct inode *inode, struct file *filp) { int ret = 0; ret = seq_open(filp, &sched_debug_sops); return ret; } static const struct file_operations sched_debug_fops = { .open = sched_debug_open, .read = seq_read, .llseek = seq_lseek, .release = sched_debug_release, }; static int __init init_sched_debug_procfs(void) { struct proc_dir_entry *pe; pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops); if (!pe) return -ENOMEM; return 0; } __initcall(init_sched_debug_procfs); void proc_sched_show_task(struct task_struct *p, struct seq_file *m) { unsigned long nr_switches; unsigned int load_avg; load_avg = pct_task_load(p); SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, get_nr_threads(p)); SEQ_printf(m, "---------------------------------------------------------\n"); #define __P(F) \ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F) #define P(F) \ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F) #define __PN(F) \ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F)) #define PN(F) \ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F)) PN(se.exec_start); PN(se.vruntime); PN(se.sum_exec_runtime); nr_switches = p->nvcsw + p->nivcsw; #ifdef CONFIG_SCHEDSTATS PN(se.statistics.wait_start); PN(se.statistics.sleep_start); 
PN(se.statistics.block_start); PN(se.statistics.sleep_max); PN(se.statistics.block_max); PN(se.statistics.exec_max); PN(se.statistics.slice_max); PN(se.statistics.wait_max); PN(se.statistics.wait_sum); P(se.statistics.wait_count); PN(se.statistics.iowait_sum); P(se.statistics.iowait_count); P(se.nr_migrations); P(se.statistics.nr_migrations_cold); P(se.statistics.nr_failed_migrations_affine); P(se.statistics.nr_failed_migrations_running); P(se.statistics.nr_failed_migrations_hot); P(se.statistics.nr_forced_migrations); P(se.statistics.nr_wakeups); P(se.statistics.nr_wakeups_sync); P(se.statistics.nr_wakeups_migrate); P(se.statistics.nr_wakeups_local); P(se.statistics.nr_wakeups_remote); P(se.statistics.nr_wakeups_affine); P(se.statistics.nr_wakeups_affine_attempts); P(se.statistics.nr_wakeups_passive); P(se.statistics.nr_wakeups_idle); #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED) __P(load_avg); #ifdef CONFIG_SCHED_HMP P(ravg.demand); P(se.avg.runnable_avg_sum_scaled); #endif P(se.avg.runnable_avg_sum); P(se.avg.runnable_avg_period); #endif { u64 avg_atom, avg_per_cpu; avg_atom = p->se.sum_exec_runtime; if (nr_switches) do_div(avg_atom, nr_switches); else avg_atom = -1LL; avg_per_cpu = p->se.sum_exec_runtime; if (p->se.nr_migrations) { avg_per_cpu = div64_u64(avg_per_cpu, p->se.nr_migrations); } else { avg_per_cpu = -1LL; } __PN(avg_atom); __PN(avg_per_cpu); } #endif __P(nr_switches); SEQ_printf(m, "%-35s:%21Ld\n", "nr_voluntary_switches", (long long)p->nvcsw); SEQ_printf(m, "%-35s:%21Ld\n", "nr_involuntary_switches", (long long)p->nivcsw); P(se.load.weight); P(policy); P(prio); #undef PN #undef __PN #undef P #undef __P { unsigned int this_cpu = raw_smp_processor_id(); u64 t0, t1; t0 = cpu_clock(this_cpu); t1 = cpu_clock(this_cpu); SEQ_printf(m, "%-35s:%21Ld\n", "clock-delta", (long long)(t1-t0)); } } void proc_sched_set_task(struct task_struct *p) { #ifdef CONFIG_SCHEDSTATS memset(&p->se.statistics, 0, sizeof(p->se.statistics)); #endif }
gpl-2.0
museolab/augmented_sandbox
src/Vrui-3.1-002/Comm/ListeningTCPSocket.cpp
7
4730
/*********************************************************************** ListeningTCPSocket - Class for TCP half-sockets that can accept incoming connections. Copyright (c) 2011 Oliver Kreylos This file is part of the Portable Communications Library (Comm). The Portable Communications Library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. The Portable Communications Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with the Portable Communications Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ***********************************************************************/ #include <Comm/ListeningTCPSocket.h> #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <netdb.h> #include <arpa/inet.h> #include <Misc/ThrowStdErr.h> #include <errno.h> #include <unistd.h> #include <sys/time.h> namespace Comm { /*********************************** Methods of class ListeningTCPSocket: ***********************************/ ListeningTCPSocket::ListeningTCPSocket(int portId,int backlog) :fd(-1) { /* Create the socket file descriptor: */ fd=socket(PF_INET,SOCK_STREAM,0); if(fd<0) Misc::throwStdErr("Comm::ListeningTCPSocket: Unable to create socket"); /* Bind the socket file descriptor to the port ID: */ struct sockaddr_in socketAddress; socketAddress.sin_family=AF_INET; socketAddress.sin_port=portId>=0?htons(portId):0; socketAddress.sin_addr.s_addr=htonl(INADDR_ANY); if(bind(fd,(struct sockaddr*)&socketAddress,sizeof(struct sockaddr_in))==-1) { 
close(fd); fd=-1; Misc::throwStdErr("Comm::ListeningTCPSocket: Unable to bind socket to port %d",portId); } /* Start listening on the socket: */ if(listen(fd,backlog)==-1) { close(fd); fd=-1; Misc::throwStdErr("Comm::ListeningTCPSocket: Unable to start listening on socket"); } } ListeningTCPSocket::~ListeningTCPSocket(void) { if(fd>=0) close(fd); } int ListeningTCPSocket::getPortId(void) const { struct sockaddr_in socketAddress; #ifdef __SGI_IRIX__ int socketAddressLen=sizeof(struct sockaddr_in); #else socklen_t socketAddressLen=sizeof(struct sockaddr_in); #endif if(getsockname(fd,(struct sockaddr*)&socketAddress,&socketAddressLen)==-1) Misc::throwStdErr("ListeningTCPSocket::getPortId: Unable to query socket's port ID"); return ntohs(socketAddress.sin_port); } std::string ListeningTCPSocket::getAddress(void) const { struct sockaddr_in socketAddress; #ifdef __SGI_IRIX__ int socketAddressLen=sizeof(struct sockaddr_in); #else socklen_t socketAddressLen=sizeof(struct sockaddr_in); #endif if(getsockname(fd,(struct sockaddr*)&socketAddress,&socketAddressLen)==-1) Misc::throwStdErr("ListeningTCPSocket::getAddress: Unable to query socket's interface address"); char resultBuffer[INET_ADDRSTRLEN]; if(inet_ntop(AF_INET,&socketAddress.sin_addr,resultBuffer,INET_ADDRSTRLEN)==0) Misc::throwStdErr("ListeningTCPSocket::getAddress: Unable to convert socket's interface address to dotted notation"); return std::string(resultBuffer); } std::string ListeningTCPSocket::getInterfaceName(bool throwException) const { struct sockaddr_in socketAddress; #ifdef __SGI_IRIX__ int socketAddressLen=sizeof(struct sockaddr_in); #else socklen_t socketAddressLen=sizeof(struct sockaddr_in); #endif if(getsockname(fd,(struct sockaddr*)&socketAddress,&socketAddressLen)==-1) Misc::throwStdErr("ListeningTCPSocket::getInterfaceName: Unable to query socket's interface address"); /* Lookup interface's name: */ std::string result; struct hostent* hostEntry=gethostbyaddr((const 
char*)&socketAddress.sin_addr,sizeof(struct in_addr),AF_INET); if(hostEntry==0) { /* Fall back to returning address in dotted notation or throwing exception: */ char addressBuffer[INET_ADDRSTRLEN]; if(inet_ntop(AF_INET,&socketAddress.sin_addr,addressBuffer,INET_ADDRSTRLEN)==0) Misc::throwStdErr("ListeningTCPSocket::getInterfaceName: Unable to convert socket's interface address to dotted notation"); if(throwException) Misc::throwStdErr("ListeningTCPSocket::getInterfaceName: Cannot resolve interface address %s",addressBuffer); else result=std::string(addressBuffer); } else result=std::string(hostEntry->h_name); return result; } }
gpl-2.0
treznorx/TF201-9.4.2.7
drivers/net/wireless/ath/ath9k/ar9002_phy.c
263
17011
/* * Copyright (c) 2008-2010 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * DOC: Programming Atheros 802.11n analog front end radios * * AR5416 MAC based PCI devices and AR518 MAC based PCI-Express * devices have either an external AR2133 analog front end radio for single * band 2.4 GHz communication or an AR5133 analog front end radio for dual * band 2.4 GHz / 5 GHz communication. * * All devices after the AR5416 and AR5418 family starting with the AR9280 * have their analog front radios, MAC/BB and host PCIe/USB interface embedded * into a single-chip and require less programming. * * The following single-chips exist with a respective embedded radio: * * AR9280 - 11n dual-band 2x2 MIMO for PCIe * AR9281 - 11n single-band 1x2 MIMO for PCIe * AR9285 - 11n single-band 1x1 for PCIe * AR9287 - 11n single-band 2x2 MIMO for PCIe * * AR9220 - 11n dual-band 2x2 MIMO for PCI * AR9223 - 11n single-band 2x2 MIMO for PCI * * AR9287 - 11n single-band 1x1 MIMO for USB */ #include "hw.h" #include "ar9002_phy.h" /** * ar9002_hw_set_channel - set channel on single-chip device * @ah: atheros hardware structure * @chan: * * This is the function to change channel on single-chip devices, that is * all devices after ar9280. 
* * This function takes the channel value in MHz and sets * hardware channel value. Assumes writes have been enabled to analog bus. * * Actual Expression, * * For 2GHz channel, * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17) * (freq_ref = 40MHz) * * For 5GHz channel, * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10) * (freq_ref = 40MHz/(24>>amodeRefSel)) */ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) { u16 bMode, fracMode, aModeRefSel = 0; u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0; struct chan_centers centers; u32 refDivA = 24; ath9k_hw_get_channel_centers(ah, chan, &centers); freq = centers.synth_center; reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL); reg32 &= 0xc0000000; if (freq < 4800) { /* 2 GHz, fractional mode */ u32 txctl; int regWrites = 0; bMode = 1; fracMode = 1; aModeRefSel = 0; channelSel = CHANSEL_2G(freq); if (AR_SREV_9287_11_OR_LATER(ah)) { if (freq == 2484) { /* Enable channel spreading for channel 14 */ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484, 1, regWrites); } else { REG_WRITE_ARRAY(&ah->iniCckfirNormal, 1, regWrites); } } else { txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); if (freq == 2484) { /* Enable channel spreading for channel 14 */ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl | AR_PHY_CCK_TX_CTRL_JAPAN); } else { REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); } } } else { bMode = 0; fracMode = 0; switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) { case 0: if ((freq % 20) == 0) aModeRefSel = 3; else if ((freq % 10) == 0) aModeRefSel = 2; if (aModeRefSel) break; case 1: default: aModeRefSel = 0; /* * Enable 2G (fractional) mode for channels * which are 5MHz spaced. 
*/ fracMode = 1; refDivA = 1; channelSel = CHANSEL_5G(freq); /* RefDivA setting */ REG_RMW_FIELD(ah, AR_AN_SYNTH9, AR_AN_SYNTH9_REFDIVA, refDivA); } if (!fracMode) { ndiv = (freq * (refDivA >> aModeRefSel)) / 60; channelSel = ndiv & 0x1ff; channelFrac = (ndiv & 0xfffffe00) * 2; channelSel = (channelSel << 17) | channelFrac; } } reg32 = reg32 | (bMode << 29) | (fracMode << 28) | (aModeRefSel << 26) | (channelSel); REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32); ah->curchan = chan; ah->curchan_rad_index = -1; return 0; } /** * ar9002_hw_spur_mitigate - convert baseband spur frequency * @ah: atheros hardware structure * @chan: * * For single-chip solutions. Converts to baseband spur frequency given the * input channel frequency and compute register settings below. */ static void ar9002_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan) { int bb_spur = AR_NO_SPUR; int freq; int bin, cur_bin; int bb_spur_off, spur_subchannel_sd; int spur_freq_sd; int spur_delta_phase; int denominator; int upper, lower, cur_vit_mask; int tmp, newVal; int i; static const int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 }; static const int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 }; static const int inc[4] = { 0, 100, 0, 0 }; struct chan_centers centers; int8_t mask_m[123]; int8_t mask_p[123]; int8_t mask_amt; int tmp_mask; int cur_bb_spur; bool is2GHz = IS_CHAN_2GHZ(chan); memset(&mask_m, 0, sizeof(int8_t) * 123); memset(&mask_p, 0, sizeof(int8_t) * 123); ath9k_hw_get_channel_centers(ah, chan, &centers); freq = centers.synth_center; ah->config.spurmode = SPUR_ENABLE_EEPROM; for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); if (AR_NO_SPUR == cur_bb_spur) break; if (is2GHz) cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ; else cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ; cur_bb_spur = 
cur_bb_spur - freq; if (IS_CHAN_HT40(chan)) { if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) && (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) { bb_spur = cur_bb_spur; break; } } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) && (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) { bb_spur = cur_bb_spur; break; } } if (AR_NO_SPUR == bb_spur) { REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, AR_PHY_FORCE_CLKEN_CCK_MRC_MUX); return; } else { REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, AR_PHY_FORCE_CLKEN_CCK_MRC_MUX); } bin = bb_spur * 320; tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); ENABLE_REGWRITE_BUFFER(ah); newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal); newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | AR_PHY_SPUR_REG_ENABLE_MASK_PPM | AR_PHY_SPUR_REG_MASK_RATE_SELECT | AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); REG_WRITE(ah, AR_PHY_SPUR_REG, newVal); if (IS_CHAN_HT40(chan)) { if (bb_spur < 0) { spur_subchannel_sd = 1; bb_spur_off = bb_spur + 10; } else { spur_subchannel_sd = 0; bb_spur_off = bb_spur - 10; } } else { spur_subchannel_sd = 0; bb_spur_off = bb_spur; } if (IS_CHAN_HT40(chan)) spur_delta_phase = ((bb_spur * 262144) / 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE; else spur_delta_phase = ((bb_spur * 524288) / 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE; denominator = IS_CHAN_2GHZ(chan) ? 
44 : 40; spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff; newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); REG_WRITE(ah, AR_PHY_TIMING11, newVal); newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S; REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal); cur_bin = -6000; upper = bin + 100; lower = bin - 100; for (i = 0; i < 4; i++) { int pilot_mask = 0; int chan_mask = 0; int bp = 0; for (bp = 0; bp < 30; bp++) { if ((cur_bin > lower) && (cur_bin < upper)) { pilot_mask = pilot_mask | 0x1 << bp; chan_mask = chan_mask | 0x1 << bp; } cur_bin += 100; } cur_bin += inc[i]; REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); REG_WRITE(ah, chan_mask_reg[i], chan_mask); } cur_vit_mask = 6100; upper = bin + 120; lower = bin - 120; for (i = 0; i < 123; i++) { if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { /* workaround for gcc bug #37014 */ volatile int tmp_v = abs(cur_vit_mask - bin); if (tmp_v < 75) mask_amt = 1; else mask_amt = 0; if (cur_vit_mask < 0) mask_m[abs(cur_vit_mask / 100)] = mask_amt; else mask_p[cur_vit_mask / 100] = mask_amt; } cur_vit_mask -= 100; } tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) | (mask_m[48] << 26) | (mask_m[49] << 24) | (mask_m[50] << 22) | (mask_m[51] << 20) | (mask_m[52] << 18) | (mask_m[53] << 16) | (mask_m[54] << 14) | (mask_m[55] << 12) | (mask_m[56] << 10) | (mask_m[57] << 8) | (mask_m[58] << 6) | (mask_m[59] << 4) | (mask_m[60] << 2) | (mask_m[61] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); tmp_mask = (mask_m[31] << 28) | (mask_m[32] << 26) | (mask_m[33] << 24) | (mask_m[34] << 22) | (mask_m[35] << 20) | (mask_m[36] << 18) | (mask_m[37] << 16) | (mask_m[48] << 14) | (mask_m[39] << 12) | (mask_m[40] << 10) | (mask_m[41] << 8) | (mask_m[42] << 6) | (mask_m[43] << 4) | (mask_m[44] << 2) | (mask_m[45] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); REG_WRITE(ah, 
AR_PHY_MASK2_M_31_45, tmp_mask); tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) | (mask_m[18] << 26) | (mask_m[18] << 24) | (mask_m[20] << 22) | (mask_m[20] << 20) | (mask_m[22] << 18) | (mask_m[22] << 16) | (mask_m[24] << 14) | (mask_m[24] << 12) | (mask_m[25] << 10) | (mask_m[26] << 8) | (mask_m[27] << 6) | (mask_m[28] << 4) | (mask_m[29] << 2) | (mask_m[30] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) | (mask_m[2] << 26) | (mask_m[3] << 24) | (mask_m[4] << 22) | (mask_m[5] << 20) | (mask_m[6] << 18) | (mask_m[7] << 16) | (mask_m[8] << 14) | (mask_m[9] << 12) | (mask_m[10] << 10) | (mask_m[11] << 8) | (mask_m[12] << 6) | (mask_m[13] << 4) | (mask_m[14] << 2) | (mask_m[15] << 0); REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); tmp_mask = (mask_p[15] << 28) | (mask_p[14] << 26) | (mask_p[13] << 24) | (mask_p[12] << 22) | (mask_p[11] << 20) | (mask_p[10] << 18) | (mask_p[9] << 16) | (mask_p[8] << 14) | (mask_p[7] << 12) | (mask_p[6] << 10) | (mask_p[5] << 8) | (mask_p[4] << 6) | (mask_p[3] << 4) | (mask_p[2] << 2) | (mask_p[1] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); tmp_mask = (mask_p[30] << 28) | (mask_p[29] << 26) | (mask_p[28] << 24) | (mask_p[27] << 22) | (mask_p[26] << 20) | (mask_p[25] << 18) | (mask_p[24] << 16) | (mask_p[23] << 14) | (mask_p[22] << 12) | (mask_p[21] << 10) | (mask_p[20] << 8) | (mask_p[19] << 6) | (mask_p[18] << 4) | (mask_p[17] << 2) | (mask_p[16] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask); tmp_mask = (mask_p[45] << 28) | (mask_p[44] << 26) | (mask_p[43] << 24) | (mask_p[42] << 22) | (mask_p[41] << 20) | (mask_p[40] << 18) | (mask_p[39] << 16) | (mask_p[38] << 14) | (mask_p[37] << 12) | (mask_p[36] << 10) | (mask_p[35] << 8) | (mask_p[34] << 6) | (mask_p[33] << 4) | (mask_p[32] << 2) 
| (mask_p[31] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask); tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28) | (mask_p[59] << 26) | (mask_p[58] << 24) | (mask_p[57] << 22) | (mask_p[56] << 20) | (mask_p[55] << 18) | (mask_p[54] << 16) | (mask_p[53] << 14) | (mask_p[52] << 12) | (mask_p[51] << 10) | (mask_p[50] << 8) | (mask_p[49] << 6) | (mask_p[48] << 4) | (mask_p[47] << 2) | (mask_p[46] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); REGWRITE_BUFFER_FLUSH(ah); } static void ar9002_olc_init(struct ath_hw *ah) { u32 i; if (!OLC_FOR_AR9280_20_LATER) return; if (OLC_FOR_AR9287_10_LATER) { REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9, AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL); ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0, AR9287_AN_TXPC0_TXPCMODE, AR9287_AN_TXPC0_TXPCMODE_S, AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE); udelay(100); } else { for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++) ah->originalGain[i] = MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4), AR_PHY_TX_GAIN); ah->PDADCdelta = 0; } } static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { u32 pll; pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); if (chan && IS_CHAN_HALF_RATE(chan)) pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); else if (chan && IS_CHAN_QUARTER_RATE(chan)) pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); if (chan && IS_CHAN_5GHZ(chan)) { if (IS_CHAN_A_FAST_CLOCK(ah, chan)) pll = 0x142c; else if (AR_SREV_9280_20(ah)) pll = 0x2850; else pll |= SM(0x28, AR_RTC_9160_PLL_DIV); } else { pll |= SM(0x2c, AR_RTC_9160_PLL_DIV); } return pll; } static void ar9002_hw_do_getnf(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]) { int16_t nf; nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR); nfarray[0] = sign_extend32(nf, 8); nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR); if (IS_CHAN_HT40(ah->curchan)) nfarray[3] = sign_extend32(nf, 8); if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) return; nf 
= MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR); nfarray[1] = sign_extend32(nf, 8); nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR); if (IS_CHAN_HT40(ah->curchan)) nfarray[4] = sign_extend32(nf, 8); } static void ar9002_hw_set_nf_limits(struct ath_hw *ah) { if (AR_SREV_9285(ah)) { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9285_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9285_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9285_2GHZ; } else if (AR_SREV_9287(ah)) { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9287_2GHZ; } else if (AR_SREV_9271(ah)) { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9271_2GHZ; } else { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9280_2GHZ; ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_5GHZ; ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_5GHZ; ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9280_5GHZ; } } void ar9002_hw_attach_phy_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); priv_ops->set_rf_regs = NULL; priv_ops->rf_alloc_ext_banks = NULL; priv_ops->rf_free_ext_banks = NULL; priv_ops->rf_set_freq = ar9002_hw_set_channel; priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate; priv_ops->olc_init = ar9002_olc_init; priv_ops->compute_pll_control = ar9002_hw_compute_pll_control; priv_ops->do_getnf = ar9002_hw_do_getnf; ar9002_hw_set_nf_limits(ah); } void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf) { u32 regval; regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); antconf->main_lna_conf = (regval & AR_PHY_9285_ANT_DIV_MAIN_LNACONF) >> AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S; antconf->alt_lna_conf = (regval & AR_PHY_9285_ANT_DIV_ALT_LNACONF) >> 
AR_PHY_9285_ANT_DIV_ALT_LNACONF_S; antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >> AR_PHY_9285_FAST_DIV_BIAS_S; } EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_get); void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf) { u32 regval; regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); regval &= ~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF | AR_PHY_9285_ANT_DIV_ALT_LNACONF | AR_PHY_9285_FAST_DIV_BIAS); regval |= ((antconf->main_lna_conf << AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S) & AR_PHY_9285_ANT_DIV_MAIN_LNACONF); regval |= ((antconf->alt_lna_conf << AR_PHY_9285_ANT_DIV_ALT_LNACONF_S) & AR_PHY_9285_ANT_DIV_ALT_LNACONF); regval |= ((antconf->fast_div_bias << AR_PHY_9285_FAST_DIV_BIAS_S) & AR_PHY_9285_FAST_DIV_BIAS); REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval); } EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_set);
gpl-2.0
ivanich/android_kernel_oneplus_msm8996
drivers/char/agp/amd64-agp.c
775
20577
/* * Copyright 2001-2003 SuSE Labs. * Distributed under the GNU public license, v2. * * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge. * It also includes support for the AMD 8151 AGP bridge, * although it doesn't actually do much, as all the real * work is done in the northbridge(s). */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/mmzone.h> #include <asm/page.h> /* PAGE_SIZE */ #include <asm/e820.h> #include <asm/amd_nb.h> #include <asm/gart.h> #include "agp.h" /* NVIDIA K8 registers */ #define NVIDIA_X86_64_0_APBASE 0x10 #define NVIDIA_X86_64_1_APBASE1 0x50 #define NVIDIA_X86_64_1_APLIMIT1 0x54 #define NVIDIA_X86_64_1_APSIZE 0xa8 #define NVIDIA_X86_64_1_APBASE2 0xd8 #define NVIDIA_X86_64_1_APLIMIT2 0xdc /* ULi K8 registers */ #define ULI_X86_64_BASE_ADDR 0x10 #define ULI_X86_64_HTT_FEA_REG 0x50 #define ULI_X86_64_ENU_SCR_REG 0x54 static struct resource *aperture_resource; static bool __initdata agp_try_unsupported = 1; static int agp_bridges_found; static void amd64_tlbflush(struct agp_memory *temp) { amd_flush_garts(); } static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { int i, j, num_entries; long long tmp; int mask_type; struct agp_bridge_data *bridge = mem->bridge; u32 pte; num_entries = agp_num_entries(); if (type != mem->type) return -EINVAL; mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); if (mask_type != 0) return -EINVAL; /* Make sure we can fit the range in the gatt table. */ /* FIXME: could wrap */ if (((unsigned long)pg_start + mem->page_count) > num_entries) return -EINVAL; j = pg_start; /* gatt table should be empty. 
*/ while (j < (pg_start + mem->page_count)) { if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) return -EBUSY; j++; } if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { tmp = agp_bridge->driver->mask_memory(agp_bridge, page_to_phys(mem->pages[i]), mask_type); BUG_ON(tmp & 0xffffff0000000ffcULL); pte = (tmp & 0x000000ff00000000ULL) >> 28; pte |=(tmp & 0x00000000fffff000ULL); pte |= GPTE_VALID | GPTE_COHERENT; writel(pte, agp_bridge->gatt_table+j); readl(agp_bridge->gatt_table+j); /* PCI Posting. */ } amd64_tlbflush(mem); return 0; } /* * This hack alters the order element according * to the size of a long. It sucks. I totally disown this, even * though it does appear to work for the most part. */ static struct aper_size_info_32 amd64_aperture_sizes[7] = { {32, 8192, 3+(sizeof(long)/8), 0 }, {64, 16384, 4+(sizeof(long)/8), 1<<1 }, {128, 32768, 5+(sizeof(long)/8), 1<<2 }, {256, 65536, 6+(sizeof(long)/8), 1<<1 | 1<<2 }, {512, 131072, 7+(sizeof(long)/8), 1<<3 }, {1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3}, {2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3} }; /* * Get the current Aperture size from the x86-64. * Note, that there may be multiple x86-64's, but we just return * the value from the first one we find. The set_size functions * keep the rest coherent anyway. Or at least should do. 
*/ static int amd64_fetch_size(void) { struct pci_dev *dev; int i; u32 temp; struct aper_size_info_32 *values; dev = node_to_amd_nb(0)->misc; if (dev==NULL) return 0; pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp); temp = (temp & 0xe); values = A_SIZE_32(amd64_aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } /* * In a multiprocessor x86-64 system, this function gets * called once for each CPU. */ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table) { u64 aperturebase; u32 tmp; u64 aper_base; /* Address to map to */ pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp); aperturebase = tmp << 25; aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); enable_gart_translation(hammer, gatt_table); return aper_base; } static const struct aper_size_info_32 amd_8151_sizes[7] = { {2048, 524288, 9, 0x00000000 }, /* 0 0 0 0 0 0 */ {1024, 262144, 8, 0x00000400 }, /* 1 0 0 0 0 0 */ {512, 131072, 7, 0x00000600 }, /* 1 1 0 0 0 0 */ {256, 65536, 6, 0x00000700 }, /* 1 1 1 0 0 0 */ {128, 32768, 5, 0x00000720 }, /* 1 1 1 1 0 0 */ {64, 16384, 4, 0x00000730 }, /* 1 1 1 1 1 0 */ {32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */ }; static int amd_8151_configure(void) { unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); int i; if (!amd_nb_has_feature(AMD_NB_GART)) return 0; /* Configure AGP regs in each x86-64 host bridge. 
*/ for (i = 0; i < amd_nb_num(); i++) { agp_bridge->gart_bus_addr = amd64_configure(node_to_amd_nb(i)->misc, gatt_bus); } amd_flush_garts(); return 0; } static void amd64_cleanup(void) { u32 tmp; int i; if (!amd_nb_has_feature(AMD_NB_GART)) return; for (i = 0; i < amd_nb_num(); i++) { struct pci_dev *dev = node_to_amd_nb(i)->misc; /* disable gart translation */ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); tmp &= ~GARTEN; pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp); } } static const struct agp_bridge_driver amd_8151_driver = { .owner = THIS_MODULE, .aperture_sizes = amd_8151_sizes, .size_type = U32_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = amd_8151_configure, .fetch_size = amd64_fetch_size, .cleanup = amd64_cleanup, .tlb_flush = amd64_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = amd64_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; /* Some basic sanity checks for the aperture. */ static int agp_aperture_valid(u64 aper, u32 size) { if (!aperture_valid(aper, size, 32*1024*1024)) return 0; /* Request the Aperture. This catches cases when someone else already put a mapping in there - happens with some very broken BIOS Maybe better to use pci_assign_resource/pci_enable_device instead trusting the bridges? 
*/ if (!aperture_resource && !(aperture_resource = request_mem_region(aper, size, "aperture"))) { printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n"); return 0; } return 1; } /* * W*s centric BIOS sometimes only set up the aperture in the AGP * bridge, not the northbridge. On AMD64 this is handled early * in aperture.c, but when IOMMU is not enabled or we run * on a 32bit kernel this needs to be redone. * Unfortunately it is impossible to fix the aperture here because it's too late * to allocate that much memory. But at least error out cleanly instead of * crashing. */ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap) { u64 aper, nb_aper; int order = 0; u32 nb_order, nb_base; u16 apsize; pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order); nb_order = (nb_order >> 1) & 7; pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base); nb_aper = nb_base << 25; /* Northbridge seems to contain crap. Try the AGP bridge. */ pci_read_config_word(agp, cap+0x14, &apsize); if (apsize == 0xffff) { if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) return 0; return -1; } apsize &= 0xfff; /* Some BIOS use weird encodings not in the AGPv3 table. */ if (apsize & 0xff) apsize |= 0xf00; order = 7 - hweight16(apsize); aper = pci_bus_address(agp, AGP_APERTURE_BAR); /* * On some sick chips APSIZE is 0. 
This means it wants 4G * so let double check that order, and lets trust the AMD NB settings */ if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) { dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n", 32 << order); order = nb_order; } if (nb_order >= order) { if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) return 0; } dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n", aper, 32 << order); if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order)) return -1; gart_set_size_and_enable(nb, order); pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25); return 0; } static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr) { int i; if (amd_cache_northbridges() < 0) return -ENODEV; if (!amd_nb_has_feature(AMD_NB_GART)) return -ENODEV; i = 0; for (i = 0; i < amd_nb_num(); i++) { struct pci_dev *dev = node_to_amd_nb(i)->misc; if (fix_northbridge(dev, pdev, cap_ptr) < 0) { dev_err(&dev->dev, "no usable aperture found\n"); #ifdef __x86_64__ /* should port this to i386 */ dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n"); #endif return -1; } } return 0; } /* Handle AMD 8151 quirks */ static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge) { char *revstring; switch (pdev->revision) { case 0x01: revstring="A0"; break; case 0x02: revstring="A1"; break; case 0x11: revstring="B0"; break; case 0x12: revstring="B1"; break; case 0x13: revstring="B2"; break; case 0x14: revstring="B3"; break; default: revstring="??"; break; } dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring); /* * Work around errata. 
* Chips before B2 stepping incorrectly reporting v3.5 */ if (pdev->revision < 0x13) { dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n"); bridge->major_version = 3; bridge->minor_version = 0; } } static const struct aper_size_info_32 uli_sizes[7] = { {256, 65536, 6, 10}, {128, 32768, 5, 9}, {64, 16384, 4, 8}, {32, 8192, 3, 7}, {16, 4096, 2, 6}, {8, 2048, 1, 4}, {4, 1024, 0, 3} }; static int uli_agp_init(struct pci_dev *pdev) { u32 httfea,baseaddr,enuscr; struct pci_dev *dev1; int i, ret; unsigned size = amd64_fetch_size(); dev_info(&pdev->dev, "setting up ULi AGP\n"); dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0)); if (dev1 == NULL) { dev_info(&pdev->dev, "can't find ULi secondary device\n"); return -ENODEV; } for (i = 0; i < ARRAY_SIZE(uli_sizes); i++) if (uli_sizes[i].size == size) break; if (i == ARRAY_SIZE(uli_sizes)) { dev_info(&pdev->dev, "no ULi size found for %d\n", size); ret = -ENODEV; goto put; } /* shadow x86-64 registers into ULi registers */ pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE, &httfea); /* if x86-64 aperture base is beyond 4G, exit here */ if ((httfea & 0x7fff) >> (32 - 25)) { ret = -ENODEV; goto put; } httfea = (httfea& 0x7fff) << 25; pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr); baseaddr&= ~PCI_BASE_ADDRESS_MEM_MASK; baseaddr|= httfea; pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr); enuscr= httfea+ (size * 1024 * 1024) - 1; pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); ret = 0; put: pci_dev_put(dev1); return ret; } static const struct aper_size_info_32 nforce3_sizes[5] = { {512, 131072, 7, 0x00000000 }, {256, 65536, 6, 0x00000008 }, {128, 32768, 5, 0x0000000C }, {64, 16384, 4, 0x0000000E }, {32, 8192, 3, 0x0000000F } }; /* Handle shadow device of the Nvidia NForce3 */ /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. 
*/ static int nforce3_agp_init(struct pci_dev *pdev) { u32 tmp, apbase, apbar, aplimit; struct pci_dev *dev1; int i, ret; unsigned size = amd64_fetch_size(); dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0)); if (dev1 == NULL) { dev_info(&pdev->dev, "can't find Nforce3 secondary device\n"); return -ENODEV; } for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++) if (nforce3_sizes[i].size == size) break; if (i == ARRAY_SIZE(nforce3_sizes)) { dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); ret = -ENODEV; goto put; } pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); tmp &= ~(0xf); tmp |= nforce3_sizes[i].size_value; pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); /* shadow x86-64 registers into NVIDIA registers */ pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE, &apbase); /* if x86-64 aperture base is beyond 4G, exit here */ if ( (apbase & 0x7fff) >> (32 - 25) ) { dev_info(&pdev->dev, "aperture base > 4G\n"); ret = -ENODEV; goto put; } apbase = (apbase & 0x7fff) << 25; pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar); apbar &= ~PCI_BASE_ADDRESS_MEM_MASK; apbar |= apbase; pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar); aplimit = apbase + (size * 1024 * 1024) - 1; pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase); pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit); pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); ret = 0; put: pci_dev_put(dev1); return ret; } static int agp_amd64_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; int err; /* The Highlander principle */ if (agp_bridges_found) return -ENODEV; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; /* Could check for AGPv3 here */ bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; if 
(pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == PCI_DEVICE_ID_AMD_8151_0) { amd8151_init(pdev, bridge); } else { dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n", pdev->vendor, pdev->device); } bridge->driver = &amd_8151_driver; bridge->dev = pdev; bridge->capndx = cap_ptr; /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); if (cache_nbs(pdev, cap_ptr) == -1) { agp_put_bridge(bridge); return -ENODEV; } if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { int ret = nforce3_agp_init(pdev); if (ret) { agp_put_bridge(bridge); return ret; } } if (pdev->vendor == PCI_VENDOR_ID_AL) { int ret = uli_agp_init(pdev); if (ret) { agp_put_bridge(bridge); return ret; } } pci_set_drvdata(pdev, bridge); err = agp_add_bridge(bridge); if (err < 0) return err; agp_bridges_found++; return 0; } static void agp_amd64_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); release_mem_region(virt_to_phys(bridge->gatt_table_real), amd64_aperture_sizes[bridge->aperture_size_idx].size); agp_remove_bridge(bridge); agp_put_bridge(bridge); agp_bridges_found--; } #ifdef CONFIG_PM static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state) { pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int agp_amd64_resume(struct pci_dev *pdev) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) nforce3_agp_init(pdev); return amd_8151_configure(); } #endif /* CONFIG_PM */ static struct pci_device_id agp_amd64_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_8151_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* ULi M1689 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AL, .device = PCI_DEVICE_ID_AL_M1689, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8T800Pro */ { .class = 
(PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_K8T800PRO_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8T800 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_8385_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8M800 / K8N800 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_8380_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8M890 / K8N890 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_VT3336, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8T890 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_3238_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8T800/K8M800/K8N800 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_838X_1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* NForce3 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_NVIDIA, .device = PCI_DEVICE_ID_NVIDIA_NFORCE3, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_NVIDIA, .device = PCI_DEVICE_ID_NVIDIA_NFORCE3S, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* SIS 755 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_755, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* SIS 760 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_760, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* ALI/ULI M1695 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), 
.class_mask = ~0, .vendor = PCI_VENDOR_ID_AL, .device = 0x1695, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); static const struct pci_device_id agp_amd64_pci_promisc_table[] = { { PCI_DEVICE_CLASS(0, 0) }, { } }; static struct pci_driver agp_amd64_pci_driver = { .name = "agpgart-amd64", .id_table = agp_amd64_pci_table, .probe = agp_amd64_probe, .remove = agp_amd64_remove, #ifdef CONFIG_PM .suspend = agp_amd64_suspend, .resume = agp_amd64_resume, #endif }; /* Not static due to IOMMU code calling it early. */ int __init agp_amd64_init(void) { int err = 0; if (agp_off) return -EINVAL; err = pci_register_driver(&agp_amd64_pci_driver); if (err < 0) return err; if (agp_bridges_found == 0) { if (!agp_try_unsupported && !agp_try_unsupported_boot) { printk(KERN_INFO PFX "No supported AGP bridge found.\n"); #ifdef MODULE printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n"); #else printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); #endif pci_unregister_driver(&agp_amd64_pci_driver); return -ENODEV; } /* First check that we have at least one AMD64 NB */ if (!pci_dev_present(amd_nb_misc_ids)) { pci_unregister_driver(&agp_amd64_pci_driver); return -ENODEV; } /* Look for any AGP bridge */ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; err = driver_attach(&agp_amd64_pci_driver.driver); if (err == 0 && agp_bridges_found == 0) { pci_unregister_driver(&agp_amd64_pci_driver); err = -ENODEV; } } return err; } static int __init agp_amd64_mod_init(void) { #ifndef MODULE if (gart_iommu_aperture) return agp_bridges_found ? 
0 : -ENODEV; #endif return agp_amd64_init(); } static void __exit agp_amd64_cleanup(void) { #ifndef MODULE if (gart_iommu_aperture) return; #endif if (aperture_resource) release_resource(aperture_resource); pci_unregister_driver(&agp_amd64_pci_driver); } module_init(agp_amd64_mod_init); module_exit(agp_amd64_cleanup); MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen"); module_param(agp_try_unsupported, bool, 0); MODULE_LICENSE("GPL");
gpl-2.0
lukego/linux
drivers/input/misc/cobalt_btns.c
1543
4284
/*
 * Cobalt button interface driver.
 *
 * Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/input-polldev.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define BUTTONS_POLL_INTERVAL	30	/* msec */
#define BUTTONS_COUNT_THRESHOLD	3	/* consecutive polls before report */
#define BUTTONS_STATUS_MASK	0xfe000000

/* Keymap: table index corresponds to the bit position in the status word. */
static const unsigned short cobalt_map[] = {
	KEY_RESERVED,
	KEY_RESTART,
	KEY_LEFT,
	KEY_UP,
	KEY_DOWN,
	KEY_RIGHT,
	KEY_ENTER,
	KEY_SELECT
};

struct buttons_dev {
	struct input_polled_dev *poll_dev;
	unsigned short keymap[ARRAY_SIZE(cobalt_map)];	/* per-device copy of cobalt_map */
	int count[ARRAY_SIZE(cobalt_map)];		/* debounce counters, one per button */
	void __iomem *reg;				/* mapped button status register */
};

/*
 * Poll callback: sample the (active-low) button register and debounce.
 *
 * A key-down event is reported only once a button has been seen held for
 * BUTTONS_COUNT_THRESHOLD consecutive polls; the matching key-up is
 * reported when the button is released after having crossed the threshold.
 */
static void handle_buttons(struct input_polled_dev *dev)
{
	struct buttons_dev *bdev = dev->private;
	struct input_dev *input = dev->input;
	uint32_t status;
	int i;

	/* Invert so that a pressed button reads as a 1 bit. */
	status = ~readl(bdev->reg) >> 24;

	for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++) {
		if (status & (1U << i)) {
			if (++bdev->count[i] == BUTTONS_COUNT_THRESHOLD) {
				input_event(input, EV_MSC, MSC_SCAN, i);
				input_report_key(input, bdev->keymap[i], 1);
				input_sync(input);
			}
		} else {
			if (bdev->count[i] >= BUTTONS_COUNT_THRESHOLD) {
				input_event(input, EV_MSC, MSC_SCAN, i);
				input_report_key(input, bdev->keymap[i], 0);
				input_sync(input);
			}
			bdev->count[i] = 0;
		}
	}
}

/*
 * Probe: allocate device state, describe the input device, map the
 * button register and register the polled input device.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the error paths.
 */
static int cobalt_buttons_probe(struct platform_device *pdev)
{
	struct buttons_dev *bdev;
	struct input_polled_dev *poll_dev;
	struct input_dev *input;
	struct resource *res;
	int error, i;

	bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL);
	poll_dev = input_allocate_polled_device();
	if (!bdev || !poll_dev) {
		error = -ENOMEM;
		goto err_free_mem;
	}

	memcpy(bdev->keymap, cobalt_map, sizeof(bdev->keymap));

	poll_dev->private = bdev;
	poll_dev->poll = handle_buttons;
	poll_dev->poll_interval = BUTTONS_POLL_INTERVAL;

	input = poll_dev->input;
	input->name = "Cobalt buttons";
	input->phys = "cobalt/input0";
	input->id.bustype = BUS_HOST;
	input->dev.parent = &pdev->dev;

	input->keycode = bdev->keymap;
	input->keycodemax = ARRAY_SIZE(bdev->keymap);
	input->keycodesize = sizeof(unsigned short);

	input_set_capability(input, EV_MSC, MSC_SCAN);
	__set_bit(EV_KEY, input->evbit);
	for (i = 0; i < ARRAY_SIZE(cobalt_map); i++)
		__set_bit(bdev->keymap[i], input->keybit);
	__clear_bit(KEY_RESERVED, input->keybit);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		error = -EBUSY;
		goto err_free_mem;
	}

	bdev->poll_dev = poll_dev;
	bdev->reg = ioremap(res->start, resource_size(res));
	if (!bdev->reg) {
		/*
		 * Fix: the original never checked ioremap() failure, so a
		 * NULL mapping would later be dereferenced by readl() in
		 * handle_buttons().
		 */
		error = -ENOMEM;
		goto err_free_mem;
	}
	dev_set_drvdata(&pdev->dev, bdev);

	error = input_register_polled_device(poll_dev);
	if (error)
		goto err_iounmap;

	return 0;

 err_iounmap:
	iounmap(bdev->reg);
 err_free_mem:
	input_free_polled_device(poll_dev);
	kfree(bdev);
	return error;
}

/* Remove: tear down in the reverse order of probe. */
static int cobalt_buttons_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct buttons_dev *bdev = dev_get_drvdata(dev);

	input_unregister_polled_device(bdev->poll_dev);
	input_free_polled_device(bdev->poll_dev);
	iounmap(bdev->reg);
	kfree(bdev);

	return 0;
}

MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("Cobalt button interface driver");
MODULE_LICENSE("GPL");
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:Cobalt buttons");

static struct platform_driver cobalt_buttons_driver = {
	.probe	= cobalt_buttons_probe,
	.remove	= cobalt_buttons_remove,
	.driver	= {
		.name	= "Cobalt buttons",
	},
};
module_platform_driver(cobalt_buttons_driver);
gpl-2.0
ktd2004/linux-stable
arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
1799
1106
/*
 * platform_emc1403.c: emc1403 platform data initilization file
 *
 * (C) Copyright 2013 Intel Corporation
 * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <asm/intel-mid.h>

/*
 * Build the platform data for the emc1403 thermal sensor.
 *
 * Resolves the two sensor GPIOs by their SFI names, wires the primary
 * one into the i2c_board_info IRQ, and hands back a pointer to the
 * second (alert) IRQ number as the platform data.  Returns NULL if
 * either GPIO cannot be resolved.
 */
static void __init *emc1403_platform_data(void *info)
{
	/*
	 * Static storage: the caller keeps the returned pointer as
	 * platform data, so it must outlive this function.
	 */
	static short alert_irq_pdata;
	struct i2c_board_info *i2c_info = info;
	int therm_gpio, alert_gpio;

	therm_gpio = get_gpio_by_name("thermal_int");
	alert_gpio = get_gpio_by_name("thermal_alert");
	if (therm_gpio < 0 || alert_gpio < 0)
		return NULL;

	i2c_info->irq = therm_gpio + INTEL_MID_IRQ_OFFSET;
	alert_irq_pdata = alert_gpio + INTEL_MID_IRQ_OFFSET;

	return &alert_irq_pdata;
}

static const struct devs_id emc1403_dev_id __initconst = {
	.name = "emc1403",
	.type = SFI_DEV_TYPE_I2C,
	.delay = 1,
	.get_platform_data = &emc1403_platform_data,
};

sfi_device(emc1403_dev_id);
gpl-2.0
SkrilaxCZ/android_kernel_moto_asanti_c
arch/arm/mach-exynos4/platsmp.c
1799
4226
/* linux/arch/arm/mach-exynos4/platsmp.c
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/smp_scu.h>
#include <asm/unified.h>

#include <mach/hardware.h>
#include <mach/regs-clock.h>

extern void exynos4_secondary_startup(void);

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 *
 * The write is followed by a barrier, an L1 flush of the line and an
 * outer-cache clean, because the secondary may still be running with
 * caches off when it polls this value.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}

/* Virtual address of the Snoop Control Unit register block. */
static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}

/* Serialises the boot handshake between the boot CPU and a secondary. */
static DEFINE_SPINLOCK(boot_lock);

void __cpuinit platform_secondary_init(unsigned int cpu)
{
	/*
	 * if any interrupts are already enabled for the primary
	 * core (e.g. timer irq), then they will not have been enabled
	 * for us: do so
	 */
	gic_secondary_init(0);

	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread: boot_secondary() holds
	 * boot_lock for the whole release sequence, so taking and
	 * dropping it here blocks until that sequence has finished.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	/* Poll for up to one second for the secondary to clear the pen. */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	/* Non-zero pen_release here means the secondary never came up. */
	return pen_release != -1 ? -ENOSYS : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 *
 * The core count is read from the SCU; if no SCU mapping is available
 * we conservatively assume a single core.
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "EXYNOS4: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}

void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 */
	__raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
			S5P_VA_SYSRAM);
}
gpl-2.0
gianogli/boeffla-kernel-cm-s3-7
drivers/net/wireless/b43/tables_lpphy.c
2311
153877
/* Broadcom B43 wireless driver IEEE 802.11a/g LP-PHY and radio device data tables Copyright (c) 2009 Michael Buesch <mb@bu3sch.de> Copyright (c) 2009 Gábor Stefanik <netrolller.3d@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "tables_lpphy.h" #include "phy_common.h" #include "phy_lp.h" /* Entry of the 2062/2063 radio init table */ struct b206x_init_tab_entry { u16 offset; u16 value_a; u16 value_g; u8 flags; }; #define B206X_FLAG_A 0x01 /* Flag: Init in A mode */ #define B206X_FLAG_G 0x02 /* Flag: Init in G mode */ static const struct b206x_init_tab_entry b2062_init_tab[] = { /* { .offset = B2062_N_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = 0x0001, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_N_COMM4, .value_a = 0x0001, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_N_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = 
B2062_N_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_COMM15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_PDN_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_N_PDN_CTL1, .value_a = 0x0000, .value_g = 0x00CA, .flags = B206X_FLAG_G, }, /* { .offset = B2062_N_PDN_CTL2, .value_a = 0x0018, .value_g = 0x0018, .flags = 0, }, */ { .offset = B2062_N_PDN_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_N_PDN_CTL4, .value_a = 0x0015, .value_g = 0x002A, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_N_GEN_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_IQ_CALIB, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ { .offset = B2062_N_LGENC, .value_a = 0x00DB, .value_g = 0x00FF, .flags = B206X_FLAG_A, }, /* { .offset = B2062_N_LGENA_LPF, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ /* { .offset = B2062_N_LGENA_BIAS0, .value_a = 0x0041, .value_g = 0x0041, .flags = 0, }, */ /* { .offset = B2062_N_LGNEA_BIAS1, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */ /* { .offset = B2062_N_LGENA_CTL0, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */ /* { .offset = B2062_N_LGENA_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_LGENA_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = 
B2062_N_LGENA_TUNE0, .value_a = 0x00DD, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_N_LGENA_TUNE1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_N_LGENA_TUNE2, .value_a = 0x00DD, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_N_LGENA_TUNE3, .value_a = 0x0077, .value_g = 0x00B5, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_N_LGENA_CTL3, .value_a = 0x0000, .value_g = 0x00FF, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_N_LGENA_CTL4, .value_a = 0x001F, .value_g = 0x001F, .flags = 0, }, */ /* { .offset = B2062_N_LGENA_CTL5, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */ /* { .offset = B2062_N_LGENA_CTL6, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */ { .offset = B2062_N_LGENA_CTL7, .value_a = 0x0033, .value_g = 0x0033, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_N_RXA_CTL0, .value_a = 0x0009, .value_g = 0x0009, .flags = 0, }, */ { .offset = B2062_N_RXA_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, /* { .offset = B2062_N_RXA_CTL2, .value_a = 0x0018, .value_g = 0x0018, .flags = 0, }, */ /* { .offset = B2062_N_RXA_CTL3, .value_a = 0x0027, .value_g = 0x0027, .flags = 0, }, */ /* { .offset = B2062_N_RXA_CTL4, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */ /* { .offset = B2062_N_RXA_CTL5, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */ /* { .offset = B2062_N_RXA_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_RXA_CTL7, .value_a = 0x0008, .value_g = 0x0008, .flags = 0, }, */ { .offset = B2062_N_RXBB_CTL0, .value_a = 0x0082, .value_g = 0x0080, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_N_RXBB_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_GAIN0, .value_a = 0x0000, .value_g = 0x0000, .flags = 
0, }, */ { .offset = B2062_N_RXBB_GAIN1, .value_a = 0x0004, .value_g = 0x0004, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_N_RXBB_GAIN2, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_N_RXBB_GAIN3, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_RSSI0, .value_a = 0x0043, .value_g = 0x0043, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_RSSI1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_CALIB0, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_CALIB1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_CALIB2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_BIAS0, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_BIAS1, .value_a = 0x002A, .value_g = 0x002A, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_BIAS2, .value_a = 0x00AA, .value_g = 0x00AA, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_BIAS3, .value_a = 0x0021, .value_g = 0x0021, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_BIAS4, .value_a = 0x00AA, .value_g = 0x00AA, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_BIAS5, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_RSSI2, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_RSSI3, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_RSSI4, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ /* { .offset = B2062_N_RXBB_RSSI5, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2062_N_TX_CTL0, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ /* { .offset = B2062_N_TX_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_TX_CTL2, .value_a = 0x0084, .value_g = 0x0084, .flags = 0, }, */ /* { .offset = B2062_N_TX_CTL3, .value_a = 
0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_N_TX_CTL4, .value_a = 0x0003, .value_g = 0x0003, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_N_TX_CTL5, .value_a = 0x0002, .value_g = 0x0002, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_N_TX_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_TX_CTL7, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */ /* { .offset = B2062_N_TX_CTL8, .value_a = 0x0082, .value_g = 0x0082, .flags = 0, }, */ /* { .offset = B2062_N_TX_CTL9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_TX_CTL_A, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_TX_GC2G, .value_a = 0x00FF, .value_g = 0x00FF, .flags = 0, }, */ /* { .offset = B2062_N_TX_GC5G, .value_a = 0x00FF, .value_g = 0x00FF, .flags = 0, }, */ { .offset = B2062_N_TX_TUNE, .value_a = 0x0088, .value_g = 0x001B, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_N_TX_PAD, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ /* { .offset = B2062_N_TX_PGA, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ /* { .offset = B2062_N_TX_PADAUX, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2062_N_TX_PGAAUX, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2062_N_TSSI_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_TSSI_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_TSSI_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_IQ_CALIB_CTL0, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2062_N_IQ_CALIB_CTL1, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2062_N_IQ_CALIB_CTL2, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */ /* { .offset = B2062_N_CALIB_TS, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = 
B2062_N_CALIB_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_CALIB_CTL1, .value_a = 0x0015, .value_g = 0x0015, .flags = 0, }, */ /* { .offset = B2062_N_CALIB_CTL2, .value_a = 0x000F, .value_g = 0x000F, .flags = 0, }, */ /* { .offset = B2062_N_CALIB_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_CALIB_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_CALIB_DBG0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_CALIB_DBG1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_CALIB_DBG2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_CALIB_DBG3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_PSENSE_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_PSENSE_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_PSENSE_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_N_TEST_BUF0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RADIO_ID_CODE, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_S_COMM4, .value_a = 0x0001, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = 
B2062_S_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_COMM15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_S_PDS_CTL0, .value_a = 0x00FF, .value_g = 0x00FF, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_PDS_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_PDS_CTL2, .value_a = 0x008E, .value_g = 0x008E, .flags = 0, }, */ /* { .offset = B2062_S_PDS_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_BG_CTL0, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ /* { .offset = B2062_S_BG_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_BG_CTL2, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */ { .offset = B2062_S_LGENG_CTL0, .value_a = 0x00F8, .value_g = 0x00D8, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_LGENG_CTL1, .value_a = 0x003C, .value_g = 0x0024, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_LGENG_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_LGENG_CTL3, .value_a = 0x0041, .value_g = 0x0041, .flags = 0, }, */ /* { .offset = B2062_S_LGENG_CTL4, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */ /* { .offset = B2062_S_LGENG_CTL5, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2062_S_LGENG_CTL6, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */ /* { .offset = B2062_S_LGENG_CTL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, 
*/ { .offset = B2062_S_LGENG_CTL8, .value_a = 0x0088, .value_g = 0x0080, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_LGENG_CTL9, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ { .offset = B2062_S_LGENG_CTL10, .value_a = 0x0088, .value_g = 0x0080, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_LGENG_CTL11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL1, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL2, .value_a = 0x00AF, .value_g = 0x00AF, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL3, .value_a = 0x0012, .value_g = 0x0012, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL4, .value_a = 0x000B, .value_g = 0x000B, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL5, .value_a = 0x005F, .value_g = 0x005F, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL7, .value_a = 0x0040, .value_g = 0x0040, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL8, .value_a = 0x0052, .value_g = 0x0052, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL9, .value_a = 0x0026, .value_g = 0x0026, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL10, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL11, .value_a = 0x0036, .value_g = 0x0036, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL12, .value_a = 0x0057, .value_g = 0x0057, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL13, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL14, .value_a = 0x0075, .value_g = 0x0075, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL15, .value_a = 0x00B4, .value_g = 0x00B4, .flags = 0, }, */ /* { .offset = B2062_S_REFPLL_CTL16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = 
B2062_S_RFPLL_CTL0, .value_a = 0x0098, .value_g = 0x0098, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL1, .value_a = 0x0010, .value_g = 0x0010, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_RFPLL_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RFPLL_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RFPLL_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_S_RFPLL_CTL5, .value_a = 0x0043, .value_g = 0x0043, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL6, .value_a = 0x0047, .value_g = 0x0047, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL7, .value_a = 0x000C, .value_g = 0x000C, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL8, .value_a = 0x0011, .value_g = 0x0011, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL9, .value_a = 0x0011, .value_g = 0x0011, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL10, .value_a = 0x000E, .value_g = 0x000E, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL11, .value_a = 0x0008, .value_g = 0x0008, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL12, .value_a = 0x0033, .value_g = 0x0033, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL13, .value_a = 0x000A, .value_g = 0x000A, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL14, .value_a = 0x0006, .value_g = 0x0006, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_RFPLL_CTL15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RFPLL_CTL16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RFPLL_CTL17, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_S_RFPLL_CTL18, .value_a = 0x003E, .value_g = 0x003E, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = 
B2062_S_RFPLL_CTL19, .value_a = 0x0013, .value_g = 0x0013, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_RFPLL_CTL20, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_S_RFPLL_CTL21, .value_a = 0x0062, .value_g = 0x0062, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL22, .value_a = 0x0007, .value_g = 0x0007, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL23, .value_a = 0x0016, .value_g = 0x0016, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL24, .value_a = 0x005C, .value_g = 0x005C, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL25, .value_a = 0x0095, .value_g = 0x0095, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_RFPLL_CTL26, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RFPLL_CTL27, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RFPLL_CTL28, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RFPLL_CTL29, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_S_RFPLL_CTL30, .value_a = 0x00A0, .value_g = 0x00A0, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL31, .value_a = 0x0004, .value_g = 0x0004, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_RFPLL_CTL32, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2062_S_RFPLL_CTL33, .value_a = 0x00CC, .value_g = 0x00CC, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2062_S_RFPLL_CTL34, .value_a = 0x0007, .value_g = 0x0007, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2062_S_RXG_CNT0, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT3, .value_a = 0x0000, .value_g = 0x0000, 
.flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT5, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT6, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT7, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ { .offset = B2062_S_RXG_CNT8, .value_a = 0x000F, .value_g = 0x000F, .flags = B206X_FLAG_A, }, /* { .offset = B2062_S_RXG_CNT9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT10, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT11, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT12, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT13, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT14, .value_a = 0x00A0, .value_g = 0x00A0, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT15, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2062_S_RXG_CNT17, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ }; static const struct b206x_init_tab_entry b2063_init_tab[] = { { .offset = B2063_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, /* { .offset = B2063_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM8, .value_a = 0x0000, .value_g = 
0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_COMM10, .value_a = 0x0001, .value_g = 0x0000, .flags = B206X_FLAG_A, }, /* { .offset = B2063_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_COMM14, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ /* { .offset = B2063_COMM15, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */ { .offset = B2063_COMM16, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, { .offset = B2063_COMM17, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, { .offset = B2063_COMM18, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, { .offset = B2063_COMM19, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, { .offset = B2063_COMM20, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, { .offset = B2063_COMM21, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, { .offset = B2063_COMM22, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, { .offset = B2063_COMM23, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, { .offset = B2063_COMM24, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, /* { .offset = B2063_PWR_SWITCH_CTL, .value_a = 0x007f, .value_g = 0x007f, .flags = 0, }, */ /* { .offset = B2063_PLL_SP1, .value_a = 0x003f, .value_g = 0x003f, .flags = 0, }, */ /* { .offset = B2063_PLL_SP2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_LOGEN_SP1, .value_a = 0x00e8, .value_g = 0x00d4, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2063_LOGEN_SP2, .value_a = 0x00a7, .value_g = 0x0053, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_LOGEN_SP3, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, 
}, */ { .offset = B2063_LOGEN_SP4, .value_a = 0x00f0, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_LOGEN_SP5, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ { .offset = B2063_G_RX_SP1, .value_a = 0x001f, .value_g = 0x005e, .flags = B206X_FLAG_G, }, { .offset = B2063_G_RX_SP2, .value_a = 0x007f, .value_g = 0x007e, .flags = B206X_FLAG_G, }, { .offset = B2063_G_RX_SP3, .value_a = 0x0030, .value_g = 0x00f0, .flags = B206X_FLAG_G, }, /* { .offset = B2063_G_RX_SP4, .value_a = 0x0035, .value_g = 0x0035, .flags = 0, }, */ /* { .offset = B2063_G_RX_SP5, .value_a = 0x003f, .value_g = 0x003f, .flags = 0, }, */ /* { .offset = B2063_G_RX_SP6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_G_RX_SP7, .value_a = 0x007f, .value_g = 0x007f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_G_RX_SP8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_SP9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_G_RX_SP10, .value_a = 0x000c, .value_g = 0x000c, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_G_RX_SP11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_A_RX_SP1, .value_a = 0x003c, .value_g = 0x003f, .flags = B206X_FLAG_A, }, { .offset = B2063_A_RX_SP2, .value_a = 0x00fc, .value_g = 0x00fe, .flags = B206X_FLAG_A, }, /* { .offset = B2063_A_RX_SP3, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ /* { .offset = B2063_A_RX_SP4, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ /* { .offset = B2063_A_RX_SP5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_SP6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_A_RX_SP7, .value_a = 0x0008, .value_g = 0x0008, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_RX_BB_SP1, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */ /* { .offset = B2063_RX_BB_SP2, .value_a = 0x0022, 
.value_g = 0x0022, .flags = 0, }, */ /* { .offset = B2063_RX_BB_SP3, .value_a = 0x00a8, .value_g = 0x00a8, .flags = 0, }, */ { .offset = B2063_RX_BB_SP4, .value_a = 0x0060, .value_g = 0x0060, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_RX_BB_SP5, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */ /* { .offset = B2063_RX_BB_SP6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_RX_BB_SP7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_RX_BB_SP8, .value_a = 0x0030, .value_g = 0x0030, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_TX_RF_SP1, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP2, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ { .offset = B2063_TX_RF_SP3, .value_a = 0x000c, .value_g = 0x000b, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2063_TX_RF_SP4, .value_a = 0x0010, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_TX_RF_SP5, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP6, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP7, .value_a = 0x0068, .value_g = 0x0068, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP8, .value_a = 0x0068, .value_g = 0x0068, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP9, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP10, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP11, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP12, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP13, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP14, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP15, .value_a = 0x00c0, .value_g = 0x00c0, .flags = 0, }, */ /* { .offset = 
B2063_TX_RF_SP16, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ /* { .offset = B2063_TX_RF_SP17, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ { .offset = B2063_PA_SP1, .value_a = 0x003d, .value_g = 0x00fd, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_PA_SP2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */ /* { .offset = B2063_PA_SP3, .value_a = 0x0096, .value_g = 0x0096, .flags = 0, }, */ /* { .offset = B2063_PA_SP4, .value_a = 0x005a, .value_g = 0x005a, .flags = 0, }, */ /* { .offset = B2063_PA_SP5, .value_a = 0x007f, .value_g = 0x007f, .flags = 0, }, */ /* { .offset = B2063_PA_SP6, .value_a = 0x007f, .value_g = 0x007f, .flags = 0, }, */ /* { .offset = B2063_PA_SP7, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ { .offset = B2063_TX_BB_SP1, .value_a = 0x0002, .value_g = 0x0002, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_TX_BB_SP2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_TX_BB_SP3, .value_a = 0x0030, .value_g = 0x0030, .flags = 0, }, */ /* { .offset = B2063_REG_SP1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_BANDGAP_CTL1, .value_a = 0x0056, .value_g = 0x0056, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_BANDGAP_CTL2, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ /* { .offset = B2063_LPO_CTL1, .value_a = 0x000e, .value_g = 0x000e, .flags = 0, }, */ /* { .offset = B2063_RC_CALIB_CTL1, .value_a = 0x007e, .value_g = 0x007e, .flags = 0, }, */ /* { .offset = B2063_RC_CALIB_CTL2, .value_a = 0x0015, .value_g = 0x0015, .flags = 0, }, */ /* { .offset = B2063_RC_CALIB_CTL3, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */ /* { .offset = B2063_RC_CALIB_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_RC_CALIB_CTL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_RC_CALIB_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { 
.offset = B2063_RC_CALIB_CTL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_RC_CALIB_CTL8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_RC_CALIB_CTL9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_RC_CALIB_CTL10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_CALNRST, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_IN_PLL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_IN_PLL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_CP1, .value_a = 0x00cf, .value_g = 0x00cf, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_CP2, .value_a = 0x0059, .value_g = 0x0059, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_CP3, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_CP4, .value_a = 0x0042, .value_g = 0x0042, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_LF1, .value_a = 0x00db, .value_g = 0x00db, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_LF2, .value_a = 0x0094, .value_g = 0x0094, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_LF3, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_LF4, .value_a = 0x0063, .value_g = 0x0063, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_SG1, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_SG2, .value_a = 0x00d3, .value_g = 0x00d3, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_SG3, .value_a = 0x00b1, .value_g = 0x00b1, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_SG4, .value_a = 0x003b, .value_g = 0x003b, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_SG5, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ /* { .offset = 
B2063_PLL_JTAG_PLL_VCO1, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */ { .offset = B2063_PLL_JTAG_PLL_VCO2, .value_a = 0x00f7, .value_g = 0x00f7, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB3, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB5, .value_a = 0x0009, .value_g = 0x0009, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB6, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB7, .value_a = 0x0016, .value_g = 0x0016, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB8, .value_a = 0x006b, .value_g = 0x006b, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB10, .value_a = 0x00b3, .value_g = 0x00b3, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_XTAL_12, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */ /* { .offset = B2063_PLL_JTAG_PLL_XTAL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LOGEN_ACL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LOGEN_ACL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LOGEN_ACL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LOGEN_ACL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LOGEN_ACL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_INPUTS, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CTL1, .value_a = 
0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_WAITCNT, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_OVR1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_OVR2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_OVAL1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_OVAL2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_OVAL3, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_OVAL4, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_OVAL5, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_OVAL6, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_OVAL7, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CALVLD1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CALVLD2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CVAL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CVAL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CVAL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CVAL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CVAL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CVAL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LO_CALIB_CVAL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ 
/* { .offset = B2063_LOGEN_CALIB_EN, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LOGEN_PEAKDET1, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ /* { .offset = B2063_LOGEN_RCCR1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LOGEN_VCOBUF1, .value_a = 0x0060, .value_g = 0x0060, .flags = 0, }, */ /* { .offset = B2063_LOGEN_MIXER1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LOGEN_MIXER2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */ /* { .offset = B2063_LOGEN_BUF1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LOGEN_BUF2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */ /* { .offset = B2063_LOGEN_DIV1, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ /* { .offset = B2063_LOGEN_DIV2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LOGEN_DIV3, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LOGEN_CBUFRX1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LOGEN_CBUFRX2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LOGEN_CBUFTX1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LOGEN_CBUFTX2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ /* { .offset = B2063_LOGEN_IDAC1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LOGEN_SPARE1, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ /* { .offset = B2063_LOGEN_SPARE2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_LOGEN_SPARE3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_1ST1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2063_G_RX_1ST2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_1ST3, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ /* { 
.offset = B2063_G_RX_2ND1, .value_a = 0x0030, .value_g = 0x0030, .flags = 0, }, */ /* { .offset = B2063_G_RX_2ND2, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2063_G_RX_2ND3, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2063_G_RX_2ND4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_2ND5, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2063_G_RX_2ND6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_2ND7, .value_a = 0x0035, .value_g = 0x0035, .flags = 0, }, */ /* { .offset = B2063_G_RX_2ND8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_PS1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2063_G_RX_PS2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_PS3, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2063_G_RX_PS4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_PS5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_MIX1, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */ /* { .offset = B2063_G_RX_MIX2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_G_RX_MIX3, .value_a = 0x0071, .value_g = 0x0071, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2063_G_RX_MIX4, .value_a = 0x0071, .value_g = 0x0071, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_G_RX_MIX5, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ /* { .offset = B2063_G_RX_MIX6, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ /* { .offset = B2063_G_RX_MIX7, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */ /* { .offset = B2063_G_RX_MIX8, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ /* { .offset = B2063_G_RX_PDET1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = 
B2063_G_RX_SPARES1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_SPARES2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_G_RX_SPARES3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_1ST1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_A_RX_1ST2, .value_a = 0x00f0, .value_g = 0x0030, .flags = B206X_FLAG_A, }, /* { .offset = B2063_A_RX_1ST3, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ /* { .offset = B2063_A_RX_1ST4, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2063_A_RX_1ST5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_2ND1, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ /* { .offset = B2063_A_RX_2ND2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_2ND3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_2ND4, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ /* { .offset = B2063_A_RX_2ND5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_2ND6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_2ND7, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ /* { .offset = B2063_A_RX_PS1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_PS2, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2063_A_RX_PS3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_PS4, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ /* { .offset = B2063_A_RX_PS5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_A_RX_PS6, .value_a = 0x0077, .value_g = 0x0077, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_A_RX_MIX1, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ /* { .offset = B2063_A_RX_MIX2, .value_a = 0x0000, 
.value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_MIX3, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */ { .offset = B2063_A_RX_MIX4, .value_a = 0x0003, .value_g = 0x0003, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2063_A_RX_MIX5, .value_a = 0x000f, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, { .offset = B2063_A_RX_MIX6, .value_a = 0x000f, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_A_RX_MIX7, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */ /* { .offset = B2063_A_RX_MIX8, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ /* { .offset = B2063_A_RX_PWRDET1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_SPARE1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_SPARE2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_A_RX_SPARE3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_RX_TIA_CTL1, .value_a = 0x0077, .value_g = 0x0077, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_RX_TIA_CTL2, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */ { .offset = B2063_RX_TIA_CTL3, .value_a = 0x0077, .value_g = 0x0077, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_RX_TIA_CTL4, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */ /* { .offset = B2063_RX_TIA_CTL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_RX_TIA_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_RX_BB_CTL1, .value_a = 0x0074, .value_g = 0x0074, .flags = 0, }, */ { .offset = B2063_RX_BB_CTL2, .value_a = 0x0004, .value_g = 0x0004, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_RX_BB_CTL3, .value_a = 0x00a2, .value_g = 0x00a2, .flags = 0, }, */ /* { .offset = B2063_RX_BB_CTL4, .value_a = 0x00aa, .value_g = 0x00aa, .flags = 0, }, */ /* { .offset = B2063_RX_BB_CTL5, .value_a = 0x0024, 
.value_g = 0x0024, .flags = 0, }, */ /* { .offset = B2063_RX_BB_CTL6, .value_a = 0x00a9, .value_g = 0x00a9, .flags = 0, }, */ /* { .offset = B2063_RX_BB_CTL7, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */ /* { .offset = B2063_RX_BB_CTL8, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */ /* { .offset = B2063_RX_BB_CTL9, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL1, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ /* { .offset = B2063_TX_RF_IDAC_LO_RF_I, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ /* { .offset = B2063_TX_RF_IDAC_LO_RF_Q, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ /* { .offset = B2063_TX_RF_IDAC_LO_BB_I, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ /* { .offset = B2063_TX_RF_IDAC_LO_BB_Q, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL2, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL3, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL4, .value_a = 0x00b8, .value_g = 0x00b8, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL5, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL6, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL7, .value_a = 0x0078, .value_g = 0x0078, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL8, .value_a = 0x00c0, .value_g = 0x00c0, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL9, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_TX_RF_CTL15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_PA_CTL1, .value_a = 0x0000, .value_g = 0x0004, .flags = B206X_FLAG_A, }, /* { .offset = B2063_PA_CTL2, .value_a = 0x000c, 
.value_g = 0x000c, .flags = 0, }, */ /* { .offset = B2063_PA_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PA_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PA_CTL5, .value_a = 0x0096, .value_g = 0x0096, .flags = 0, }, */ /* { .offset = B2063_PA_CTL6, .value_a = 0x0077, .value_g = 0x0077, .flags = 0, }, */ /* { .offset = B2063_PA_CTL7, .value_a = 0x005a, .value_g = 0x005a, .flags = 0, }, */ /* { .offset = B2063_PA_CTL8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PA_CTL9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PA_CTL10, .value_a = 0x0021, .value_g = 0x0021, .flags = 0, }, */ /* { .offset = B2063_PA_CTL11, .value_a = 0x0070, .value_g = 0x0070, .flags = 0, }, */ /* { .offset = B2063_PA_CTL12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_PA_CTL13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_TX_BB_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_TX_BB_CTL2, .value_a = 0x00b3, .value_g = 0x00b3, .flags = 0, }, */ /* { .offset = B2063_TX_BB_CTL3, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2063_TX_BB_CTL4, .value_a = 0x000b, .value_g = 0x000b, .flags = 0, }, */ /* { .offset = B2063_GPIO_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ { .offset = B2063_VREG_CTL1, .value_a = 0x0003, .value_g = 0x0003, .flags = B206X_FLAG_A | B206X_FLAG_G, }, /* { .offset = B2063_AMUX_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ /* { .offset = B2063_IQ_CALIB_GVAR, .value_a = 0x00b3, .value_g = 0x00b3, .flags = 0, }, */ /* { .offset = B2063_IQ_CALIB_CTL1, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ /* { .offset = B2063_IQ_CALIB_CTL2, .value_a = 0x0030, .value_g = 0x0030, .flags = 0, }, */ /* { .offset = B2063_TEMPSENSE_CTL1, .value_a = 0x0046, .value_g = 0x0046, .flags = 0, }, */ /* 
{ .offset = B2063_TEMPSENSE_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
	/* { .offset = B2063_TX_RX_LOOPBACK1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
	/* { .offset = B2063_TX_RX_LOOPBACK2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
	/* { .offset = B2063_EXT_TSSI_CTL1, .value_a = 0x0021, .value_g = 0x0021, .flags = 0, }, */
	/* { .offset = B2063_EXT_TSSI_CTL2, .value_a = 0x0023, .value_g = 0x0023, .flags = 0, }, */
	/* { .offset = B2063_AFE_CTL , .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */
};

/*
 * Upload the B2062 radio init table to the hardware.
 *
 * For each table entry, write value_g when operating on the 2.4 GHz
 * band (entries flagged B206X_FLAG_G) or value_a otherwise (entries
 * flagged B206X_FLAG_A); entries not flagged for the current band are
 * skipped entirely.
 */
void b2062_upload_init_table(struct b43_wldev *dev)
{
	const struct b206x_init_tab_entry *e;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) {
		e = &b2062_init_tab[i];
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			/* 2.4 GHz: only entries flagged for G. */
			if (!(e->flags & B206X_FLAG_G))
				continue;
			b43_radio_write(dev, e->offset, e->value_g);
		} else {
			/* Other band: only entries flagged for A. */
			if (!(e->flags & B206X_FLAG_A))
				continue;
			b43_radio_write(dev, e->offset, e->value_a);
		}
	}
}

/*
 * Upload the B2063 radio init table to the hardware.
 * Same band-selection logic as b2062_upload_init_table(), but driven
 * by b2063_init_tab.
 */
void b2063_upload_init_table(struct b43_wldev *dev)
{
	const struct b206x_init_tab_entry *e;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(b2063_init_tab); i++) {
		e = &b2063_init_tab[i];
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			/* 2.4 GHz: only entries flagged for G. */
			if (!(e->flags & B206X_FLAG_G))
				continue;
			b43_radio_write(dev, e->offset, e->value_g);
		} else {
			/* Other band: only entries flagged for A. */
			if (!(e->flags & B206X_FLAG_A))
				continue;
			b43_radio_write(dev, e->offset, e->value_a);
		}
	}
}

/*
 * Read a single element from an LP-PHY table.
 *
 * The element width (8/16/32 bit) is encoded in the upper bits of
 * @offset (B43_LPTAB_TYPEMASK); the remaining bits are the table
 * address, which must fit in 16 bits.  A 32-bit element is assembled
 * from the high data register (upper 16 bits) and the low data
 * register (lower 16 bits) — register access order matters here.
 */
u32 b43_lptab_read(struct b43_wldev *dev, u32 offset)
{
	u32 type, value;

	type = offset & B43_LPTAB_TYPEMASK;
	offset &= ~B43_LPTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	switch (type) {
	case B43_LPTAB_8BIT:
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		value = b43_phy_read(dev, B43_LPPHY_TABLEDATALO) & 0xFF;
		break;
	case B43_LPTAB_16BIT:
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		value = b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
		break;
	case B43_LPTAB_32BIT:
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		value = b43_phy_read(dev, B43_LPPHY_TABLEDATAHI);
		value <<= 16;
		value
|= b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
		break;
	default:
		B43_WARN_ON(1);
		value = 0;
	}

	return value;
}

/*
 * Read @nr_elements consecutive elements from an LP-PHY table into
 * @_data.  The element width is encoded in @offset as for
 * b43_lptab_read().  TABLE_ADDR is written only once for the whole
 * run; presumably the hardware auto-increments the table address on
 * each data-port access — NOTE(review): confirm against the PHY spec.
 *
 * NOTE(review): the 16/32-bit cases store through (u16 *)/(u32 *)
 * casts of a u8 pointer; callers appear expected to pass a suitably
 * aligned buffer.
 */
void b43_lptab_read_bulk(struct b43_wldev *dev, u32 offset,
			 unsigned int nr_elements, void *_data)
{
	u32 type;
	u8 *data = _data;
	unsigned int i;

	type = offset & B43_LPTAB_TYPEMASK;
	offset &= ~B43_LPTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
	for (i = 0; i < nr_elements; i++) {
		switch (type) {
		case B43_LPTAB_8BIT:
			*data = b43_phy_read(dev, B43_LPPHY_TABLEDATALO) & 0xFF;
			data++;
			break;
		case B43_LPTAB_16BIT:
			*((u16 *)data) = b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
			data += 2;
			break;
		case B43_LPTAB_32BIT:
			/* High halfword first, then low — order matters. */
			*((u32 *)data) = b43_phy_read(dev, B43_LPPHY_TABLEDATAHI);
			*((u32 *)data) <<= 16;
			*((u32 *)data) |= b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
			data += 4;
			break;
		default:
			B43_WARN_ON(1);
		}
	}
}

/*
 * Write a single element to an LP-PHY table.
 *
 * Mirror of b43_lptab_read(): the element width is encoded in
 * @offset, and a 32-bit element is written high halfword first.
 * Warns if @value exceeds the range of the selected element width.
 */
void b43_lptab_write(struct b43_wldev *dev, u32 offset, u32 value)
{
	u32 type;

	type = offset & B43_LPTAB_TYPEMASK;
	offset &= ~B43_LPTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	switch (type) {
	case B43_LPTAB_8BIT:
		B43_WARN_ON(value & ~0xFF);
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
		break;
	case B43_LPTAB_16BIT:
		B43_WARN_ON(value & ~0xFFFF);
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
		break;
	case B43_LPTAB_32BIT:
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_LPPHY_TABLEDATAHI, value >> 16);
		b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
		break;
	default:
		B43_WARN_ON(1);
	}
}

/*
 * Write @nr_elements consecutive elements from @_data to an LP-PHY
 * table.  Mirror of b43_lptab_read_bulk(): TABLE_ADDR is written once
 * and the data port is used repeatedly.  Same alignment caveat on
 * @_data as for the bulk read.
 */
void b43_lptab_write_bulk(struct b43_wldev *dev, u32 offset,
			  unsigned int nr_elements, const void *_data)
{
	u32 type, value;
	const u8 *data = _data;
	unsigned int i;

	type = offset & B43_LPTAB_TYPEMASK;
	offset &= ~B43_LPTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
	for (i = 0; i < nr_elements; i++) {
		switch (type) {
		case B43_LPTAB_8BIT:
			value = *data;
			data++;
B43_WARN_ON(value & ~0xFF);
			b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
			break;
		case B43_LPTAB_16BIT:
			value = *((u16 *)data);
			data += 2;
			B43_WARN_ON(value & ~0xFFFF);
			b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
			break;
		case B43_LPTAB_32BIT:
			value = *((u32 *)data);
			data += 4;
			/* High halfword first, then low — order matters. */
			b43_phy_write(dev, B43_LPPHY_TABLEDATAHI, value >> 16);
			b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
			break;
		default:
			B43_WARN_ON(1);
		}
	}
}

/* LP-PHY hardware table contents below; values are opaque calibration
 * data uploaded verbatim to the device. */

static const u8 lpphy_min_sig_sq_table[] = {
	0xde, 0xdc, 0xda, 0xd8, 0xd6, 0xd4, 0xd2, 0xcf,
	0xcd, 0xca, 0xc7, 0xc4, 0xc1, 0xbe, 0xbe, 0xbe,
	0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
	0xbe, 0xbe, 0x00, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
	0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
	0xc1, 0xc4, 0xc7, 0xca, 0xcd, 0xcf, 0xd2, 0xd4,
	0xd6, 0xd8, 0xda, 0xdc, 0xde,
};

/* Noise-scale table for LP-PHY revisions 0 and 1. */
static const u16 lpphy_rev01_noise_scale_table[] = {
	0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
	0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa400, 0xa4a4, 0xa4a4,
	0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
	0xa4a4, 0xa4a4, 0x00a4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
	0x0000, 0x0000, 0x4c00, 0x2d36, 0x0000, 0x0000, 0x4c00, 0x2d36,
};

/* Noise-scale table for LP-PHY revision 2 and later. */
static const u16 lpphy_rev2plus_noise_scale_table[] = {
	0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
	0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
	0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
	0x00a4, 0x00a4, 0x0000, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
	0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
	0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
	0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
};

static const u16 lpphy_crs_gain_nft_table[] = {
	0x0366, 0x036a, 0x036f, 0x0364, 0x0367, 0x036d, 0x0374, 0x037f,
	0x036f, 0x037b, 0x038a, 0x0378, 0x0367, 0x036d, 0x0375, 0x0381,
	0x0374, 0x0381, 0x0392, 0x03a9, 0x03c4, 0x03e1, 0x0001, 0x001f,
	0x0040, 0x005e, 0x007f, 0x009e, 0x00bd, 0x00dd, 0x00fd, 0x011d,
	0x013d,
};

static const u16
lpphy_rev01_filter_control_table[] = { 0xa0fc, 0x10fc, 0x10db, 0x20b7, 0xff93, 0x10bf, 0x109b, 0x2077, 0xff53, 0x0127, }; static const u32 lpphy_rev2plus_filter_control_table[] = { 0x000141fc, 0x000021fc, 0x000021b7, 0x0000416f, 0x0001ff27, 0x0000217f, 0x00002137, 0x000040ef, 0x0001fea7, 0x0000024f, }; static const u32 lpphy_rev01_ps_control_table[] = { 0x00010000, 0x000000a0, 0x00040000, 0x00000048, 0x08080101, 0x00000080, 0x08080101, 0x00000040, 0x08080101, 0x000000c0, 0x08a81501, 0x000000c0, 0x0fe8fd01, 0x000000c0, 0x08300105, 0x000000c0, 0x08080201, 0x000000c0, 0x08280205, 0x000000c0, 0xe80802fe, 0x000000c7, 0x28080206, 0x000000c0, 0x08080202, 0x000000c0, 0x0ba87602, 0x000000c0, 0x1068013d, 0x000000c0, 0x10280105, 0x000000c0, 0x08880102, 0x000000c0, 0x08280106, 0x000000c0, 0xe80801fd, 0x000000c7, 0xa8080115, 0x000000c0, }; static const u32 lpphy_rev2plus_ps_control_table[] = { 0x00e38e08, 0x00e08e38, 0x00000000, 0x00000000, 0x00000000, 0x00002080, 0x00006180, 0x00003002, 0x00000040, 0x00002042, 0x00180047, 0x00080043, 0x00000041, 0x000020c1, 0x00046006, 0x00042002, 0x00040000, 0x00002003, 0x00180006, 0x00080002, }; static const u8 lpphy_pll_fraction_table[] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, }; static const u16 lpphy_iqlo_cal_table[] = { 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u16 lpphy_rev0_ofdm_cck_gain_table[] = { 0x0001, 0x0001, 0x0001, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const u16 lpphy_rev1_ofdm_cck_gain_table[] = { 0x5000, 0x6000, 0x7000, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const u16 lpphy_gain_delta_table[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u32 lpphy_tx_power_control_table[] = { 0x00000050, 0x0000004f, 0x0000004e, 0x0000004d, 0x0000004c, 0x0000004b, 0x0000004a, 0x00000049, 0x00000048, 0x00000047, 0x00000046, 0x00000045, 0x00000044, 0x00000043, 0x00000042, 0x00000041, 0x00000040, 0x0000003f, 0x0000003e, 0x0000003d, 0x0000003c, 0x0000003b, 0x0000003a, 0x00000039, 0x00000038, 0x00000037, 0x00000036, 0x00000035, 0x00000034, 0x00000033, 0x00000032, 0x00000031, 0x00000030, 0x0000002f, 0x0000002e, 0x0000002d, 0x0000002c, 0x0000002b, 0x0000002a, 0x00000029, 0x00000028, 0x00000027, 0x00000026, 0x00000025, 0x00000024, 0x00000023, 0x00000022, 0x00000021, 0x00000020, 0x0000001f, 0x0000001e, 0x0000001d, 0x0000001c, 0x0000001b, 0x0000001a, 0x00000019, 0x00000018, 0x00000017, 0x00000016, 0x00000015, 0x00000014, 0x00000013, 0x00000012, 0x00000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000075a0, 0x000075a0, 0x000075a1, 0x000075a1, 0x000075a2, 0x000075a2, 0x000075a3, 0x000075a3, 0x000074b0, 0x000074b0, 0x000074b1, 0x000074b1, 0x000074b2, 0x000074b2, 0x000074b3, 0x000074b3, 0x00006d20, 0x00006d20, 0x00006d21, 0x00006d21, 0x00006d22, 0x00006d22, 0x00006d23, 0x00006d23, 0x00004660, 0x00004660, 0x00004661, 0x00004661, 0x00004662, 0x00004662, 0x00004663, 0x00004663, 0x00003e60, 0x00003e60, 0x00003e61, 0x00003e61, 0x00003e62, 0x00003e62, 0x00003e63, 0x00003e63, 0x00003660, 0x00003660, 0x00003661, 0x00003661, 0x00003662, 0x00003662, 0x00003663, 0x00003663, 0x00002e60, 0x00002e60, 0x00002e61, 0x00002e61, 0x00002e62, 0x00002e62, 0x00002e63, 0x00002e63, 
0x00002660, 0x00002660, 0x00002661, 0x00002661, 0x00002662, 0x00002662, 0x00002663, 0x00002663, 0x000025e0, 0x000025e0, 0x000025e1, 0x000025e1, 0x000025e2, 0x000025e2, 0x000025e3, 0x000025e3, 0x00001de0, 0x00001de0, 0x00001de1, 0x00001de1, 0x00001de2, 0x00001de2, 0x00001de3, 0x00001de3, 0x00001d60, 0x00001d60, 0x00001d61, 0x00001d61, 0x00001d62, 0x00001d62, 0x00001d63, 0x00001d63, 0x00001560, 0x00001560, 0x00001561, 0x00001561, 0x00001562, 0x00001562, 0x00001563, 0x00001563, 0x00000d60, 0x00000d60, 0x00000d61, 0x00000d61, 0x00000d62, 0x00000d62, 0x00000d63, 0x00000d63, 0x00000ce0, 0x00000ce0, 0x00000ce1, 0x00000ce1, 0x00000ce2, 0x00000ce2, 0x00000ce3, 0x00000ce3, 0x00000e10, 0x00000e10, 0x00000e11, 0x00000e11, 0x00000e12, 0x00000e12, 0x00000e13, 0x00000e13, 0x00000bf0, 0x00000bf0, 0x00000bf1, 0x00000bf1, 0x00000bf2, 0x00000bf2, 0x00000bf3, 0x00000bf3, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x000000ff, 0x000002fc, 0x0000fa08, 0x00000305, 0x00000206, 0x00000304, 0x0000fb04, 0x0000fcff, 0x000005fb, 0x0000fd01, 0x00000401, 0x00000006, 0x0000ff03, 0x000007fc, 0x0000fc08, 0x00000203, 0x0000fffb, 0x00000600, 0x0000fa01, 0x0000fc03, 0x0000fe06, 0x0000fe00, 0x00000102, 0x000007fd, 0x000004fb, 0x000006ff, 0x000004fd, 0x0000fdfa, 0x000007fb, 0x0000fdfa, 0x0000fa06, 0x00000500, 0x0000f902, 0x000007fa, 0x0000fafa, 0x00000500, 0x000007fa, 0x00000700, 0x00000305, 0x000004ff, 0x00000801, 0x00000503, 0x000005f9, 0x00000404, 0x0000fb08, 0x000005fd, 0x00000501, 0x00000405, 0x0000fb03, 0x000007fc, 0x00000403, 0x00000303, 0x00000402, 0x0000faff, 0x0000fe05, 0x000005fd, 0x0000fe01, 0x000007fa, 0x00000202, 0x00000504, 0x00000102, 0x000008fe, 0x0000fa04, 0x0000fafc, 0x0000fe08, 0x000000f9, 0x000002fa, 0x000003fe, 0x00000304, 0x000004f9, 0x00000100, 0x0000fd06, 0x000008fc, 0x00000701, 0x00000504, 0x0000fdfe, 0x0000fdfc, 0x000003fe, 0x00000704, 0x000002fc, 0x000004f9, 0x0000fdfd, 0x0000fa07, 0x00000205, 0x000003fd, 0x000005fb, 0x000004f9, 0x00000804, 0x0000fc06, 0x0000fcf9, 0x00000100, 0x0000fe05, 0x00000408, 0x0000fb02, 0x00000304, 0x000006fe, 0x000004fa, 0x00000305, 0x000008fc, 0x00000102, 0x000001fd, 0x000004fc, 0x0000fe03, 0x00000701, 0x000001fb, 0x000001f9, 0x00000206, 0x000006fd, 0x00000508, 0x00000700, 0x00000304, 0x000005fe, 0x000005ff, 0x0000fa04, 0x00000303, 0x0000fefb, 0x000007f9, 0x0000fefc, 0x000004fd, 0x000005fc, 0x0000fffd, 0x0000fc08, 0x0000fbf9, 0x0000fd07, 0x000008fb, 0x0000fe02, 0x000006fb, 0x00000702, }; static const u32 lpphy_gain_idx_table[] = { 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a, }; static const u16 lpphy_aux_gain_idx_table[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0001, 0x0002, 0x0004, 0x0016, }; static const u32 lpphy_gain_value_table[] = { 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009, 0x000000f1, 0x00000000, 0x00000000, }; static const u16 lpphy_gain_table[] = { 0x0000, 0x0400, 0x0800, 0x0802, 0x0804, 0x0806, 0x0807, 0x0808, 0x080a, 0x080b, 0x080c, 0x080e, 0x080f, 0x0810, 0x0812, 0x0813, 0x0814, 0x0816, 0x0817, 0x081a, 0x081b, 0x081f, 0x0820, 0x0824, 0x0830, 0x0834, 0x0837, 0x083b, 0x083f, 0x0840, 0x0844, 0x0857, 0x085b, 0x085f, 0x08d7, 0x08db, 0x08df, 0x0957, 0x095b, 0x095f, 0x0b57, 0x0b5b, 0x0b5f, 0x0f5f, 0x135f, 0x175f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u32 lpphy_a0_gain_idx_table[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 
0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28, }; static const u16 lpphy_a0_aux_gain_idx_table[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014, }; static const u32 lpphy_a0_gain_value_table[] = { 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f, 0x000000f7, 0x00000000, 0x00000000, }; static const u16 lpphy_a0_gain_table[] = { 0x0000, 0x0002, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a, 0x000b, 0x000c, 0x000e, 0x000f, 0x0010, 0x0012, 0x0013, 0x0014, 0x0016, 0x0017, 0x001a, 0x001b, 0x001f, 0x0020, 0x0024, 0x0030, 0x0034, 0x0037, 0x003b, 0x003f, 0x0040, 0x0044, 0x0057, 0x005b, 0x005f, 0x00d7, 0x00db, 0x00df, 
0x0157, 0x015b, 0x015f, 0x0357, 0x035b, 0x035f, 0x075f, 0x0b5f, 0x0f5f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u16 lpphy_sw_control_table[] = { 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, }; static const u8 lpphy_hf_table[] = { 0x4b, 0x36, 0x24, 0x18, 0x49, 0x34, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17, }; static const u32 lpphy_papd_eps_table[] = { 0x00000000, 0x00013ffc, 0x0001dff3, 0x0001bff0, 0x00023fe9, 0x00021fdf, 0x00028fdf, 0x00033fd2, 0x00039fcb, 0x00043fc7, 0x0004efc2, 0x00055fb5, 0x0005cfb0, 0x00063fa8, 0x00068fa3, 0x00071f98, 0x0007ef92, 0x00084f8b, 0x0008df82, 0x00097f77, 0x0009df69, 0x000a3f62, 0x000adf57, 0x000b6f4c, 0x000bff41, 0x000c9f39, 0x000cff30, 0x000dbf27, 0x000e4f1e, 0x000edf16, 0x000f7f13, 0x00102f11, 0x00110f10, 0x0011df11, 0x0012ef15, 0x00143f1c, 0x00158f27, 0x00172f35, 0x00193f47, 0x001baf5f, 0x001e6f7e, 0x0021cfa4, 0x0025bfd2, 0x002a2008, 0x002fb047, 0x00360090, 0x003d40e0, 0x0045c135, 0x004fb189, 0x005ae1d7, 0x0067221d, 0x0075025a, 0x007ff291, 0x007ff2bf, 0x007ff2e3, 0x007ff2ff, 0x007ff315, 0x007ff329, 0x007ff33f, 0x007ff356, 0x007ff36e, 0x007ff39c, 0x007ff441, 0x007ff506, 
}; static const u32 lpphy_papd_mult_table[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28, }; static struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = { { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 139, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 135, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 131, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 128, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 124, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 121, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 117, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 114, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 111, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 107, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 104, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 101, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 99, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 96, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult 
= 93, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 90, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 88, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 85, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 83, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 81, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 78, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 76, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 74, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 12, .dac 
= 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 56, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 14, .pad = 9, .dac 
= 0, .bb_mult = 60, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, 
}, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, }, }; static struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = { { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, 
}, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, }, { .gm = 4, .pga 
= 11, .pad = 6, .dac = 0, .bb_mult = 71, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 71, }, { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 69, }, { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 67, }, { .gm = 4, .pga = 9, .pad = 4, .dac = 0, 
.bb_mult = 65, }, { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 58, }, { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 65, }, { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 67, }, { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 65, }, { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 65, }, { .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 58, }, { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 5, .pad = 3, 
.dac = 0, .bb_mult = 57, }, { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 83, }, { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 81, }, { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 78, }, { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 76, }, { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 74, }, { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 72, }, }; static struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = { { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 83, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 81, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 78, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 76, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 74, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 55, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, { .gm = 7, 
.pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 56, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 55, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 56, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 73, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, 
}, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 56, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 
7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 11, .pad 
= 6, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, }, };
/*
 * TX gain table for LP-PHY revision 1 without an external power amplifier
 * ("nopa").  Each entry selects one TX gain step: .gm, .pga and .pad are the
 * Gm, PGA and PAD amplifier gain codes, .dac the DAC attenuation code and
 * .bb_mult the baseband multiplier.  The values are vendor-supplied
 * calibration data and must not be edited by hand; presumably the table is
 * indexed from highest to lowest output power (bb_mult trends downward) --
 * TODO(review): confirm index direction against the Broadcom LP-PHY specs.
 *
 * NOTE(review): a few entries below return to .pad = 14 after two
 * .pad = 13 entries and then repeat the pad = 13 run.  This matches the
 * table as shipped; verify against the vendor specification before
 * treating it as a typo.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = { { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 139, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 135, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 131, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 128, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 124, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 121, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 117, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 114, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 111, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 107, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 104, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 101, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 99, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 96, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 93, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 90, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 88, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 85, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 83, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 81, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 78, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 76, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 74, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0,
.bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad =
12, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 56, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 14, .pad =
9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0,
.bb_mult = 59, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, }, };
/*
 * TX gain table for LP-PHY revision 1, 2.4 GHz band (external PA fitted,
 * hence .gm = 4 throughout, unlike the .gm = 7 "nopa" table above).
 * Vendor-supplied calibration data; do not edit by hand.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = { { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 83, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 81, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 78, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 76, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 74, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 15,
.pad = 14, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 62, }, { .gm =
4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 13,
.pad = 8, .dac = 0, .bb_mult = 60, }, { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 61, }, { .gm = 4, .pga = 11, .pad = 6, .dac = 0,
.bb_mult = 60, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 72, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 70, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 68, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 66, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 60, }, };
/*
 * TX gain table for LP-PHY revision 1, 5 GHz band (.gm = 7 throughout).
 * Vendor-supplied calibration data; do not edit by hand.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = { { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 83, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 81, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 78, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 76, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 74, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 55, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga =
15, .pad = 14, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 56, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 55, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 56, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 72, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 73, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 60, }, {
.gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 56, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 58, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, { .gm = 7,
.pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 57, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, }, { .gm = 7, .pga = 11, .pad =
6, .dac = 0, .bb_mult = 67, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 62, }, { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, }, };
/*
 * TX gain table for LP-PHY revision 2 and later without an external PA.
 * Note the rev2+ gain codes use a wider range (up to 255) than the rev0/1
 * tables above.  Vendor-supplied calibration data; do not edit by hand.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = { { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 152, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 147, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 143, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 139, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 135, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 131, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 128, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 124, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 121, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 117, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 114, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 111, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 107, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 104, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 101, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 99, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 96, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 93, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 90, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 88, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 85, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 83, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 81, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 78, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 76, }, { .gm =
255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 74, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 72, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 70, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 68, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 66, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 197, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 192, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 186, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 181, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 176, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 171, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 166, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 161, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 157, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 152, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 148, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 144, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 140, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 136, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 132, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 128, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 124, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 121, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 117, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 114, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 111, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 108, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 105, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 102, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 99, .dac = 0, .bb_mult =
64, }, { .gm = 255, .pga = 255, .pad = 96, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 93, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 91, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 88, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 86, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 83, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 81, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 79, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 76, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 74, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 72, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 70, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 68, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 66, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 64, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 248, .pad = 64, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 248, .pad = 62, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 241, .pad = 62, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 241, .pad = 60, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 234, .pad = 60, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 234, .pad = 59, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 227, .pad = 59, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 227, .pad = 57, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 221, .pad = 57, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 221, .pad = 55, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 215, .pad = 55, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 215, .pad = 54, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 208, .pad = 54, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 208, .pad = 52, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 203, .pad = 52, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 203, .pad = 51, .dac = 0, .bb_mult = 64, }, { .gm =
255, .pga = 197, .pad = 51, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 197, .pad = 49, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 191, .pad = 49, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 191, .pad = 48, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 186, .pad = 48, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 186, .pad = 47, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 181, .pad = 47, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 181, .pad = 45, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 175, .pad = 45, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 175, .pad = 44, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 170, .pad = 44, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 170, .pad = 43, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 166, .pad = 43, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 166, .pad = 42, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 161, .pad = 42, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 161, .pad = 40, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 156, .pad = 40, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 156, .pad = 39, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 152, .pad = 39, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 152, .pad = 38, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 148, .pad = 38, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 148, .pad = 37, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 143, .pad = 37, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 143, .pad = 36, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 139, .pad = 36, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 139, .pad = 35, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 135, .pad = 35, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 135, .pad = 34, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 132, .pad = 34, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 132, .pad = 33, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 128, .pad = 33, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga =
128, .pad = 32, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 124, .pad = 32, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 124, .pad = 31, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 121, .pad = 31, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 121, .pad = 30, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 117, .pad = 30, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 117, .pad = 29, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 114, .pad = 29, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 114, .pad = 29, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 111, .pad = 29, .dac = 0, .bb_mult = 64, }, }; static struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = { { .gm = 7, .pga = 99, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 96, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 93, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 90, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 88, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 85, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 83, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 81, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 78, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 76, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 74, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 72, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 70, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 68, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 66, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 64, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 64, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 62, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 62, .pad = 248, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 60, .pad = 248, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 60, .pad = 241, .dac = 0, .bb_mult = 64, 
}, { .gm = 7, .pga = 59, .pad = 241, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 59, .pad = 234, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 57, .pad = 234, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 57, .pad = 227, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 55, .pad = 227, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 55, .pad = 221, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 54, .pad = 221, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 54, .pad = 215, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 52, .pad = 215, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 52, .pad = 208, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 51, .pad = 208, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 51, .pad = 203, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 49, .pad = 203, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 49, .pad = 197, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 48, .pad = 197, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 48, .pad = 191, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 47, .pad = 191, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 47, .pad = 186, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 45, .pad = 186, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 45, .pad = 181, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 44, .pad = 181, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 44, .pad = 175, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 43, .pad = 175, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 43, .pad = 170, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 42, .pad = 170, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 42, .pad = 166, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 40, .pad = 166, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 40, .pad = 161, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 39, .pad = 161, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 39, .pad = 156, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 38, .pad = 156, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 38, .pad = 152, .dac = 0, .bb_mult = 64, }, { .gm = 7, 
.pga = 37, .pad = 152, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 37, .pad = 148, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 36, .pad = 148, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 36, .pad = 143, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 35, .pad = 143, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 35, .pad = 139, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 34, .pad = 139, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 34, .pad = 135, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 33, .pad = 135, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 33, .pad = 132, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 32, .pad = 132, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 32, .pad = 128, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 31, .pad = 128, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 31, .pad = 124, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 30, .pad = 124, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 30, .pad = 121, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 29, .pad = 121, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 29, .pad = 117, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 29, .pad = 117, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 29, .pad = 114, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 28, .pad = 114, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 28, .pad = 111, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 27, .pad = 111, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 27, .pad = 108, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 26, .pad = 108, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 26, .pad = 104, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 25, .pad = 104, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 25, .pad = 102, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 25, .pad = 102, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 25, .pad = 99, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 24, .pad = 99, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 24, .pad = 96, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 23, .pad = 
96, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 23, .pad = 93, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 23, .pad = 93, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 23, .pad = 90, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 22, .pad = 90, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 22, .pad = 88, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 21, .pad = 88, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 21, .pad = 85, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 21, .pad = 85, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 21, .pad = 83, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 20, .pad = 83, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 20, .pad = 81, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 20, .pad = 81, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 20, .pad = 78, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 19, .pad = 78, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 19, .pad = 76, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 19, .pad = 76, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 19, .pad = 74, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 18, .pad = 74, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 18, .pad = 72, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 18, .pad = 72, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 18, .pad = 70, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 17, .pad = 70, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 17, .pad = 68, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 17, .pad = 68, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 17, .pad = 66, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 16, .pad = 66, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 16, .pad = 64, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 16, .pad = 64, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 16, .pad = 62, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 62, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 60, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 15, .pad = 60, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga 
= 15, .pad = 59, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 14, .pad = 59, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 14, .pad = 57, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 14, .pad = 57, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 14, .pad = 55, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 14, .pad = 55, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 14, .pad = 54, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 54, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 52, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 13, .pad = 52, .dac = 0, .bb_mult = 64, }, }; static struct lpphy_tx_gain_table_entry lpphy_rev2_5ghz_tx_gain_table[] = { { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 152, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 147, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 143, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 139, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 135, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 131, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 128, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 124, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 121, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 117, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 114, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 111, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 107, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 104, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 101, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 99, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 96, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 93, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 90, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 88, }, { .gm = 
255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 85, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 83, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 81, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 78, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 76, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 74, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 72, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 70, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 68, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 66, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 248, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 241, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 234, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 227, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 221, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 215, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 208, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 197, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 191, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 186, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 181, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 175, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 170, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 166, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 161, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 156, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 152, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 148, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 143, .dac = 0, .bb_mult = 
64, }, { .gm = 255, .pga = 255, .pad = 139, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 135, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 132, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 128, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 124, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 121, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 117, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 114, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 111, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 108, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 104, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 102, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 99, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 96, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 93, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 90, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 88, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 85, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 83, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 81, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 78, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 76, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 74, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 72, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 70, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 68, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 66, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 64, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 64, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 255, .pad = 62, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 248, .pad = 62, .dac = 0, .bb_mult = 64, 
}, { .gm = 255, .pga = 248, .pad = 60, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 241, .pad = 60, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 241, .pad = 59, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 234, .pad = 59, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 234, .pad = 57, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 227, .pad = 57, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 227, .pad = 55, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 221, .pad = 55, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 221, .pad = 54, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 215, .pad = 54, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 215, .pad = 52, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 208, .pad = 52, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 208, .pad = 51, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 203, .pad = 51, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 203, .pad = 49, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 197, .pad = 49, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 197, .pad = 48, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 191, .pad = 48, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 191, .pad = 47, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 186, .pad = 47, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 186, .pad = 45, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 181, .pad = 45, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 181, .pad = 44, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 175, .pad = 44, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 175, .pad = 43, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 170, .pad = 43, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 170, .pad = 42, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 166, .pad = 42, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 166, .pad = 40, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 161, .pad = 40, .dac = 0, .bb_mult = 64, }, { .gm = 255, .pga = 161, .pad = 39, .dac = 0, .bb_mult = 64, }, { .gm = 255, 
.pga = 156, .pad = 39, .dac = 0, .bb_mult = 64, },
	/* Remaining entries of lpphy_rev2_5ghz_tx_gain_table[] (the array
	 * opens above this chunk).  Constant calibration data; the field
	 * meanings (gm/pga/pad amplifier gains, dac, baseband multiplier)
	 * presumably follow the Broadcom LP-PHY specs -- do not hand-edit. */
	{ .gm = 255, .pga = 156, .pad = 38, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 152, .pad = 38, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 152, .pad = 37, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 148, .pad = 37, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 148, .pad = 36, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 143, .pad = 36, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 143, .pad = 35, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 139, .pad = 35, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 139, .pad = 34, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 135, .pad = 34, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 135, .pad = 33, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 132, .pad = 33, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 132, .pad = 32, .dac = 0, .bb_mult = 64, },
	{ .gm = 255, .pga = 128, .pad = 32, .dac = 0, .bb_mult = 64, },
};

/*
 * Load the static PHY tables for LP-PHY revisions 0 and 1.
 *
 * Revisions 0 and 1 share every table except the OFDM/CCK gain tables
 * (written to lptabs 12 and 13), which are selected per revision below.
 */
void lpphy_rev0_1_table_init(struct b43_wldev *dev)
{
	B43_WARN_ON(dev->phy.rev >= 2);

	b43_lptab_write_bulk(dev, B43_LPTAB8(2, 0),
		ARRAY_SIZE(lpphy_min_sig_sq_table), lpphy_min_sig_sq_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(1, 0),
		ARRAY_SIZE(lpphy_rev01_noise_scale_table),
		lpphy_rev01_noise_scale_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
		ARRAY_SIZE(lpphy_crs_gain_nft_table), lpphy_crs_gain_nft_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(8, 0),
		ARRAY_SIZE(lpphy_rev01_filter_control_table),
		lpphy_rev01_filter_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(9, 0),
		ARRAY_SIZE(lpphy_rev01_ps_control_table),
		lpphy_rev01_ps_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB8(6, 0),
		ARRAY_SIZE(lpphy_pll_fraction_table), lpphy_pll_fraction_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(0, 0),
		ARRAY_SIZE(lpphy_iqlo_cal_table), lpphy_iqlo_cal_table);
	if (dev->phy.rev == 0) {
		/* rev 0: same gain table is written to both lptab 13 and 12 */
		b43_lptab_write_bulk(dev, B43_LPTAB16(13, 0),
			ARRAY_SIZE(lpphy_rev0_ofdm_cck_gain_table),
			lpphy_rev0_ofdm_cck_gain_table);
		b43_lptab_write_bulk(dev, B43_LPTAB16(12, 0),
			ARRAY_SIZE(lpphy_rev0_ofdm_cck_gain_table),
			lpphy_rev0_ofdm_cck_gain_table);
	} else {
		/* rev 1: likewise, but with the rev-1 gain table */
		b43_lptab_write_bulk(dev, B43_LPTAB16(13, 0),
			ARRAY_SIZE(lpphy_rev1_ofdm_cck_gain_table),
			lpphy_rev1_ofdm_cck_gain_table);
		b43_lptab_write_bulk(dev, B43_LPTAB16(12, 0),
			ARRAY_SIZE(lpphy_rev1_ofdm_cck_gain_table),
			lpphy_rev1_ofdm_cck_gain_table);
	}
	b43_lptab_write_bulk(dev, B43_LPTAB16(15, 0),
		ARRAY_SIZE(lpphy_gain_delta_table), lpphy_gain_delta_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0),
		ARRAY_SIZE(lpphy_tx_power_control_table),
		lpphy_tx_power_control_table);
}

/*
 * Load the static PHY tables for LP-PHY revision 2 and later.
 *
 * First clears the 704 32-bit entries of lptab 7, then writes the
 * rev2+ tables.  For the BCM4325 chip at chip revision 0, four of the
 * gain-related tables are overwritten with A0-specific variants at the
 * end.
 */
void lpphy_rev2plus_table_init(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->sdev->bus;
	int i;

	B43_WARN_ON(dev->phy.rev < 2);

	/* Zero out lptab 7 before any gain entries are written into it. */
	for (i = 0; i < 704; i++)
		b43_lptab_write(dev, B43_LPTAB32(7, i), 0);

	b43_lptab_write_bulk(dev, B43_LPTAB8(2, 0),
		ARRAY_SIZE(lpphy_min_sig_sq_table), lpphy_min_sig_sq_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(1, 0),
		ARRAY_SIZE(lpphy_rev2plus_noise_scale_table),
		lpphy_rev2plus_noise_scale_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(11, 0),
		ARRAY_SIZE(lpphy_rev2plus_filter_control_table),
		lpphy_rev2plus_filter_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(12, 0),
		ARRAY_SIZE(lpphy_rev2plus_ps_control_table),
		lpphy_rev2plus_ps_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(13, 0),
		ARRAY_SIZE(lpphy_gain_idx_table), lpphy_gain_idx_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
		ARRAY_SIZE(lpphy_aux_gain_idx_table), lpphy_aux_gain_idx_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(15, 0),
		ARRAY_SIZE(lpphy_sw_control_table), lpphy_sw_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB8(16, 0),
		ARRAY_SIZE(lpphy_hf_table), lpphy_hf_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(17, 0),
		ARRAY_SIZE(lpphy_gain_value_table), lpphy_gain_value_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(18, 0),
		ARRAY_SIZE(lpphy_gain_table), lpphy_gain_table);
	b43_lptab_write_bulk(dev, B43_LPTAB8(6, 0),
		ARRAY_SIZE(lpphy_pll_fraction_table), lpphy_pll_fraction_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(0, 0),
		ARRAY_SIZE(lpphy_iqlo_cal_table), lpphy_iqlo_cal_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(9, 0),
		ARRAY_SIZE(lpphy_papd_eps_table), lpphy_papd_eps_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0),
		ARRAY_SIZE(lpphy_papd_mult_table), lpphy_papd_mult_table);

	if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
		/* BCM4325 A0 silicon: replace the gain tables written above. */
		b43_lptab_write_bulk(dev, B43_LPTAB32(13, 0),
			ARRAY_SIZE(lpphy_a0_gain_idx_table),
			lpphy_a0_gain_idx_table);
		b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
			ARRAY_SIZE(lpphy_a0_aux_gain_idx_table),
			lpphy_a0_aux_gain_idx_table);
		b43_lptab_write_bulk(dev, B43_LPTAB32(17, 0),
			ARRAY_SIZE(lpphy_a0_gain_value_table),
			lpphy_a0_gain_value_table);
		b43_lptab_write_bulk(dev, B43_LPTAB16(18, 0),
			ARRAY_SIZE(lpphy_a0_gain_table), lpphy_a0_gain_table);
	}
}

/*
 * Write one TX gain table entry for PHY revisions 0/1.
 *
 * Packs pad/pga/gm/dac into one 32-bit word written to lptab 10 at
 * index 0xC0 + offset, and the baseband multiplier (shifted into bits
 * 20+) into a second word at index 0x140 + offset.
 */
static void lpphy_rev0_1_write_gain_table(struct b43_wldev *dev, int offset,
					  struct lpphy_tx_gain_table_entry data)
{
	u32 tmp;

	B43_WARN_ON(dev->phy.rev >= 2);

	tmp  = data.pad << 11;
	tmp |= data.pga << 7;
	tmp |= data.gm << 4;
	tmp |= data.dac;
	b43_lptab_write(dev, B43_LPTAB32(10, 0xC0 + offset), tmp);
	tmp  = data.bb_mult << 20;
	b43_lptab_write(dev, B43_LPTAB32(10, 0x140 + offset), tmp);
}

/*
 * Write one TX gain table entry for PHY revision 2 and later.
 *
 * Packs pad/pga/gm into the low bits of the first word and adds a
 * band- and revision-dependent value in the top byte, written to
 * lptab 7 at index 0xC0 + offset.  The second word carries
 * bb_mult (bits 20+) and dac (bits 28+) at index 0x140 + offset.
 */
static void lpphy_rev2plus_write_gain_table(struct b43_wldev *dev, int offset,
					    struct lpphy_tx_gain_table_entry data)
{
	u32 tmp;

	B43_WARN_ON(dev->phy.rev < 2);

	tmp  = data.pad << 16;
	tmp |= data.pga << 8;
	tmp |= data.gm;
	if (dev->phy.rev >= 3) {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
			tmp |= 0x10 << 24;
		else
			tmp |= 0x70 << 24;
	} else {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
			tmp |= 0x14 << 24;
		else
			tmp |= 0x7F << 24;
	}
	b43_lptab_write(dev, B43_LPTAB32(7, 0xC0 + offset), tmp);
	tmp  = data.bb_mult << 20;
	tmp |= data.dac << 28;
	b43_lptab_write(dev, B43_LPTAB32(7, 0x140 + offset), tmp);
}

/* Write one TX gain entry, dispatching on the PHY revision. */
void lpphy_write_gain_table(struct b43_wldev *dev, int offset,
			    struct lpphy_tx_gain_table_entry data)
{
	if (dev->phy.rev >= 2)
		lpphy_rev2plus_write_gain_table(dev, offset, data);
	else
		lpphy_rev0_1_write_gain_table(dev, offset, data);
}

/*
 * Write gain table entries offset..count-1 from @table.
 *
 * NOTE(review): the loop starts at i = offset but still indexes
 * table[i], so for a non-zero offset the first `offset` elements of
 * @table are skipped and `count` acts as an absolute end index rather
 * than a number of entries.  Every caller in this file passes
 * offset == 0, where this is harmless -- confirm the intended
 * semantics before calling with a non-zero offset.
 */
void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
				 struct lpphy_tx_gain_table_entry *table)
{
	int i;

	for (i = offset; i < count; i++)
		lpphy_write_gain_table(dev, i, table[i]);
}

/*
 * Load the full 128-entry TX gain table appropriate for this device.
 *
 * Selection is by PHY revision (0, 1, or 2+), then by SPROM board
 * flags (no-PA / high-gain-PA variants) and the current band
 * (2 GHz vs 5 GHz).
 */
void lpphy_init_tx_gain_table(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->sdev->bus;

	switch (dev->phy.rev) {
	case 0:
		if ((bus->sprom.boardflags_hi & B43_BFH_NOPA) ||
		    (bus->sprom.boardflags_lo & B43_BFL_HGPA))
			lpphy_write_gain_table_bulk(dev, 0, 128,
					lpphy_rev0_nopa_tx_gain_table);
		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
			lpphy_write_gain_table_bulk(dev, 0, 128,
					lpphy_rev0_2ghz_tx_gain_table);
		else
			lpphy_write_gain_table_bulk(dev, 0, 128,
					lpphy_rev0_5ghz_tx_gain_table);
		break;
	case 1:
		if ((bus->sprom.boardflags_hi & B43_BFH_NOPA) ||
		    (bus->sprom.boardflags_lo & B43_BFL_HGPA))
			lpphy_write_gain_table_bulk(dev, 0, 128,
					lpphy_rev1_nopa_tx_gain_table);
		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
			lpphy_write_gain_table_bulk(dev, 0, 128,
					lpphy_rev1_2ghz_tx_gain_table);
		else
			lpphy_write_gain_table_bulk(dev, 0, 128,
					lpphy_rev1_5ghz_tx_gain_table);
		break;
	default:
		/* rev 2+ (note: only B43_BFH_NOPA is checked here,
		 * unlike the rev 0/1 cases which also test B43_BFL_HGPA) */
		if (bus->sprom.boardflags_hi & B43_BFH_NOPA)
			lpphy_write_gain_table_bulk(dev, 0, 128,
					lpphy_rev2_nopa_tx_gain_table);
		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
			lpphy_write_gain_table_bulk(dev, 0, 128,
					lpphy_rev2_2ghz_tx_gain_table);
		else
			lpphy_write_gain_table_bulk(dev, 0, 128,
					lpphy_rev2_5ghz_tx_gain_table);
	}
}
gpl-2.0
Alex-V2/One_M8_4.4.3_kernel
arch/ia64/hp/sim/boot/fw-emu.c
6663
11537
/*
 * PAL & SAL emulation.
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Builds a fake EFI/SAL firmware environment in a static buffer for the
 * IA-64 HP simulator boot loader: an EFI system table, runtime services
 * table, SAL system table, and an EFI memory map, all handed to the
 * kernel via a struct ia64_boot_param.
 */
#ifdef CONFIG_PCI
# include <linux/pci.h>
#endif

#include <linux/efi.h>
#include <asm/io.h>
#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/setup.h>

#include "ssc.h"

#define MB	(1024*1024UL)

#define SIMPLE_MEMMAP	1

#if SIMPLE_MEMMAP
# define NUM_MEM_DESCS	4
#else
# define NUM_MEM_DESCS	16
#endif

/* Backing store for every table built by sys_fw_init(), carved out
 * sequentially; the extra 1024 bytes hold the NUL-terminated command
 * line.  8-byte aligned for the table structures placed inside. */
static char fw_mem[(  sizeof(struct ia64_boot_param)
		    + sizeof(efi_system_table_t)
		    + sizeof(efi_runtime_services_t)
		    + 1*sizeof(efi_config_table_t)
		    + sizeof(struct ia64_sal_systab)
		    + sizeof(struct ia64_sal_desc_entry_point)
		    + NUM_MEM_DESCS*(sizeof(efi_memory_desc_t))
		    + 1024)] __attribute__ ((aligned (8)));

#define SECS_PER_HOUR	(60 * 60)
#define SECS_PER_DAY	(SECS_PER_HOUR * 24)

/* Convert T, seconds since the 1970 epoch, into calendar fields of *TP:
 * year, month (1-12), day (1-based), hour, minute, second.  (Derived
 * from glibc's offtime(); unlike struct tm, no yday/wday are produced
 * here.)  Always returns 1. */
int
offtime (unsigned long t, efi_time_t *tp)
{
	const unsigned short int __mon_yday[2][13] =
	{
		/* Normal years.  */
		{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
		/* Leap years.  */
		{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
	};
	long int days, rem, y;
	const unsigned short int *ip;

	days = t / SECS_PER_DAY;
	rem = t % SECS_PER_DAY;
	while (rem < 0) {
		rem += SECS_PER_DAY;
		--days;
	}
	while (rem >= SECS_PER_DAY) {
		rem -= SECS_PER_DAY;
		++days;
	}
	tp->hour = rem / SECS_PER_HOUR;
	rem %= SECS_PER_HOUR;
	tp->minute = rem / 60;
	tp->second = rem % 60;

	/* January 1, 1970 was a Thursday.  */
	y = 1970;

#	define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
#	define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
#	define __isleap(year) \
	  ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))

	while (days < 0 || days >= (__isleap (y) ? 366 : 365)) {
		/* Guess a corrected year, assuming 365 days per year.  */
		long int yg = y + days / 365 - (days % 365 < 0);

		/* Adjust DAYS and Y to match the guessed year.  */
		days -= ((yg - y) * 365 + LEAPS_THRU_END_OF (yg - 1)
			 - LEAPS_THRU_END_OF (y - 1));
		y = yg;
	}
	tp->year = y;
	ip = __mon_yday[__isleap(y)];
	/* Walk month boundaries backwards until DAYS falls inside month y. */
	for (y = 11; days < (long int) ip[y]; --y)
		continue;
	days -= ip[y];
	tp->month = y + 1;
	tp->day = days + 1;
	return 1;
}

/* Assembly PAL entry stub, defined elsewhere in the boot loader. */
extern void pal_emulator_static (void);

/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
#define BUILD_CMD(addr)		((0x80000000 | (addr)) & ~3)

#define REG_OFFSET(addr)	(0x00000000000000FF & (addr))
#define DEVICE_FUNCTION(addr)	(0x000000000000FF00 & (addr))
#define BUS_NUMBER(addr)	(0x0000000000FF0000 & (addr))

/* EFI get_time emulation: fetch the time-of-day from the simulator via
 * the SSC_GET_TOD syscall and convert it with offtime().  Capabilities
 * (*tc), if requested, are simply zeroed. */
static efi_status_t
fw_efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
{
#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
	struct {
		int tv_sec;	/* must be 32bits to work */
		int tv_usec;
	} tv32bits;

	ssc((unsigned long) &tv32bits, 0, 0, 0, SSC_GET_TOD);

	memset(tm, 0, sizeof(*tm));
	offtime(tv32bits.tv_sec, tm);

	if (tc)
		memset(tc, 0, sizeof(*tc));
#else
#	error Not implemented yet...
#endif
	return EFI_SUCCESS;
}

/* EFI reset_system emulation: just exit the simulator with `status'.
 * reset_type/data are ignored. */
static void
efi_reset_system (int reset_type, efi_status_t status,
		  unsigned long data_size, efi_char16_t *data)
{
#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
	ssc(status, 0, 0, 0, SSC_EXIT);
#else
#	error Not implemented yet...
#endif
}

/* Stub installed for every unemulated EFI runtime service. */
static efi_status_t
efi_unimplemented (void)
{
	return EFI_UNSUPPORTED;
}

/*
 * SAL procedure emulation.  Returns status in r9..r11 per the SAL
 * calling convention: SAL_FREQ_BASE reports fixed simulator
 * frequencies; PCI config read/write (when CONFIG_PCI) are forwarded
 * to the legacy CF8/CFC ports; most other SAL functions are accepted
 * as successful no-ops; anything unknown fails with status -1.
 */
static struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2,
	      unsigned long in3, unsigned long in4, unsigned long in5,
	      unsigned long in6, unsigned long in7)
{
	long r9  = 0;
	long r10 = 0;
	long r11 = 0;
	long status;

	/*
	 * Don't do a "switch" here since that gives us code that
	 * isn't self-relocatable.
	 */
	status = 0;
	if (index == SAL_FREQ_BASE) {
		if (in1 == SAL_FREQ_BASE_PLATFORM)
			r9 = 200000000;
		else if (in1 == SAL_FREQ_BASE_INTERVAL_TIMER) {
			/*
			 * Is this supposed to be the cr.itc frequency
			 * or something platform specific?  The SAL
			 * doc ain't exactly clear on this...
			 */
			r9 = 700000000;
		} else if (in1 == SAL_FREQ_BASE_REALTIME_CLOCK)
			r9 = 1;
		else
			status = -1;
	} else if (index == SAL_SET_VECTORS) {
		;
	} else if (index == SAL_GET_STATE_INFO) {
		;
	} else if (index == SAL_GET_STATE_INFO_SIZE) {
		;
	} else if (index == SAL_CLEAR_STATE_INFO) {
		;
	} else if (index == SAL_MC_RENDEZ) {
		;
	} else if (index == SAL_MC_SET_PARAMS) {
		;
	} else if (index == SAL_CACHE_FLUSH) {
		;
	} else if (index == SAL_CACHE_INIT) {
		;
#ifdef CONFIG_PCI
	} else if (index == SAL_PCI_CONFIG_READ) {
		/*
		 * in1 contains the PCI configuration address and in2
		 * the size of the read.  The value that is read is
		 * returned via the general register r9.
		 */
		outl(BUILD_CMD(in1), 0xCF8);
		if (in2 == 1)			/* Reading byte  */
			r9 = inb(0xCFC + ((REG_OFFSET(in1) & 3)));
		else if (in2 == 2)		/* Reading word  */
			r9 = inw(0xCFC + ((REG_OFFSET(in1) & 2)));
		else				/* Reading dword */
			r9 = inl(0xCFC);
		status = PCIBIOS_SUCCESSFUL;
	} else if (index == SAL_PCI_CONFIG_WRITE) {
		/*
		 * in1 contains the PCI configuration address, in2 the
		 * size of the write, and in3 the actual value to be
		 * written out.
		 */
		outl(BUILD_CMD(in1), 0xCF8);
		if (in2 == 1)			/* Writing byte  */
			outb(in3, 0xCFC + ((REG_OFFSET(in1) & 3)));
		else if (in2 == 2)		/* Writing word  */
			outw(in3, 0xCFC + ((REG_OFFSET(in1) & 2)));
		else				/* Writing dword */
			outl(in3, 0xCFC);
		status = PCIBIOS_SUCCESSFUL;
#endif /* CONFIG_PCI */
	} else if (index == SAL_UPDATE_PAL) {
		;
	} else {
		status = -1;
	}
	return ((struct sal_ret_values) {status, r9, r10, r11});
}

/*
 * Build the emulated firmware tables inside fw_mem and return the boot
 * parameter block describing them.
 *
 * @args/@arglen: kernel command line (copied, truncated to 1023 bytes,
 * NUL-terminated; may be NULL).  Lays out, in order: EFI system table,
 * EFI runtime services, one EFI config table entry (pointing at the
 * SAL system table), SAL system table, SAL entry-point descriptor, the
 * EFI memory map, the boot param block, and the command line.  All
 * pointers stored in the tables are physical (__pa).  The SAL systab
 * checksum is computed so the table sums to zero.
 */
struct ia64_boot_param *
sys_fw_init (const char *args, int arglen)
{
	efi_system_table_t *efi_systab;
	efi_runtime_services_t *efi_runtime;
	efi_config_table_t *efi_tables;
	struct ia64_sal_systab *sal_systab;
	efi_memory_desc_t *efi_memmap, *md;
	unsigned long *pal_desc, *sal_desc;
	struct ia64_sal_desc_entry_point *sal_ed;
	struct ia64_boot_param *bp;
	unsigned char checksum = 0;
	char *cp, *cmd_line;
	int i = 0;

	/* Append one memory descriptor covering [start, end) to the map. */
#	define MAKE_MD(typ, attr, start, end)		\
	do {						\
		md = efi_memmap + i++;			\
		md->type = typ;				\
		md->pad = 0;				\
		md->phys_addr = start;			\
		md->virt_addr = 0;			\
		md->num_pages = (end - start) >> 12;	\
		md->attribute = attr;			\
	} while (0)

	memset(fw_mem, 0, sizeof(fw_mem));

	/* Function "pointers" here are ia64 function descriptors:
	 * word 0 is the entry point, word 1 the gp value. */
	pal_desc = (unsigned long *) &pal_emulator_static;
	sal_desc = (unsigned long *) &sal_emulator;

	/* Carve the tables out of fw_mem, back to back. */
	cp = fw_mem;
	efi_systab  = (void *) cp; cp += sizeof(*efi_systab);
	efi_runtime = (void *) cp; cp += sizeof(*efi_runtime);
	efi_tables  = (void *) cp; cp += sizeof(*efi_tables);
	sal_systab  = (void *) cp; cp += sizeof(*sal_systab);
	sal_ed      = (void *) cp; cp += sizeof(*sal_ed);
	efi_memmap  = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap);
	bp          = (void *) cp; cp += sizeof(*bp);
	cmd_line    = (void *) cp;

	if (args) {
		if (arglen >= 1024)
			arglen = 1023;
		memcpy(cmd_line, args, arglen);
	} else {
		arglen = 0;
	}
	cmd_line[arglen] = '\0';

	memset(efi_systab, 0, sizeof(*efi_systab));
	efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
	efi_systab->hdr.revision  = ((1 << 16) | 00);
	efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
	/* UCS-2 vendor string, spelled out byte by byte. */
	efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0");
	efi_systab->fw_revision = 1;
	efi_systab->runtime = (void *) __pa(efi_runtime);
	efi_systab->nr_tables = 1;
	efi_systab->tables = __pa(efi_tables);

	efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
	efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
	efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
	efi_runtime->get_time = __pa(&fw_efi_get_time);
	efi_runtime->set_time = __pa(&efi_unimplemented);
	efi_runtime->get_wakeup_time = __pa(&efi_unimplemented);
	efi_runtime->set_wakeup_time = __pa(&efi_unimplemented);
	efi_runtime->set_virtual_address_map = __pa(&efi_unimplemented);
	efi_runtime->get_variable = __pa(&efi_unimplemented);
	efi_runtime->get_next_variable = __pa(&efi_unimplemented);
	efi_runtime->set_variable = __pa(&efi_unimplemented);
	efi_runtime->get_next_high_mono_count = __pa(&efi_unimplemented);
	efi_runtime->reset_system = __pa(&efi_reset_system);

	efi_tables->guid = SAL_SYSTEM_TABLE_GUID;
	efi_tables->table = __pa(sal_systab);

	/* fill in the SAL system table: */
	memcpy(sal_systab->signature, "SST_", 4);
	sal_systab->size = sizeof(*sal_systab);
	sal_systab->sal_rev_minor = 1;
	sal_systab->sal_rev_major = 0;
	sal_systab->entry_count = 1;

#ifdef CONFIG_IA64_GENERIC
	strcpy(sal_systab->oem_id, "Generic");
	strcpy(sal_systab->product_id, "IA-64 system");
#endif

#ifdef CONFIG_IA64_HP_SIM
	strcpy(sal_systab->oem_id, "Hewlett-Packard");
	strcpy(sal_systab->product_id, "HP-simulator");
#endif

	/* fill in an entry point: */
	sal_ed->type = SAL_DESC_ENTRY_POINT;
	sal_ed->pal_proc = __pa(pal_desc[0]);
	sal_ed->sal_proc = __pa(sal_desc[0]);
	sal_ed->gp = __pa(sal_desc[1]);

	/* Byte-sum the systab and entry point so the total comes to zero. */
	for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp)
		checksum += *cp;

	sal_systab->checksum = -checksum;

#if SIMPLE_MEMMAP
	/* simulate free memory at physical address zero */
	MAKE_MD(EFI_BOOT_SERVICES_DATA, EFI_MEMORY_WB, 0*MB, 1*MB);
	MAKE_MD(EFI_PAL_CODE, EFI_MEMORY_WB, 1*MB, 2*MB);
	MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 2*MB, 130*MB);
	MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 4096*MB, 4128*MB);
#else
	/* Detailed 16-entry map; types/attributes given numerically
	 * (raw EFI memory type codes). */
	MAKE_MD( 4,                0x9, 0x0000000000000000, 0x0000000000001000);
	MAKE_MD( 7,                0x9, 0x0000000000001000, 0x000000000008a000);
	MAKE_MD( 4,                0x9, 0x000000000008a000, 0x00000000000a0000);
	MAKE_MD( 5, 0x8000000000000009, 0x00000000000c0000, 0x0000000000100000);
	MAKE_MD( 7,                0x9, 0x0000000000100000, 0x0000000004400000);
	MAKE_MD( 2,                0x9, 0x0000000004400000, 0x0000000004be5000);
	MAKE_MD( 7,                0x9, 0x0000000004be5000, 0x000000007f77e000);
	MAKE_MD( 6, 0x8000000000000009, 0x000000007f77e000, 0x000000007fb94000);
	MAKE_MD( 6, 0x8000000000000009, 0x000000007fb94000, 0x000000007fb95000);
	MAKE_MD( 6, 0x8000000000000009, 0x000000007fb95000, 0x000000007fc00000);
	MAKE_MD(13, 0x8000000000000009, 0x000000007fc00000, 0x000000007fc3a000);
	MAKE_MD( 7,                0x9, 0x000000007fc3a000, 0x000000007fea0000);
	MAKE_MD( 5, 0x8000000000000009, 0x000000007fea0000, 0x000000007fea8000);
	MAKE_MD( 7,                0x9, 0x000000007fea8000, 0x000000007feab000);
	MAKE_MD( 5, 0x8000000000000009, 0x000000007feab000, 0x000000007ffff000);
	MAKE_MD( 7,                0x9, 0x00000000ff400000, 0x0000000104000000);
#endif

	bp->efi_systab = __pa(&fw_mem);
	bp->efi_memmap = __pa(efi_memmap);
	bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t);
	bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
	bp->efi_memdesc_version = 1;
	bp->command_line = __pa(cmd_line);
	bp->console_info.num_cols = 80;
	bp->console_info.num_rows = 25;
	bp->console_info.orig_x = 0;
	bp->console_info.orig_y = 24;
	bp->fpswa = 0;

	return bp;
}
gpl-2.0
ISTweak/android_kernel_htc_valentewx
arch/ia64/kernel/signal.c
8455
18467
/* * Architecture-specific signal handling support. * * Copyright (C) 1999-2004 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * Derived from i386 and Alpha versions. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/tracehook.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/unistd.h> #include <linux/wait.h> #include <asm/intrinsics.h> #include <asm/uaccess.h> #include <asm/rse.h> #include <asm/sigcontext.h> #include "sigframe.h" #define DEBUG_SIG 0 #define STACK_ALIGN 16 /* minimal alignment for stack pointer */ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #if _NSIG_WORDS > 1 # define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t)) # define GET_SIGSET(k,u) __copy_from_user((k)->sig, (u)->sig, sizeof(sigset_t)) #else # define PUT_SIGSET(k,u) __put_user((k)->sig[0], &(u)->sig[0]) # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) #endif asmlinkage long sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, struct pt_regs regs) { return do_sigaltstack(uss, uoss, regs.r12); } static long restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) { unsigned long ip, flags, nat, um, cfm, rsc; long err; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* restore scratch that always needs gets updated during signal delivery: */ err = __get_user(flags, &sc->sc_flags); err |= __get_user(nat, &sc->sc_nat); err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */ err |= __get_user(cfm, &sc->sc_cfm); err |= __get_user(um, &sc->sc_um); /* user mask */ err |= __get_user(rsc, &sc->sc_ar_rsc); err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat); err |= 
__get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */ err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8); /* r1 */ err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */ err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */ err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */ scr->pt.cr_ifs = cfm | (1UL << 63); scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */ /* establish new instruction pointer: */ scr->pt.cr_iip = ip & ~0x3UL; ia64_psr(&scr->pt)->ri = ip & 0x3; scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM); scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat); if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) { /* Restore most scratch-state only when not in syscall. */ err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ err |= __get_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */ err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */ err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */ err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */ } if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) { struct ia64_psr *psr = ia64_psr(&scr->pt); err |= __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16); psr->mfh = 0; /* drop signal handler's fph contents... */ preempt_disable(); if (psr->dfh) ia64_drop_fpu(current); else { /* We already own the local fph, otherwise psr->dfh wouldn't be 0. 
*/ __ia64_load_fpu(current->thread.fph); ia64_set_local_fpu_owner(current); } preempt_enable(); } return err; } int copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from) { if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; if (from->si_code < 0) { if (__copy_to_user(to, from, sizeof(siginfo_t))) return -EFAULT; return 0; } else { int err; /* * If you change siginfo_t structure, please be sure this code is fixed * accordingly. It should never copy any pad contained in the structure * to avoid security leaks, but must copy the generic 3 ints plus the * relevant union member. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); err |= __put_user((short)from->si_code, &to->si_code); switch (from->si_code >> 16) { case __SI_FAULT >> 16: err |= __put_user(from->si_flags, &to->si_flags); err |= __put_user(from->si_isr, &to->si_isr); case __SI_POLL >> 16: err |= __put_user(from->si_addr, &to->si_addr); err |= __put_user(from->si_imm, &to->si_imm); break; case __SI_TIMER >> 16: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_ptr, &to->si_ptr); break; case __SI_RT >> 16: /* Not generated by the kernel as of now. 
*/ case __SI_MESGQ >> 16: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_ptr, &to->si_ptr); break; case __SI_CHLD >> 16: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); default: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); break; } return err; } } long ia64_rt_sigreturn (struct sigscratch *scr) { extern char ia64_strace_leave_kernel, ia64_leave_kernel; struct sigcontext __user *sc; struct siginfo si; sigset_t set; long retval; sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc; /* * When we return to the previously executing context, r8 and r10 have already * been setup the way we want them. Indeed, if the signal wasn't delivered while * in a system call, we must not touch r8 or r10 as otherwise user-level state * could be corrupted. */ retval = (long) &ia64_leave_kernel; if (test_thread_flag(TIF_SYSCALL_TRACE) || test_thread_flag(TIF_SYSCALL_AUDIT)) /* * strace expects to be notified after sigreturn returns even though the * context to which we return may not be in the middle of a syscall. * Thus, the return-value that strace displays for sigreturn is * meaningless. */ retval = (long) &ia64_strace_leave_kernel; if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) goto give_sigsegv; if (GET_SIGSET(&set, &sc->sc_mask)) goto give_sigsegv; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); { current->blocked = set; recalc_sigpending(); } spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(sc, scr)) goto give_sigsegv; #if DEBUG_SIG printk("SIG return (%s:%d): sp=%lx ip=%lx\n", current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip); #endif /* * It is more difficult to avoid calling this function than to * call it and ignore errors. 
*/ do_sigaltstack(&sc->sc_stack, NULL, scr->pt.r12); return retval; give_sigsegv: si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = task_pid_vnr(current); si.si_uid = current_uid(); si.si_addr = sc; force_sig_info(SIGSEGV, &si, current); return retval; } /* * This does just the minimum required setup of sigcontext. * Specifically, it only installs data that is either not knowable at * the user-level or that gets modified before execution in the * trampoline starts. Everything else is done at the user-level. */ static long setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr) { unsigned long flags = 0, ifs, cfm, nat; long err = 0; ifs = scr->pt.cr_ifs; if (on_sig_stack((unsigned long) sc)) flags |= IA64_SC_FLAG_ONSTACK; if ((ifs & (1UL << 63)) == 0) /* if cr_ifs doesn't have the valid bit set, we got here through a syscall */ flags |= IA64_SC_FLAG_IN_SYSCALL; cfm = ifs & ((1UL << 38) - 1); ia64_flush_fph(current); if ((current->thread.flags & IA64_THREAD_FPH_VALID)) { flags |= IA64_SC_FLAG_FPH_VALID; err = __copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16); } nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat); err |= __put_user(flags, &sc->sc_flags); err |= __put_user(nat, &sc->sc_nat); err |= PUT_SIGSET(mask, &sc->sc_mask); err |= __put_user(cfm, &sc->sc_cfm); err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um); err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc); err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */ err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */ err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */ err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8); /* r1 */ err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */ err |= 
__copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8); /* r12-r13 */ err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */ err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip); if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) { /* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */ err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ err |= __put_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */ err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8); /* ar.csd & ar.ssd */ err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */ err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */ } return err; } /* * Check whether the register-backing store is already on the signal stack. */ static inline int rbs_on_sig_stack (unsigned long bsp) { return (bsp - current->sas_ss_sp < current->sas_ss_size); } static long force_sigsegv_info (int sig, void __user *addr) { unsigned long flags; struct siginfo si; if (sig == SIGSEGV) { /* * Acquiring siglock around the sa_handler-update is almost * certainly overkill, but this isn't a * performance-critical path and I'd rather play it safe * here than having to debug a nasty race if and when * something changes in kernel/signal.c that would make it * no longer safe to modify sa_handler without holding the * lock. 
*/ spin_lock_irqsave(&current->sighand->siglock, flags); current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; spin_unlock_irqrestore(&current->sighand->siglock, flags); } si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = task_pid_vnr(current); si.si_uid = current_uid(); si.si_addr = addr; force_sig_info(SIGSEGV, &si, current); return 0; } static long setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct sigscratch *scr) { extern char __kernel_sigtramp[]; unsigned long tramp_addr, new_rbs = 0, new_sp; struct sigframe __user *frame; long err; new_sp = scr->pt.r12; tramp_addr = (unsigned long) __kernel_sigtramp; if (ka->sa.sa_flags & SA_ONSTACK) { int onstack = sas_ss_flags(new_sp); if (onstack == 0) { new_sp = current->sas_ss_sp + current->sas_ss_size; /* * We need to check for the register stack being on the * signal stack separately, because it's switched * separately (memory stack is switched in the kernel, * register stack is switched in the signal trampoline). */ if (!rbs_on_sig_stack(scr->pt.ar_bspstore)) new_rbs = ALIGN(current->sas_ss_sp, sizeof(long)); } else if (onstack == SS_ONSTACK) { unsigned long check_sp; /* * If we are on the alternate signal stack and would * overflow it, don't. Return an always-bogus address * instead so we will die with SIGSEGV. 
*/ check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN; if (!likely(on_sig_stack(check_sp))) return force_sigsegv_info(sig, (void __user *) check_sp); } } frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return force_sigsegv_info(sig, frame); err = __put_user(sig, &frame->arg0); err |= __put_user(&frame->info, &frame->arg1); err |= __put_user(&frame->sc, &frame->arg2); err |= __put_user(new_rbs, &frame->sc.sc_rbs_base); err |= __put_user(0, &frame->sc.sc_loadrs); /* initialize to zero */ err |= __put_user(ka->sa.sa_handler, &frame->handler); err |= copy_siginfo_to_user(&frame->info, info); err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp); err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size); err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags); err |= setup_sigcontext(&frame->sc, set, scr); if (unlikely(err)) return force_sigsegv_info(sig, frame); scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */ scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ scr->pt.cr_iip = tramp_addr; ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */ ia64_psr(&scr->pt)->be = 0; /* force little-endian byte-order */ /* * Force the interruption function mask to zero. This has no effect when a * system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is * ignored), but it has the desirable effect of making it possible to deliver a * signal with an incomplete register frame (which happens when a mandatory RSE * load faults). Furthermore, it has no negative effect on the getting the user's * dirty partition preserved, because that's governed by scr->pt.loadrs. */ scr->pt.cr_ifs = (1UL << 63); /* * Note: this affects only the NaT bits of the scratch regs (the ones saved in * pt_regs), which is exactly what we want. 
*/ scr->scratch_unat = 0; /* ensure NaT bits of r12 is clear */ #if DEBUG_SIG printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n", current->comm, current->pid, sig, scr->pt.r12, frame->sc.sc_ip, frame->handler); #endif return 1; } static long handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct sigscratch *scr) { if (!setup_frame(sig, ka, info, oldset, scr)) return 0; spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked, sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); /* * Let tracing know that we've done the handler setup. */ tracehook_signal_handler(sig, info, ka, &scr->pt, test_thread_flag(TIF_SINGLESTEP)); return 1; } /* * Note that `init' is a special process: it doesn't get signals it doesn't want to * handle. Thus you cannot kill init even with a SIGKILL even by mistake. */ void ia64_do_signal (struct sigscratch *scr, long in_syscall) { struct k_sigaction ka; sigset_t *oldset; siginfo_t info; long restart = in_syscall; long errno = scr->pt.r8; /* * In the ia64_leave_kernel code path, we want the common case to go fast, which * is why we may in certain cases get here from kernel mode. Just return without * doing anything if so. */ if (!user_mode(&scr->pt)) return; if (current_thread_info()->status & TS_RESTORE_SIGMASK) oldset = &current->saved_sigmask; else oldset = &current->blocked; /* * This only loops in the rare cases of handle_signal() failing, in which case we * need to push through a forced SIGSEGV. */ while (1) { int signr = get_signal_to_deliver(&info, &ka, &scr->pt, NULL); /* * get_signal_to_deliver() may have run a debugger (via notify_parent()) * and the debugger may have modified the state (e.g., to arrange for an * inferior call), thus it's important to check for restarting _after_ * get_signal_to_deliver(). 
*/ if ((long) scr->pt.r10 != -1) /* * A system calls has to be restarted only if one of the error codes * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 * isn't -1 then r8 doesn't hold an error code and we don't need to * restart the syscall, so we can clear the "restart" flag here. */ restart = 0; if (signr <= 0) break; if (unlikely(restart)) { switch (errno) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; case ERESTARTSYS: if ((ka.sa.sa_flags & SA_RESTART) == 0) { scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; } case ERESTARTNOINTR: ia64_decrement_ip(&scr->pt); restart = 0; /* don't restart twice if handle_signal() fails... */ } } /* * Whee! Actually deliver the signal. If the delivery failed, we need to * continue to iterate in this loop so we can deliver the SIGSEGV... */ if (handle_signal(signr, &ka, &info, oldset, scr)) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TS_RESTORE_SIGMASK flag. */ current_thread_info()->status &= ~TS_RESTORE_SIGMASK; return; } } /* Did we come from a system call? */ if (restart) { /* Restart the system call - no handlers present */ if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR || errno == ERESTART_RESTARTBLOCK) { /* * Note: the syscall number is in r15 which is saved in * pt_regs so all we need to do here is adjust ip so that * the "break" instruction gets re-executed. */ ia64_decrement_ip(&scr->pt); if (errno == ERESTART_RESTARTBLOCK) scr->pt.r15 = __NR_restart_syscall; } } /* if there's no signal to deliver, we just put the saved sigmask * back */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } }
gpl-2.0
TeamVilleC2/android_kernel_htc_liberty-villec2
drivers/media/video/pvrusb2/pvrusb2-main.c
8967
4484
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/videodev2.h> #include "pvrusb2-hdw.h" #include "pvrusb2-devattr.h" #include "pvrusb2-context.h" #include "pvrusb2-debug.h" #include "pvrusb2-v4l2.h" #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS #include "pvrusb2-sysfs.h" #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ #define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>" #define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner" #define DRIVER_VERSION "V4L in-tree version" #define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \ PVR2_TRACE_INFO| \ PVR2_TRACE_STD| \ PVR2_TRACE_TOLERANCE| \ PVR2_TRACE_TRAP| \ 0) int pvrusb2_debug = DEFAULT_DEBUG_MASK; module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug, "Debug trace mask"); #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS static struct pvr2_sysfs_class *class_ptr = NULL; #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ static void pvr_setup_attach(struct pvr2_context *pvr) { /* Create association with v4l layer */ pvr2_v4l2_create(pvr); #ifdef CONFIG_VIDEO_PVRUSB2_DVB /* Create association with dvb layer */ pvr2_dvb_create(pvr); #endif #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS pvr2_sysfs_create(pvr,class_ptr); 
#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ } static int pvr_probe(struct usb_interface *intf, const struct usb_device_id *devid) { struct pvr2_context *pvr; /* Create underlying hardware interface */ pvr = pvr2_context_create(intf,devid,pvr_setup_attach); if (!pvr) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Failed to create hdw handler"); return -ENOMEM; } pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr); usb_set_intfdata(intf, pvr); return 0; } /* * pvr_disconnect() * */ static void pvr_disconnect(struct usb_interface *intf) { struct pvr2_context *pvr = usb_get_intfdata(intf); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr); usb_set_intfdata (intf, NULL); pvr2_context_disconnect(pvr); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr); } static struct usb_driver pvr_driver = { .name = "pvrusb2", .id_table = pvr2_device_table, .probe = pvr_probe, .disconnect = pvr_disconnect }; /* * pvr_init() / pvr_exit() * * This code is run to initialize/exit the driver. * */ static int __init pvr_init(void) { int ret; pvr2_trace(PVR2_TRACE_INIT,"pvr_init"); ret = pvr2_context_global_init(); if (ret != 0) { pvr2_trace(PVR2_TRACE_INIT,"pvr_init failure code=%d",ret); return ret; } #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS class_ptr = pvr2_sysfs_class_create(); #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ ret = usb_register(&pvr_driver); if (ret == 0) printk(KERN_INFO "pvrusb2: " DRIVER_VERSION ":" DRIVER_DESC "\n"); if (pvrusb2_debug) printk(KERN_INFO "pvrusb2: Debug mask is %d (0x%x)\n", pvrusb2_debug,pvrusb2_debug); pvr2_trace(PVR2_TRACE_INIT,"pvr_init complete"); return ret; } static void __exit pvr_exit(void) { pvr2_trace(PVR2_TRACE_INIT,"pvr_exit"); usb_deregister(&pvr_driver); pvr2_context_global_done(); #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS pvr2_sysfs_class_destroy(class_ptr); #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ pvr2_trace(PVR2_TRACE_INIT,"pvr_exit complete"); } module_init(pvr_init); module_exit(pvr_exit); MODULE_AUTHOR(DRIVER_AUTHOR); 
MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION("0.9.1"); /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 70 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
supertoast/kernel-2.6.38.6-U8815-Gingerbread
arch/frv/kernel/futex.c
12039
6709
/* futex.c: futex operations * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/futex.h> #include <linux/uaccess.h> #include <asm/futex.h> #include <asm/errno.h> /* * the various futex operations; MMU fault checking is ignored under no-MMU * conditions */ static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), "i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ " add %1,%3,%3 \n" "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), 
"i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ " or %1,%3,%3 \n" "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), "i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ " and %1,%3,%3 \n" "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), "i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ " xor %1,%3,%3 \n" "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq 
icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), "i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } /*****************************************************************************/ /* * do the futex operations */ int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; int cmparg = (encoded_op << 20) >> 20; int oldval = 0, ret; if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); switch (op) { case FUTEX_OP_SET: ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval); break; case FUTEX_OP_ADD: ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval); break; case FUTEX_OP_OR: ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval); break; case FUTEX_OP_ANDN: ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval); break; case FUTEX_OP_XOR: ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval); break; default: ret = -ENOSYS; break; } pagefault_enable(); if (!ret) { switch (cmp) { case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; default: ret = -ENOSYS; break; } } return ret; } /* end futex_atomic_op_inuser() */
gpl-2.0
sleekmason/LG-V510-Kitkat
arch/arm/mach-imx/clock-imx25.c
8
11026
/* * Copyright (C) 2009 by Sascha Hauer, Pengutronix * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/clkdev.h> #include <mach/clock.h> #include <mach/hardware.h> #include <mach/common.h> #include <mach/mx25.h> #define CRM_BASE MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR) #define CCM_MPCTL 0x00 #define CCM_UPCTL 0x04 #define CCM_CCTL 0x08 #define CCM_CGCR0 0x0C #define CCM_CGCR1 0x10 #define CCM_CGCR2 0x14 #define CCM_PCDR0 0x18 #define CCM_PCDR1 0x1C #define CCM_PCDR2 0x20 #define CCM_PCDR3 0x24 #define CCM_RCSR 0x28 #define CCM_CRDR 0x2C #define CCM_DCVR0 0x30 #define CCM_DCVR1 0x34 #define CCM_DCVR2 0x38 #define CCM_DCVR3 0x3c #define CCM_LTR0 0x40 #define CCM_LTR1 0x44 #define CCM_LTR2 0x48 #define CCM_LTR3 0x4c static unsigned long get_rate_mpll(void) { ulong mpctl = __raw_readl(CRM_BASE + CCM_MPCTL); return mxc_decode_pll(mpctl, 24000000); } static unsigned long get_rate_upll(void) { ulong mpctl = __raw_readl(CRM_BASE + CCM_UPCTL); return mxc_decode_pll(mpctl, 24000000); } unsigned long get_rate_arm(struct clk *clk) { unsigned long cctl = readl(CRM_BASE + CCM_CCTL); unsigned long rate = get_rate_mpll(); if (cctl & (1 << 14)) rate = (rate * 3) >> 2; return rate / ((cctl >> 30) + 1); } 
static unsigned long get_rate_ahb(struct clk *clk) { unsigned long cctl = readl(CRM_BASE + CCM_CCTL); return get_rate_arm(NULL) / (((cctl >> 28) & 0x3) + 1); } static unsigned long get_rate_ipg(struct clk *clk) { return get_rate_ahb(NULL) >> 1; } static unsigned long get_rate_per(int per) { unsigned long ofs = (per & 0x3) * 8; unsigned long reg = per & ~0x3; unsigned long val = (readl(CRM_BASE + CCM_PCDR0 + reg) >> ofs) & 0x3f; unsigned long fref; if (readl(CRM_BASE + 0x64) & (1 << per)) fref = get_rate_upll(); else fref = get_rate_ahb(NULL); return fref / (val + 1); } static unsigned long get_rate_uart(struct clk *clk) { return get_rate_per(15); } static unsigned long get_rate_ssi2(struct clk *clk) { return get_rate_per(14); } static unsigned long get_rate_ssi1(struct clk *clk) { return get_rate_per(13); } static unsigned long get_rate_i2c(struct clk *clk) { return get_rate_per(6); } static unsigned long get_rate_nfc(struct clk *clk) { return get_rate_per(8); } static unsigned long get_rate_gpt(struct clk *clk) { return get_rate_per(5); } static unsigned long get_rate_lcdc(struct clk *clk) { return get_rate_per(7); } static unsigned long get_rate_esdhc1(struct clk *clk) { return get_rate_per(3); } static unsigned long get_rate_esdhc2(struct clk *clk) { return get_rate_per(4); } static unsigned long get_rate_csi(struct clk *clk) { return get_rate_per(0); } static unsigned long get_rate_otg(struct clk *clk) { unsigned long cctl = readl(CRM_BASE + CCM_CCTL); unsigned long rate = get_rate_upll(); return (cctl & (1 << 23)) ? 
0 : rate / ((0x3F & (cctl >> 16)) + 1); } static int clk_cgcr_enable(struct clk *clk) { u32 reg; reg = __raw_readl(clk->enable_reg); reg |= 1 << clk->enable_shift; __raw_writel(reg, clk->enable_reg); return 0; } static void clk_cgcr_disable(struct clk *clk) { u32 reg; reg = __raw_readl(clk->enable_reg); reg &= ~(1 << clk->enable_shift); __raw_writel(reg, clk->enable_reg); } #define DEFINE_CLOCK(name, i, er, es, gr, sr, s) \ static struct clk name = { \ .id = i, \ .enable_reg = CRM_BASE + er, \ .enable_shift = es, \ .get_rate = gr, \ .set_rate = sr, \ .enable = clk_cgcr_enable, \ .disable = clk_cgcr_disable, \ .secondary = s, \ } /* */ DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL); DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL); DEFINE_CLOCK(ssi1_per_clk, 0, CCM_CGCR0, 13, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(ssi2_per_clk, 0, CCM_CGCR0, 14, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(esdhc1_ahb_clk, 0, CCM_CGCR0, 21, get_rate_esdhc1, NULL, NULL); DEFINE_CLOCK(esdhc1_per_clk, 0, CCM_CGCR0, 3, get_rate_esdhc1, NULL, &esdhc1_ahb_clk); DEFINE_CLOCK(esdhc2_ahb_clk, 0, CCM_CGCR0, 22, get_rate_esdhc2, NULL, NULL); DEFINE_CLOCK(esdhc2_per_clk, 0, CCM_CGCR0, 4, get_rate_esdhc2, NULL, &esdhc2_ahb_clk); DEFINE_CLOCK(sdma_ahb_clk, 0, CCM_CGCR0, 26, NULL, NULL, NULL); DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL); DEFINE_CLOCK(lcdc_ahb_clk, 0, CCM_CGCR0, 24, NULL, NULL, NULL); DEFINE_CLOCK(lcdc_per_clk, 0, CCM_CGCR0, 7, NULL, NULL, &lcdc_ahb_clk); DEFINE_CLOCK(csi_ahb_clk, 0, CCM_CGCR0, 18, get_rate_csi, NULL, NULL); DEFINE_CLOCK(csi_per_clk, 0, CCM_CGCR0, 0, get_rate_csi, NULL, &csi_ahb_clk); DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk); DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, 
get_rate_uart, NULL, &uart_per_clk); DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk); DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk); DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk); DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL, NULL); DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL); DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL, NULL); DEFINE_CLOCK(fec_clk, 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk); DEFINE_CLOCK(dryice_clk, 0, CCM_CGCR1, 8, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(lcdc_clk, 0, CCM_CGCR1, 29, get_rate_lcdc, NULL, &lcdc_per_clk); DEFINE_CLOCK(wdt_clk, 0, CCM_CGCR2, 19, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(ssi1_clk, 0, CCM_CGCR2, 11, get_rate_ssi1, NULL, &ssi1_per_clk); DEFINE_CLOCK(ssi2_clk, 1, CCM_CGCR2, 12, get_rate_ssi2, NULL, &ssi2_per_clk); DEFINE_CLOCK(sdma_clk, 0, CCM_CGCR2, 6, get_rate_ipg, NULL, &sdma_ahb_clk); DEFINE_CLOCK(esdhc1_clk, 0, CCM_CGCR1, 13, get_rate_esdhc1, NULL, &esdhc1_per_clk); DEFINE_CLOCK(esdhc2_clk, 1, CCM_CGCR1, 14, get_rate_esdhc2, NULL, &esdhc2_per_clk); DEFINE_CLOCK(audmux_clk, 0, CCM_CGCR1, 0, NULL, NULL, NULL); DEFINE_CLOCK(csi_clk, 0, CCM_CGCR1, 4, get_rate_csi, NULL, &csi_per_clk); DEFINE_CLOCK(can1_clk, 0, CCM_CGCR1, 2, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(can2_clk, 1, CCM_CGCR1, 3, get_rate_ipg, NULL, NULL); DEFINE_CLOCK(iim_clk, 0, CCM_CGCR1, 26, NULL, NULL, NULL); #define _REGISTER_CLOCK(d, n, c) \ { \ .dev_id = d, \ .con_id = n, \ .clk = &c, \ }, static struct 
clk_lookup lookups[] = { /* */ _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk) _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk) _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk) _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk) _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk) _REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk) _REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk) _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk) _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk) _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk) /* */ _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk) _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk) _REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk) _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk) _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk) _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk) _REGISTER_CLOCK("mxc_pwm.3", NULL, pwm4_clk) _REGISTER_CLOCK("imx-keypad", NULL, kpp_clk) _REGISTER_CLOCK("mx25-adc", NULL, tsc_clk) _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk) _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk) _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk) _REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk) _REGISTER_CLOCK("imxdi_rtc.0", NULL, dryice_clk) _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk) _REGISTER_CLOCK("imx2-wdt.0", NULL, wdt_clk) _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk) _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk) _REGISTER_CLOCK("sdhci-esdhc-imx25.0", NULL, esdhc1_clk) _REGISTER_CLOCK("sdhci-esdhc-imx25.1", NULL, esdhc2_clk) _REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk) _REGISTER_CLOCK(NULL, "audmux", audmux_clk) _REGISTER_CLOCK("flexcan.0", NULL, can1_clk) _REGISTER_CLOCK("flexcan.1", NULL, can2_clk) /* */ _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk) _REGISTER_CLOCK(NULL, "iim", iim_clk) }; int __init mx25_clocks_init(void) { clkdev_add_table(lookups, ARRAY_SIZE(lookups)); /* */ __raw_writel((1 << 19), CRM_BASE + CCM_CGCR0); __raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1); __raw_writel((1 << 5), CRM_BASE + CCM_CGCR2); #if 
defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) clk_enable(&uart1_clk); #endif /* */ __raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0), CRM_BASE + 0x64); /* */ __raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64); clk_enable(&iim_clk); imx_print_silicon_rev("i.MX25", mx25_revision()); clk_disable(&iim_clk); mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); return 0; }
gpl-2.0
Aaron0927/qemu
hw/pl061.c
8
8489
/* * Arm PrimeCell PL061 General Purpose IO with additional * Luminary Micro Stellaris bits. * * Copyright (c) 2007 CodeSourcery. * Written by Paul Brook * * This code is licensed under the GPL. */ #include "sysbus.h" //#define DEBUG_PL061 1 #ifdef DEBUG_PL061 #define DPRINTF(fmt, ...) \ do { printf("pl061: " fmt , ## __VA_ARGS__); } while (0) #define BADF(fmt, ...) \ do { fprintf(stderr, "pl061: error: " fmt , ## __VA_ARGS__); exit(1);} while (0) #else #define DPRINTF(fmt, ...) do {} while(0) #define BADF(fmt, ...) \ do { fprintf(stderr, "pl061: error: " fmt , ## __VA_ARGS__);} while (0) #endif static const uint8_t pl061_id[12] = { 0x00, 0x00, 0x00, 0x00, 0x61, 0x10, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; static const uint8_t pl061_id_luminary[12] = { 0x00, 0x00, 0x00, 0x00, 0x61, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1 }; typedef struct { SysBusDevice busdev; int locked; uint8_t data; uint8_t old_data; uint8_t dir; uint8_t isense; uint8_t ibe; uint8_t iev; uint8_t im; uint8_t istate; uint8_t afsel; uint8_t dr2r; uint8_t dr4r; uint8_t dr8r; uint8_t odr; uint8_t pur; uint8_t pdr; uint8_t slr; uint8_t den; uint8_t cr; uint8_t float_high; qemu_irq irq; qemu_irq out[8]; const unsigned char *id; } pl061_state; static void pl061_update(pl061_state *s) { uint8_t changed; uint8_t mask; uint8_t out; int i; /* Outputs float high. */ /* FIXME: This is board dependent. */ out = (s->data & s->dir) | ~s->dir; changed = s->old_data ^ out; if (!changed) return; s->old_data = out; for (i = 0; i < 8; i++) { mask = 1 << i; if ((changed & mask) && s->out) { DPRINTF("Set output %d = %d\n", i, (out & mask) != 0); qemu_set_irq(s->out[i], (out & mask) != 0); } } /* FIXME: Implement input interrupts. 
*/ } static uint32_t pl061_read(void *opaque, target_phys_addr_t offset) { pl061_state *s = (pl061_state *)opaque; if (offset >= 0xfd0 && offset < 0x1000) { return s->id[(offset - 0xfd0) >> 2]; } if (offset < 0x400) { return s->data & (offset >> 2); } switch (offset) { case 0x400: /* Direction */ return s->dir; case 0x404: /* Interrupt sense */ return s->isense; case 0x408: /* Interrupt both edges */ return s->ibe; case 0x40c: /* Interrupt event */ return s->iev; case 0x410: /* Interrupt mask */ return s->im; case 0x414: /* Raw interrupt status */ return s->istate; case 0x418: /* Masked interrupt status */ return s->istate | s->im; case 0x420: /* Alternate function select */ return s->afsel; case 0x500: /* 2mA drive */ return s->dr2r; case 0x504: /* 4mA drive */ return s->dr4r; case 0x508: /* 8mA drive */ return s->dr8r; case 0x50c: /* Open drain */ return s->odr; case 0x510: /* Pull-up */ return s->pur; case 0x514: /* Pull-down */ return s->pdr; case 0x518: /* Slew rate control */ return s->slr; case 0x51c: /* Digital enable */ return s->den; case 0x520: /* Lock */ return s->locked; case 0x524: /* Commit */ return s->cr; default: hw_error("pl061_read: Bad offset %x\n", (int)offset); return 0; } } static void pl061_write(void *opaque, target_phys_addr_t offset, uint32_t value) { pl061_state *s = (pl061_state *)opaque; uint8_t mask; if (offset < 0x400) { mask = (offset >> 2) & s->dir; s->data = (s->data & ~mask) | (value & mask); pl061_update(s); return; } switch (offset) { case 0x400: /* Direction */ s->dir = value; break; case 0x404: /* Interrupt sense */ s->isense = value; break; case 0x408: /* Interrupt both edges */ s->ibe = value; break; case 0x40c: /* Interrupt event */ s->iev = value; break; case 0x410: /* Interrupt mask */ s->im = value; break; case 0x41c: /* Interrupt clear */ s->istate &= ~value; break; case 0x420: /* Alternate function select */ mask = s->cr; s->afsel = (s->afsel & ~mask) | (value & mask); break; case 0x500: /* 2mA drive */ s->dr2r = 
value; break; case 0x504: /* 4mA drive */ s->dr4r = value; break; case 0x508: /* 8mA drive */ s->dr8r = value; break; case 0x50c: /* Open drain */ s->odr = value; break; case 0x510: /* Pull-up */ s->pur = value; break; case 0x514: /* Pull-down */ s->pdr = value; break; case 0x518: /* Slew rate control */ s->slr = value; break; case 0x51c: /* Digital enable */ s->den = value; break; case 0x520: /* Lock */ s->locked = (value != 0xacce551); break; case 0x524: /* Commit */ if (!s->locked) s->cr = value; break; default: hw_error("pl061_write: Bad offset %x\n", (int)offset); } pl061_update(s); } static void pl061_reset(pl061_state *s) { s->locked = 1; s->cr = 0xff; } static void pl061_set_irq(void * opaque, int irq, int level) { pl061_state *s = (pl061_state *)opaque; uint8_t mask; mask = 1 << irq; if ((s->dir & mask) == 0) { s->data &= ~mask; if (level) s->data |= mask; pl061_update(s); } } static CPUReadMemoryFunc * const pl061_readfn[] = { pl061_read, pl061_read, pl061_read }; static CPUWriteMemoryFunc * const pl061_writefn[] = { pl061_write, pl061_write, pl061_write }; static void pl061_save(QEMUFile *f, void *opaque) { pl061_state *s = (pl061_state *)opaque; qemu_put_be32(f, s->locked); qemu_put_be32(f, s->data); qemu_put_be32(f, s->old_data); qemu_put_be32(f, s->dir); qemu_put_be32(f, s->isense); qemu_put_be32(f, s->ibe); qemu_put_be32(f, s->iev); qemu_put_be32(f, s->im); qemu_put_be32(f, s->istate); qemu_put_be32(f, s->afsel); qemu_put_be32(f, s->dr2r); qemu_put_be32(f, s->dr4r); qemu_put_be32(f, s->dr8r); qemu_put_be32(f, s->odr); qemu_put_be32(f, s->pur); qemu_put_be32(f, s->pdr); qemu_put_be32(f, s->slr); qemu_put_be32(f, s->den); qemu_put_be32(f, s->cr); qemu_put_be32(f, s->float_high); } static int pl061_load(QEMUFile *f, void *opaque, int version_id) { pl061_state *s = (pl061_state *)opaque; if (version_id != 1) return -EINVAL; s->locked = qemu_get_be32(f); s->data = qemu_get_be32(f); s->old_data = qemu_get_be32(f); s->dir = qemu_get_be32(f); s->isense = 
qemu_get_be32(f); s->ibe = qemu_get_be32(f); s->iev = qemu_get_be32(f); s->im = qemu_get_be32(f); s->istate = qemu_get_be32(f); s->afsel = qemu_get_be32(f); s->dr2r = qemu_get_be32(f); s->dr4r = qemu_get_be32(f); s->dr8r = qemu_get_be32(f); s->odr = qemu_get_be32(f); s->pur = qemu_get_be32(f); s->pdr = qemu_get_be32(f); s->slr = qemu_get_be32(f); s->den = qemu_get_be32(f); s->cr = qemu_get_be32(f); s->float_high = qemu_get_be32(f); return 0; } static int pl061_init(SysBusDevice *dev, const unsigned char *id) { int iomemtype; pl061_state *s = FROM_SYSBUS(pl061_state, dev); s->id = id; iomemtype = cpu_register_io_memory(pl061_readfn, pl061_writefn, s, DEVICE_NATIVE_ENDIAN); sysbus_init_mmio(dev, 0x1000, iomemtype); sysbus_init_irq(dev, &s->irq); qdev_init_gpio_in(&dev->qdev, pl061_set_irq, 8); qdev_init_gpio_out(&dev->qdev, s->out, 8); pl061_reset(s); register_savevm(&dev->qdev, "pl061_gpio", -1, 1, pl061_save, pl061_load, s); return 0; } static int pl061_init_luminary(SysBusDevice *dev) { return pl061_init(dev, pl061_id_luminary); } static int pl061_init_arm(SysBusDevice *dev) { return pl061_init(dev, pl061_id); } static void pl061_register_devices(void) { sysbus_register_dev("pl061", sizeof(pl061_state), pl061_init_arm); sysbus_register_dev("pl061_luminary", sizeof(pl061_state), pl061_init_luminary); } device_init(pl061_register_devices)
gpl-2.0
yuwata/systemd
src/login/logind-brightness.c
8
8369
/* SPDX-License-Identifier: LGPL-2.1-or-later */ #include "bus-util.h" #include "device-util.h" #include "hash-funcs.h" #include "logind-brightness.h" #include "logind.h" #include "process-util.h" #include "stdio-util.h" /* Brightness and LED devices tend to be very slow to write to (often being I2C and such). Writes to the * sysfs attributes are synchronous, and hence will freeze our process on access. We can't really have that, * hence we add some complexity: whenever we need to write to the brightness attribute, we do so in a forked * off process, which terminates when it is done. Watching that process allows us to watch completion of the * write operation. * * To make this even more complex: clients are likely to send us many write requests in a short time-frame * (because they implement reactive brightness sliders on screen). Let's coalesce writes to make this * efficient: whenever we get requests to change brightness while we are still writing to the brightness * attribute, let's remember the request and restart a new one when the initial operation finished. When we * get another request while one is ongoing and one is pending we'll replace the pending one with the new * one. * * The bus messages are answered when the first write operation finishes that started either due to the * request or due to a later request that overrode the requested one. * * Yes, this is complex, but I don't see an easier way if we want to be both efficient and still support * completion notification. 
*/ typedef struct BrightnessWriter { Manager *manager; sd_device *device; char *path; pid_t child; uint32_t brightness; bool again; Set *current_messages; Set *pending_messages; sd_event_source* child_event_source; } BrightnessWriter; static BrightnessWriter* brightness_writer_free(BrightnessWriter *w) { if (!w) return NULL; if (w->manager && w->path) (void) hashmap_remove_value(w->manager->brightness_writers, w->path, w); sd_device_unref(w->device); free(w->path); set_free(w->current_messages); set_free(w->pending_messages); w->child_event_source = sd_event_source_unref(w->child_event_source); return mfree(w); } DEFINE_TRIVIAL_CLEANUP_FUNC(BrightnessWriter*, brightness_writer_free); DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR( brightness_writer_hash_ops, char, string_hash_func, string_compare_func, BrightnessWriter, brightness_writer_free); static void brightness_writer_reply(BrightnessWriter *w, int error) { int r; assert(w); for (;;) { _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL; m = set_steal_first(w->current_messages); if (!m) break; if (error == 0) r = sd_bus_reply_method_return(m, NULL); else r = sd_bus_reply_method_errnof(m, error, "Failed to write to brightness device: %m"); if (r < 0) log_warning_errno(r, "Failed to send method reply, ignoring: %m"); } } static int brightness_writer_fork(BrightnessWriter *w); static int on_brightness_writer_exit(sd_event_source *s, const siginfo_t *si, void *userdata) { BrightnessWriter *w = ASSERT_PTR(userdata); int r; assert(s); assert(si); assert(si->si_pid == w->child); w->child = 0; w->child_event_source = sd_event_source_unref(w->child_event_source); brightness_writer_reply(w, si->si_code == CLD_EXITED && si->si_status == EXIT_SUCCESS ? 0 : -EPROTO); if (w->again) { /* Another request to change the brightness has been queued. Act on it, but make the pending * messages the current ones. 
*/ w->again = false; set_free(w->current_messages); w->current_messages = TAKE_PTR(w->pending_messages); r = brightness_writer_fork(w); if (r >= 0) return 0; brightness_writer_reply(w, r); } brightness_writer_free(w); return 0; } static int brightness_writer_fork(BrightnessWriter *w) { int r; assert(w); assert(w->manager); assert(w->child == 0); assert(!w->child_event_source); r = safe_fork("(sd-bright)", FORK_DEATHSIG|FORK_NULL_STDIO|FORK_CLOSE_ALL_FDS|FORK_LOG|FORK_REOPEN_LOG, &w->child); if (r < 0) return r; if (r == 0) { char brs[DECIMAL_STR_MAX(uint32_t)+1]; /* Child */ xsprintf(brs, "%" PRIu32, w->brightness); r = sd_device_set_sysattr_value(w->device, "brightness", brs); if (r < 0) { log_device_error_errno(w->device, r, "Failed to write brightness to device: %m"); _exit(EXIT_FAILURE); } _exit(EXIT_SUCCESS); } r = sd_event_add_child(w->manager->event, &w->child_event_source, w->child, WEXITED, on_brightness_writer_exit, w); if (r < 0) return log_error_errno(r, "Failed to watch brightness writer child " PID_FMT ": %m", w->child); return 0; } static int set_add_message(Set **set, sd_bus_message *message) { int r; assert(set); if (!message) return 0; r = sd_bus_message_get_expect_reply(message); if (r <= 0) return r; r = set_ensure_put(set, &bus_message_hash_ops, message); if (r <= 0) return r; sd_bus_message_ref(message); return 1; } int manager_write_brightness( Manager *m, sd_device *device, uint32_t brightness, sd_bus_message *message) { _cleanup_(brightness_writer_freep) BrightnessWriter *w = NULL; BrightnessWriter *existing; const char *path; int r; assert(m); assert(device); r = sd_device_get_syspath(device, &path); if (r < 0) return log_device_error_errno(device, r, "Failed to get sysfs path for brightness device: %m"); existing = hashmap_get(m->brightness_writers, path); if (existing) { /* There's already a writer for this device. Let's update it with the new brightness, and add * our message to the set of message to reply when done. 
*/ r = set_add_message(&existing->pending_messages, message); if (r < 0) return log_error_errno(r, "Failed to add message to set: %m"); /* We override any previously requested brightness here: we coalesce writes, and the newest * requested brightness is the one we'll put into effect. */ existing->brightness = brightness; existing->again = true; /* request another iteration of the writer when the current one is * complete */ return 0; } w = new(BrightnessWriter, 1); if (!w) return log_oom(); *w = (BrightnessWriter) { .device = sd_device_ref(device), .path = strdup(path), .brightness = brightness, }; if (!w->path) return log_oom(); r = hashmap_ensure_put(&m->brightness_writers, &brightness_writer_hash_ops, w->path, w); if (r == -ENOMEM) return log_oom(); if (r < 0) return log_error_errno(r, "Failed to add brightness writer to hashmap: %m"); w->manager = m; r = set_add_message(&w->current_messages, message); if (r < 0) return log_error_errno(r, "Failed to add message to set: %m"); r = brightness_writer_fork(w); if (r < 0) return r; TAKE_PTR(w); return 0; }
gpl-2.0
rex-xxx/mt6572_x201
bionic/libc/stdio/putchar.c
8
2070
/* $OpenBSD: putchar.c,v 1.7 2005/08/08 08:05:36 espie Exp $ */ /*- * Copyright (c) 1990, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Chris Torek. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include <stdio.h> #undef putchar_unlocked /* * A subrouting version of the macro putchar_unlocked */ int putchar_unlocked(int c) { FILE *so = stdout; return (putc_unlocked(c, so)); } #undef putchar /* * A subroutine version of the macro putchar */ int putchar(int c) { FILE *so = stdout; return (putc(c, so)); }
gpl-2.0
npe9/kitten
linux/ofed/1.5.3/drivers/net/mlx4/sense.c
8
4991
/* * Copyright (c) 2007 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/errno.h> #include <linux/if_ether.h> #include <linux/mlx4/cmd.h> #include "mlx4.h" static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type) { u64 out_param; int err = 0; err = mlx4_cmd_imm(dev, 0, &out_param, port, 0, MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B); if (err) { mlx4_err(dev, "Sense command failed for port: %d\n", port); return err; } if (out_param > 2) { mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param); return EINVAL; } *type = out_param; return 0; } void mlx4_do_sense_ports(struct mlx4_dev *dev, enum mlx4_port_type *stype, enum mlx4_port_type *defaults) { struct mlx4_sense *sense = &mlx4_priv(dev)->sense; int err; int i; for (i = 1; i <= dev->caps.num_ports; i++) { stype[i - 1] = 0; if (sense->do_sense_port[i] && sense->sense_allowed[i] && dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]); if (err) stype[i - 1] = defaults[i - 1]; } else stype[i - 1] = defaults[i - 1]; } /* * Adjust port configuration: * If port 1 sensed nothing and port 2 is IB, set both as IB * If port 2 sensed nothing and port 1 is Eth, set both as Eth */ if (stype[0] == MLX4_PORT_TYPE_ETH) { for (i = 1; i < dev->caps.num_ports; i++) stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH; } if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) { for (i = 0; i < dev->caps.num_ports - 1; i++) stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB; } /* * If sensed nothing, remain in current configuration. */ for (i = 0; i < dev->caps.num_ports; i++) stype[i] = stype[i] ? 
stype[i] : defaults[i]; } static void mlx4_sense_port(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct mlx4_sense *sense = container_of(delay, struct mlx4_sense, sense_poll); struct mlx4_dev *dev = sense->dev; struct mlx4_priv *priv = mlx4_priv(dev); enum mlx4_port_type stype[MLX4_MAX_PORTS]; mutex_lock(&priv->port_mutex); mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]); if (mlx4_check_port_params(dev, stype)) goto sense_again; if (mlx4_change_port_types(dev, stype)) mlx4_err(dev, "Failed to change port_types\n"); sense_again: mutex_unlock(&priv->port_mutex); if (sense->resched) queue_delayed_work(sense->sense_wq , &sense->sense_poll, round_jiffies(MLX4_SENSE_RANGE)); } void mlx4_start_sense(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_sense *sense = &priv->sense; if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) return; sense->resched = 1; queue_delayed_work(sense->sense_wq , &sense->sense_poll, round_jiffies(MLX4_SENSE_RANGE)); } void mlx4_stop_sense(struct mlx4_dev *dev) { mlx4_priv(dev)->sense.resched = 0; } int mlx4_sense_init(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_sense *sense = &priv->sense; int port; sense->dev = dev; sense->sense_wq = create_singlethread_workqueue("mlx4_sense"); if (!sense->sense_wq) return -ENOMEM; for (port = 1; port <= dev->caps.num_ports; port++) sense->do_sense_port[port] = 1; INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port); return 0; } void mlx4_sense_cleanup(struct mlx4_dev *dev) { mlx4_stop_sense(dev); cancel_delayed_work(&mlx4_priv(dev)->sense.sense_poll); destroy_workqueue(mlx4_priv(dev)->sense.sense_wq); }
gpl-2.0
dduval/kernel-rhel5
arch/mips/momentum/ocelot_g/setup.c
8
7954
/* * BRIEF MODULE DESCRIPTION * Momentum Computer Ocelot-G (CP7000G) - board dependent boot routines * * Copyright (C) 1996, 1997, 2001 Ralf Baechle * Copyright (C) 2000 RidgeRun, Inc. * Copyright (C) 2001 Red Hat, Inc. * Copyright (C) 2002 Momentum Computer * * Author: Matthew Dharm, Momentum Computer * mdharm@momenco.com * * Author: RidgeRun, Inc. * glonnon@ridgerun.com, skranz@ridgerun.com, stevej@ridgerun.com * * Copyright 2001 MontaVista Software Inc. * Author: jsun@mvista.com or jsun@junsun.net * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
 * */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/timex.h>
#include <linux/vmalloc.h>
#include <asm/time.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/gt64240.h>
#include <asm/irq.h>
#include <asm/pci.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <linux/bootmem.h>
#include "ocelot_pld.h"

#ifdef CONFIG_GALILLEO_GT64240_ETH
/* MAC address base consumed by the GT64240 ethernet driver. */
extern unsigned char prom_mac_addr_base[6];
#endif

/* Physical base of the Marvell/Galileo GT64240 system controller registers. */
unsigned long marvell_base;

/* These functions are used for rebooting or halting the machine */
extern void momenco_ocelot_restart(char *command);
extern void momenco_ocelot_halt(void);
extern void momenco_ocelot_power_off(void);

extern void gt64240_time_init(void);
extern void momenco_ocelot_irq_setup(void);

/* Raw PLD RESET_STATUS value, latched at boot before the bits are cleared. */
static char reset_reason;

/*
 * Build the EntryLo value for a wired TLB mapping of @paddr:
 * present, readable, writable, global and uncached.  The final >> 6
 * converts the PFN-plus-flags layout into the EntryLo register format.
 */
static unsigned long ENTRYLO(unsigned long paddr)
{
	return ((paddr & PAGE_MASK) |
		(_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL |
		 _CACHE_UNCACHED)) >> 6;
}

/* setup code for a handoff from a version 2 PMON 2000 PROM */
void PMON_v2_setup(void)
{
	/* A wired TLB entry for the GT64240 and the serial port. The
	   GT64240 is going to be hit on every IRQ anyway - there's
	   absolutely no point in letting it be a random TLB entry, as
	   it'll just cause needless churning of the TLB. And we use
	   the other half for the serial port, which is just a PITA
	   otherwise :)

		Device			Physical	Virtual
		GT64240 Internal Regs	0xf4000000	0xe0000000
		UARTs (CS2)		0xfd000000	0xe0001000
	*/
	add_wired_entry(ENTRYLO(0xf4000000), ENTRYLO(0xf4010000),
			0xf4000000, PM_64K);
	add_wired_entry(ENTRYLO(0xfd000000), ENTRYLO(0xfd001000),
			0xfd000000, PM_4K);

	/* Also a temporary entry to let us talk to the Ocelot PLD and
	   NVRAM in the CS[012] region. We can't use ioremap() yet.
	   The NVRAM is a ST M48T37Y, which includes NVRAM, RTC, and
	   Watchdog functions.

		Ocelot PLD (CS0)	0xfc000000	0xe0020000
		NVRAM (CS1)		0xfc800000	0xe0030000
	*/
	add_temporary_entry(ENTRYLO(0xfc000000), ENTRYLO(0xfc010000),
			    0xfc000000, PM_64K);
	add_temporary_entry(ENTRYLO(0xfc800000), ENTRYLO(0xfc810000),
			    0xfc800000, PM_64K);

	marvell_base = 0xf4000000;
}

extern int rm7k_tcache_enabled;

/*
 * This runs in KSEG1. See the verbiage in rm7k.c::probe_scache()
 */
#define Page_Invalidate_T 0x16

/*
 * Enable and invalidate the external L3 (tertiary) cache.
 * @size: cache size in bytes; invalidated one 4 KiB page-tag at a time.
 * Called through a KSEG1 (uncached) alias -- see plat_mem_setup().
 */
static void __init setup_l3cache(unsigned long size)
{
	int register i;

	printk("Enabling L3 cache...");

	/* Enable the L3 cache in the system controller's CPU
	   Configuration register (offset 0, bit 14). */
	MV_WRITE(0, MV_READ(0) | (1<<14));

	/* Enable the L3 cache in the CPU */
	set_c0_config(1<<12 /* CONF_TE */);

	/* Clear the cache: zero the tag registers, then invalidate
	   every page-sized tag with the cache instruction. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (i=0; i < size; i+= 4096) {
		__asm__ __volatile__ (
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache %1, (%0)\n\t"
			".set mips0\n\t"
			".set reorder"
			:
			: "r" (KSEG0ADDR(i)),
			  "i" (Page_Invalidate_T));
	}

	/* Let the RM7000 MM code know that the tertiary cache is enabled */
	rm7k_tcache_enabled = 1;

	printk("Done\n");
}

/*
 * Board-level setup: register restart/halt/power-off hooks, redo the
 * PMON handoff mappings, report board revision and status from the
 * PLD, enable the L3 cache when fitted, and register SDRAM regions
 * according to the BOARD_STATUS size bits.
 */
void __init plat_mem_setup(void)
{
	/* setup_l3cache() must execute uncached, so call it via its
	   KSEG1 alias. */
	void (*l3func)(unsigned long) = (void *) KSEG1ADDR(setup_l3cache);
	unsigned int tmpword;

	board_time_init = gt64240_time_init;

	_machine_restart = momenco_ocelot_restart;
	_machine_halt = momenco_ocelot_halt;
	pm_power_off = momenco_ocelot_power_off;

	/*
	 * initrd_start = (unsigned long)ocelot_initrd_start;
	 * initrd_end = (unsigned long)ocelot_initrd_start + (ulong)ocelot_initrd_size;
	 * initrd_below_start_ok = 1;
	 */

	/* do handoff reconfiguration */
	PMON_v2_setup();

#ifdef CONFIG_GALILLEO_GT64240_ETH
	/* get the mac addr while the NVRAM temporary mapping is live */
	memcpy(prom_mac_addr_base, (void*)0xfc807cf2, 6);
#endif

	/* Turn off the Bit-Error LED */
	OCELOT_PLD_WRITE(0x80, INTCLR);

	tmpword = OCELOT_PLD_READ(BOARDREV);
	if (tmpword < 26)
		printk("Momenco Ocelot-G: Board Assembly Rev. %c\n",
		       'A'+tmpword);
	else
		printk("Momenco Ocelot-G: Board Assembly Revision #0x%x\n",
		       tmpword);

	tmpword = OCELOT_PLD_READ(PLD1_ID);
	printk("PLD 1 ID: %d.%d\n", tmpword>>4, tmpword&15);
	tmpword = OCELOT_PLD_READ(PLD2_ID);
	printk("PLD 2 ID: %d.%d\n", tmpword>>4, tmpword&15);
	tmpword = OCELOT_PLD_READ(RESET_STATUS);
	printk("Reset reason: 0x%x\n", tmpword);
	reset_reason = tmpword;
	/* Writing all-ones clears the latched reset-status bits. */
	OCELOT_PLD_WRITE(0xff, RESET_STATUS);

	tmpword = OCELOT_PLD_READ(BOARD_STATUS);
	printk("Board Status register: 0x%02x\n", tmpword);
	printk(" - User jumper: %s\n", (tmpword & 0x80)?"installed":"absent");
	printk(" - Boot flash write jumper: %s\n", (tmpword&0x40)?"installed":"absent");
	printk(" - Tulip PHY %s connected\n", (tmpword&0x10)?"is":"not");
	/* Bits 3:2 encode the L3 size (0 means none -- the &~1 maps the
	   1 MiB result of 1<<0 to 0); bits 1:0 encode the SDRAM size. */
	printk(" - L3 Cache size: %d MiB\n", (1<<((tmpword&12) >> 2))&~1);
	printk(" - SDRAM size: %d MiB\n", 1<<(6+(tmpword&3)));

	if (tmpword&12)
		l3func((1<<(((tmpword&12) >> 2)+20)));

	switch(tmpword &3) {
	case 3:
		/* 512MiB -- two banks of 256MiB */
		add_memory_region( 0x0<<20, 0x100<<20, BOOT_MEM_RAM);
		/*
		add_memory_region(0x100<<20, 0x100<<20, BOOT_MEM_RAM);
		*/
		break;
	case 2:
		/* 256MiB -- two banks of 128MiB */
		add_memory_region( 0x0<<20, 0x80<<20, BOOT_MEM_RAM);
		add_memory_region(0x80<<20, 0x80<<20, BOOT_MEM_RAM);
		break;
	case 1:
		/* 128MiB -- 64MiB per bank */
		add_memory_region( 0x0<<20, 0x40<<20, BOOT_MEM_RAM);
		add_memory_region(0x40<<20, 0x40<<20, BOOT_MEM_RAM);
		break;
	case 0:
		/* 64MiB */
		add_memory_region( 0x0<<20, 0x40<<20, BOOT_MEM_RAM);
		break;
	}

	/* FIXME: Fix up the DiskOnChip mapping */
	MV_WRITE(0x468, 0xfef73);
}

/* This needs to be one of the first initcalls, because no I/O port
   access can work before this */
static int io_base_ioremap(void)
{
	/* we're mapping PCI accesses from 0xc0000000 to 0xf0000000 */
	unsigned long io_remap_range;

	io_remap_range = (unsigned long) ioremap(0xc0000000, 0x30000000);
	if (!io_remap_range)
		panic("Could not ioremap I/O port range");

	set_io_port_base(io_remap_range - 0xc0000000);

	return 0;
}

module_init(io_base_ioremap);
gpl-2.0
toastcfh/android_kernel_lge_d851
arch/microblaze/kernel/ftrace.c
8
6208
/*
 * Ftrace support for Microblaze.
 *
 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2009 PetaLogix
 *
 * Based on MIPS and PowerPC ftrace code
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <asm/cacheflush.h>
#include <linux/ftrace.h>

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address stored at *parent and redirect it to
 * return_to_handler, pushing the original address on the return stack
 * so the graph tracer can measure the callee.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted, err;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Fault-tolerant read-modify-write of *parent: load the old
	 * return address (1:), store return_hooker over it (2:) and
	 * clear 'faulted'.  If either access faults, the __ex_table
	 * entries divert to 4:, which sets 'faulted' to 1.
	 */
	asm volatile("	1:	lwi	%0, %2, 0;		\
			2:	swi	%3, %2, 0;		\
				addik	%1, r0, 0;		\
			3:					\
				.section .fixup, \"ax\";	\
			4:	brid	3b;			\
				addik	%1, r0, 1;		\
				.previous;			\
				.section __ex_table,\"a\";	\
				.word	1b,4b;			\
				.word	2b,4b;			\
				.previous;"			\
			: "=&r" (old), "=r" (faulted)
			: "r" (parent), "r" (return_hooker)
	);
	/* The patched word lives in a stack frame the CPU may refetch;
	   keep the data and instruction caches coherent over it. */
	flush_dcache_range((u32)parent, (u32)parent + 4);
	flush_icache_range((u32)parent, (u32)parent + 4);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
	if (err == -EBUSY) {
		/* Return stack is full: restore the original address. */
		*parent = old;
		return;
	}

	trace.func = self_addr;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Store the 32-bit instruction 'value' at 'addr', tolerating a fault.
 * Returns 0 on success or -EFAULT if the store faulted; flushes the
 * caches so the new instruction is visible to the fetch unit.
 */
static int ftrace_modify_code(unsigned long addr, unsigned int value)
{
	int faulted = 0;

	__asm__ __volatile__("	1:	swi	%2, %1, 0;		\
					addik	%0, r0, 0;		\
				2:					\
					.section .fixup, \"ax\";	\
				3:	brid	2b;			\
					addik	%0, r0, 1;		\
					.previous;			\
					.section __ex_table,\"a\";	\
					.word	1b,3b;			\
					.previous;"			\
				: "=r" (faulted)
				: "r" (addr), "r" (value)
	);

	if (unlikely(faulted))
		return -EFAULT;

	flush_dcache_range(addr, addr + 4);
	flush_icache_range(addr, addr + 4);
	return 0;
}

#define MICROBLAZE_NOP 0x80000000
#define MICROBLAZE_BRI 0xb800000C

static unsigned int recorded; /* non-zero once the original insn was saved */
static unsigned int imm; /* saved first instruction of the mcount call site */

/* NOTE(review): USE_FTRACE_NOP is deliberately disabled here; the BRI
 * variant below patches a single branch over the two-instruction call
 * pair instead of two separate NOPs. */
#undef USE_FTRACE_NOP
#ifdef USE_FTRACE_NOP
static unsigned int bralid; /* saved second (bralid/call) instruction */
#endif

/*
 * Disable the mcount call at rec->ip.  The first time through, the
 * original instruction(s) are saved so ftrace_make_call() can restore
 * them later (every call site uses the same sequence, hence the single
 * 'recorded' snapshot).
 */
int ftrace_make_nop(struct module *mod,
			struct dyn_ftrace *rec, unsigned long addr)
{
	/* we have this part of code only once */
	int ret = 0;

	if (recorded == 0) {
		recorded = 1;
		imm = *(unsigned int *)rec->ip;
		pr_debug("%s: imm:0x%x\n", __func__, imm);
#ifdef USE_FTRACE_NOP
		bralid = *(unsigned int *)(rec->ip + 4);
		pr_debug("%s: bralid 0x%x\n", __func__, bralid);
#endif /* USE_FTRACE_NOP */
	}

#ifdef USE_FTRACE_NOP
	ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP);
	ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP);
#else /* USE_FTRACE_NOP */
	/* Single BRI jumps over the whole call pair. */
	ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI);
#endif /* USE_FTRACE_NOP */
	return ret;
}

/* Re-enable the mcount call at rec->ip from the saved instruction(s). */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret;

	pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
		__func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
	ret = ftrace_modify_code(rec->ip, imm);
#ifdef USE_FTRACE_NOP
	pr_debug("%s: bralid:0x%x\n", __func__, bralid);
	ret += ftrace_modify_code(rec->ip + 4, bralid);
#endif /* USE_FTRACE_NOP */
	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return value is reported back through *data; 0 = success. */
	*(unsigned long *)data = 0;

	return 0;
}

/*
 * Point the patchable call in ftrace_call at 'func' by rebuilding the
 * two-instruction absolute-address sequence, then NOP out the early
 * return guard at ftrace_caller.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int upper = (unsigned int)func;
	unsigned int lower = (unsigned int)func;
	int ret = 0;

	/* 0xb0000000 is 'imm': loads the high halfword of func. */
	upper = 0xb0000000 + (upper >> 16);
	/* 0x32800000 is 'addik r20, r0, <low halfword of func>'. */
	lower = 0x32800000 + (lower & 0xFFFF);

	pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n",
		__func__, (unsigned int)func, (unsigned int)ip, upper, lower);

	/* patch the imm/addik pair */
	ret = ftrace_modify_code(ip, upper);
	ret += ftrace_modify_code(ip + 4, lower);

	/* replace the early-return instruction at ftrace_caller with NOP */
	ret += ftrace_modify_code((unsigned long)&ftrace_caller,
				  MICROBLAZE_NOP);

	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned int old_jump; /* instruction saved from ftrace_call_graph */

/* Enable graph tracing by NOPing the jump-over at ftrace_call_graph. */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int ret;
	unsigned long ip = (unsigned long)(&ftrace_call_graph);

	old_jump = *(unsigned int *)ip; /* save the jump-over instruction */
	ret = ftrace_modify_code(ip, MICROBLAZE_NOP);

	pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump);
	return ret;
}

/* Disable graph tracing by restoring the saved jump-over instruction. */
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned int ret;
	unsigned long ip = (unsigned long)(&ftrace_call_graph);

	ret = ftrace_modify_code(ip, old_jump);

	pr_debug("%s\n", __func__);
	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */
gpl-2.0
nightrune/wireshark
ui/gtk/dissector_tables_dlg.c
8
14311
/* dissector_tables_dlg.c
 * dissector_tables_dlg 2010 Anders Broman
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "config.h"

#include <string.h>

#include <glib.h>

#include <epan/packet.h>

#include <gtk/gtk.h>

#include "ui/gtk/gui_utils.h"
#include "ui/gtk/dlg_utils.h"
#include "ui/gtk/dissector_tables_dlg.h"

/* Singleton dialog window; NULL whenever the dialog is not open. */
static GtkWidget *dissector_tables_dlg_w = NULL;

/* The columns of the tree store. */
enum {
    TABLE_UI_NAME_COL,
    TABLE_SHORT_NAME_COL,
    N_COLUMNS
};

/* "destroy" handler: tear down the window and clear the singleton so
 * dissector_tables_dlg_cb() can create a fresh dialog next time. */
static void
win_destroy_cb(GtkWindow *win _U_, gpointer data _U_)
{
    if (dissector_tables_dlg_w != NULL) {
        window_destroy(dissector_tables_dlg_w);
        dissector_tables_dlg_w = NULL;
    }
}

/*
 * For a dissector table, put its short name and its
 * descriptive name in the treeview.
 */
struct dissector_tables_tree_info {
    GtkWidget   *tree;      /* tree view currently being filled */
    GtkTreeIter  iter;      /* parent row (the table itself) */
    GtkTreeIter  new_iter;  /* most recently added child row */
};
typedef struct dissector_tables_tree_info dissector_tables_tree_info_t;

/* GtkTreeIterCompareFunc: case-sensitive compare on the string column
 * passed in user_data.
 *
 * Fix: gtk_tree_model_get() on a G_TYPE_STRING column returns a newly
 * allocated copy, so the strings must be freed here (the original
 * leaked both on every comparison).  g_strcmp0() matches strcmp() for
 * non-NULL input but is also safe if a cell is unset (NULL). */
static gint
ui_sort_func(GtkTreeModel *model,
             GtkTreeIter  *a,
             GtkTreeIter  *b,
             gpointer      user_data)
{
    gchar *stra, *strb;
    gint   result;
    /* The col to get data from is in user_data */
    gint   data_column = GPOINTER_TO_INT(user_data);

    gtk_tree_model_get(model, a, data_column, &stra, -1);
    gtk_tree_model_get(model, b, data_column, &strb, -1);

    result = g_strcmp0(stra, strb);

    g_free(stra);
    g_free(strb);
    return result;
}

/*
 * Struct to hold the pointers to the trees for dissector tables.
 */
struct dissector_tables_trees {
    GtkWidget *str_tree_wgt;        /* string-keyed tables */
    GtkWidget *uint_tree_wgt;       /* integer-keyed tables */
    GtkWidget *heuristic_tree_wgt;  /* heuristic tables */
};
typedef struct dissector_tables_trees dissector_tables_trees_t;

/* Append one (selector, protocol) child row under the current table row. */
static void
proto_add_to_list(dissector_tables_tree_info_t *tree_info,
                  GtkTreeStore *store,
                  gchar        *str,
                  const gchar  *proto_name)
{
    gtk_tree_store_insert_with_values(store, &tree_info->new_iter, &tree_info->iter, G_MAXINT,
                                      TABLE_UI_NAME_COL,    str,
                                      TABLE_SHORT_NAME_COL, proto_name,
                                      -1);
}

/* dissector_table_foreach() callback: add one table entry (selector ->
 * dissector) as a child row of the table's top-level row. */
static void
decode_proto_add_to_list(const gchar *table_name _U_, ftenum_t selector_type,
                         gpointer key, gpointer value _U_, gpointer user_data)
{
    GtkTreeStore       *store;
    const gchar        *proto_name;
    dtbl_entry_t       *dtbl_entry;
    dissector_handle_t  handle;
    guint32             port;
    gchar              *str;
    dissector_tables_tree_info_t *tree_info;

    tree_info  = (dissector_tables_tree_info_t *)user_data;
    dtbl_entry = (dtbl_entry_t *)value;
    handle     = dtbl_entry_get_handle(dtbl_entry);
    proto_name = dissector_handle_get_short_name(handle);

    store = GTK_TREE_STORE(gtk_tree_view_get_model(GTK_TREE_VIEW(tree_info->tree)));

    switch (selector_type) {

    case FT_UINT8:
    case FT_UINT16:
    case FT_UINT24:
    case FT_UINT32:
        port = GPOINTER_TO_UINT(key);
        /* Hack: Use fixed width rj str so alpha sort (strcmp) will sort field numerically */
        str = g_strdup_printf("%10d", port);
        proto_add_to_list(tree_info, store, str, proto_name);
        g_free(str);
        break;

    case FT_STRING:
    case FT_STRINGZ:
    case FT_UINT_STRING:
    case FT_STRINGZPAD:
        str = (gchar *)key;
        proto_add_to_list(tree_info, store, str, proto_name);
        break;

    default:
        /* Tables are only registered with the selector types above. */
        g_assert_not_reached();
    }
}

/* Add a top-level row for a dissector table and remember the tree view
 * in tree_info for the per-entry callbacks. */
static void
table_name_add_to_list(dissector_tables_tree_info_t *tree_info,
                       GtkWidget  *tree_view,
                       const char *table_name,
                       const char *ui_name)
{
    GtkTreeStore *store;

    tree_info->tree = tree_view;
    store = GTK_TREE_STORE(gtk_tree_view_get_model(GTK_TREE_VIEW(tree_view)));  /* Get store */

    gtk_tree_store_insert_with_values(store, &tree_info->iter, NULL, G_MAXINT,
                                      TABLE_UI_NAME_COL,    ui_name,
                                      TABLE_SHORT_NAME_COL, table_name,
                                      -1);
}

/* g_slist_foreach() callback: add one heuristic table entry under the
 * current heuristic table row. */
static void
display_heur_dissector_table_entries(gpointer data, gpointer user_data)
{
    heur_dtbl_entry_t            *dtbl_entry = (heur_dtbl_entry_t *)data;
    dissector_tables_tree_info_t *tree_info  = (dissector_tables_tree_info_t *)user_data;
    GtkTreeStore                 *store;

    if (dtbl_entry->protocol) {
        store = GTK_TREE_STORE(gtk_tree_view_get_model(GTK_TREE_VIEW(tree_info->tree)));  /* Get store */
        proto_add_to_list(tree_info, store,
                          (gchar *)proto_get_protocol_long_name(dtbl_entry->protocol),
                          proto_get_protocol_short_name(dtbl_entry->protocol));
    } else {
        g_warning("no protocol info");
    }
}

/* Callback for each heuristic dissector table: add its row and all of
 * its entries to the "Heuristic tables" tree. */
static void
display_heur_dissector_table_names(const char *table_name, gpointer table, gpointer w)
{
    dissector_tables_trees_t     *dis_tbl_trees;
    dissector_tables_tree_info_t *tree_info;
    heur_dissector_list_t        *list;

    tree_info     = g_new(dissector_tables_tree_info_t, 1);
    dis_tbl_trees = (dissector_tables_trees_t *)w;
    list          = (heur_dissector_list_t *)table;

    /* Heuristic tables have no separate UI name; show the table name. */
    table_name_add_to_list(tree_info, dis_tbl_trees->heuristic_tree_wgt, "", table_name);

    if (table) {
        g_slist_foreach(*list, display_heur_dissector_table_entries, tree_info);
    }

    /* Fix: tree_info is only used during this call and was leaked
     * before (the non-heuristic counterpart below frees it). */
    g_free(tree_info);
}

/* Callback for each (non-heuristic) dissector table: route it to the
 * string or integer tree by selector type and add all of its entries. */
static void
display_dissector_table_names(const char *table_name, const char *ui_name, gpointer w)
{
    dissector_tables_trees_t     *dis_tbl_trees;
    dissector_tables_tree_info_t *tree_info;
    ftenum_t                      selector_type = get_dissector_table_selector_type(table_name);

    tree_info     = g_new(dissector_tables_tree_info_t, 1);
    dis_tbl_trees = (dissector_tables_trees_t *)w;

    switch (selector_type) {

    case FT_UINT8:
    case FT_UINT16:
    case FT_UINT24:
    case FT_UINT32:
        table_name_add_to_list(tree_info, dis_tbl_trees->uint_tree_wgt, table_name, ui_name);
        break;

    case FT_STRING:
    case FT_STRINGZ:
    case FT_UINT_STRING:
    case FT_STRINGZPAD:
        table_name_add_to_list(tree_info, dis_tbl_trees->str_tree_wgt, table_name, ui_name);
        break;

    default:
        break;
    }

    dissector_table_foreach(table_name, decode_proto_add_to_list, tree_info);

    g_free(tree_info);
}

/* Build one two-column (UI name / short name) tree view backed by a
 * sortable GtkTreeStore.  Returns the new tree view widget. */
static GtkWidget *
init_table(void)
{
    GtkTreeStore      *store;
    GtkWidget         *tree;
    GtkTreeView       *tree_view;
    GtkTreeViewColumn *column;
    GtkCellRenderer   *renderer;
    GtkTreeSortable   *sortable;

    /* Create the store: one string column per display column. */
    store = gtk_tree_store_new(N_COLUMNS,       /* Total number of columns */
                               G_TYPE_STRING,   /* UI name */
                               G_TYPE_STRING);  /* Short name */

    /* Create a view */
    tree = gtk_tree_view_new_with_model(GTK_TREE_MODEL(store));
    tree_view = GTK_TREE_VIEW(tree);
    sortable = GTK_TREE_SORTABLE(store);

    /* Speed up the list display */
    gtk_tree_view_set_fixed_height_mode(tree_view, TRUE);

    /* Setup the sortable columns */
    gtk_tree_view_set_headers_clickable(GTK_TREE_VIEW(tree), FALSE);

    /* The view now holds a reference. We can get rid of our own reference */
    g_object_unref(G_OBJECT(store));

    /* Create the first column, associating the "text" attribute of the
     * cell_renderer to the first column of the model */
    renderer = gtk_cell_renderer_text_new();
    column = gtk_tree_view_column_new_with_attributes("UI name", renderer,
                                                      "text", TABLE_UI_NAME_COL,
                                                      NULL);
    gtk_tree_sortable_set_sort_func(sortable, TABLE_UI_NAME_COL, ui_sort_func,
                                    GINT_TO_POINTER(TABLE_UI_NAME_COL), NULL);
    gtk_tree_view_column_set_sort_column_id(column, TABLE_UI_NAME_COL);
    gtk_tree_view_column_set_resizable(column, TRUE);
    gtk_tree_view_column_set_sizing(column, GTK_TREE_VIEW_COLUMN_FIXED);
    gtk_tree_view_column_set_min_width(column, 80);
    gtk_tree_view_column_set_fixed_width(column, 330);
    gtk_tree_view_append_column(GTK_TREE_VIEW(tree_view), column);

    renderer = gtk_cell_renderer_text_new();
    column = gtk_tree_view_column_new_with_attributes("Short name", renderer,
                                                      "text", TABLE_SHORT_NAME_COL,
                                                      NULL);
    gtk_tree_sortable_set_sort_func(sortable, TABLE_SHORT_NAME_COL, ui_sort_func,
                                    GINT_TO_POINTER(TABLE_SHORT_NAME_COL), NULL);
    gtk_tree_view_column_set_sort_column_id(column, TABLE_SHORT_NAME_COL);
    gtk_tree_view_column_set_resizable(column, TRUE);
    gtk_tree_view_column_set_sizing(column, GTK_TREE_VIEW_COLUMN_FIXED);
    gtk_tree_view_column_set_min_width(column, 80);
    gtk_tree_view_column_set_fixed_width(column, 100);
    gtk_tree_view_append_column(GTK_TREE_VIEW(tree_view), column);

    return tree;
}

/* Create the dialog: a notebook with one page (string / integer /
 * heuristic) per table class, fill the trees, then sort each by the
 * UI-name column. */
static void
dissector_tables_dlg_init(void)
{
    dissector_tables_trees_t dis_tbl_trees;
    GtkWidget       *vbox;
    GtkWidget       *hbox;
    GtkWidget       *main_nb;
    GtkWidget       *scrolled_window;
    GtkTreeSortable *sortable;
    GtkWidget       *temp_page, *tmp;

    dissector_tables_dlg_w = dlg_window_new("Dissector tables");  /* transient_for top_level */
    gtk_window_set_destroy_with_parent(GTK_WINDOW(dissector_tables_dlg_w), TRUE);
    gtk_window_set_default_size(GTK_WINDOW(dissector_tables_dlg_w), 700, 300);

    vbox = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 3, FALSE);
    gtk_container_add(GTK_CONTAINER(dissector_tables_dlg_w), vbox);
    gtk_container_set_border_width(GTK_CONTAINER(vbox), 12);

    main_nb = gtk_notebook_new();
    gtk_box_pack_start(GTK_BOX(vbox), main_nb, TRUE, TRUE, 0);

    /* String tables */
    temp_page = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 6, FALSE);
    tmp = gtk_label_new("String tables");
    gtk_widget_show(tmp);
    hbox = ws_gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 3, FALSE);
    gtk_box_pack_start(GTK_BOX(hbox), tmp, TRUE, TRUE, 0);
    gtk_notebook_append_page(GTK_NOTEBOOK(main_nb), temp_page, hbox);

    scrolled_window = scrolled_window_new(NULL, NULL);
    dis_tbl_trees.str_tree_wgt = init_table();
    gtk_widget_show(dis_tbl_trees.str_tree_wgt);
    gtk_container_add(GTK_CONTAINER(scrolled_window), dis_tbl_trees.str_tree_wgt);
    gtk_box_pack_start(GTK_BOX(temp_page), scrolled_window, TRUE, TRUE, 0);
    gtk_widget_show(scrolled_window);

    /* uint tables */
    temp_page = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 6, FALSE);
    tmp = gtk_label_new("Integer tables");
    gtk_widget_show(tmp);
    hbox = ws_gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 3, FALSE);
    gtk_box_pack_start(GTK_BOX(hbox), tmp, TRUE, TRUE, 0);
    gtk_notebook_append_page(GTK_NOTEBOOK(main_nb), temp_page, hbox);

    scrolled_window = scrolled_window_new(NULL, NULL);
    dis_tbl_trees.uint_tree_wgt = init_table();
    gtk_widget_show(dis_tbl_trees.uint_tree_wgt);
    gtk_container_add(GTK_CONTAINER(scrolled_window), dis_tbl_trees.uint_tree_wgt);
    gtk_box_pack_start(GTK_BOX(temp_page), scrolled_window, TRUE, TRUE, 0);
    gtk_widget_show(scrolled_window);

    /* heuristic tables */
    temp_page = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 6, FALSE);
    tmp = gtk_label_new("Heuristic tables");
    gtk_widget_show(tmp);
    hbox = ws_gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 3, FALSE);
    gtk_box_pack_start(GTK_BOX(hbox), tmp, TRUE, TRUE, 0);
    gtk_notebook_append_page(GTK_NOTEBOOK(main_nb), temp_page, hbox);

    scrolled_window = scrolled_window_new(NULL, NULL);
    dis_tbl_trees.heuristic_tree_wgt = init_table();
    gtk_widget_show(dis_tbl_trees.heuristic_tree_wgt);
    gtk_container_add(GTK_CONTAINER(scrolled_window), dis_tbl_trees.heuristic_tree_wgt);
    gtk_box_pack_start(GTK_BOX(temp_page), scrolled_window, TRUE, TRUE, 0);
    gtk_widget_show(scrolled_window);

    /* We must display TOP LEVEL Widget before calling init_table() */
    gtk_widget_show_all(dissector_tables_dlg_w);
    g_signal_connect(dissector_tables_dlg_w, "destroy", G_CALLBACK(win_destroy_cb), NULL);

    /* Fill the table with data */
    dissector_all_tables_foreach_table(display_dissector_table_names,
                                       (gpointer)&dis_tbl_trees, NULL);

    dissector_all_heur_tables_foreach_table(display_heur_dissector_table_names,
                                            (gpointer)&dis_tbl_trees);

    sortable = GTK_TREE_SORTABLE(gtk_tree_view_get_model(GTK_TREE_VIEW(dis_tbl_trees.str_tree_wgt)));
    gtk_tree_sortable_set_sort_column_id(sortable, TABLE_UI_NAME_COL, GTK_SORT_ASCENDING);

    sortable = GTK_TREE_SORTABLE(gtk_tree_view_get_model(GTK_TREE_VIEW(dis_tbl_trees.uint_tree_wgt)));
    gtk_tree_sortable_set_sort_column_id(sortable, TABLE_UI_NAME_COL, GTK_SORT_ASCENDING);

    sortable = GTK_TREE_SORTABLE(gtk_tree_view_get_model(GTK_TREE_VIEW(dis_tbl_trees.heuristic_tree_wgt)));
    gtk_tree_sortable_set_sort_column_id(sortable, TABLE_UI_NAME_COL, GTK_SORT_ASCENDING);
}

/* Menu callback: raise the existing dialog or build a new one. */
void
dissector_tables_dlg_cb(GtkWidget *w _U_, gpointer d _U_)
{
    if (dissector_tables_dlg_w) {
        reactivate_window(dissector_tables_dlg_w);
    } else {
        dissector_tables_dlg_init();
    }
}

/*
 * Editor modelines - http://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * vi: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */
gpl-2.0
andr7e/android_kernel_elephone_p6000
kernel/mediatek/kernel/drivers/combo/drv_wlan/mt6628/wlan/mgmt/scan.c
8
105786
/* ** $Id: //Department/DaVinci/BRANCHES/MT6620_WIFI_DRIVER_V2_3/mgmt/scan.c#3 $ */ /*! \file "scan.c" \brief This file defines the scan profile and the processing function of scan result for SCAN Module. The SCAN Profile selection is part of SCAN MODULE and responsible for defining SCAN Parameters - e.g. MIN_CHANNEL_TIME, number of scan channels. In this file we also define the process of SCAN Result including adding, searching and removing SCAN record from the list. */ /* ** $Log: scan.c $ ** ** 01 30 2013 yuche.tsai ** [ALPS00451578] [JB2][WFD][Case Fail][JE][MR1]?????????[Java (JE),660,-1361051648,99,/data/core/,0,system_server_crash,system_server]JE happens when try to connect WFD.(4/5) ** Fix possible old scan result indicated to supplicant after formation. ** ** 01 16 2013 cp.wu ** [ALPS00429083] [Need Patch] [Volunteer Patch][MT6620 Wi-Fi] Improve AP-IOT compatibility against AP whose timestamp will reset unexpectedly ** when BSS-DESC is re-allocated, all information needs to be filled * * 07 17 2012 yuche.tsai * NULL * Let netdev bring up. * * 07 17 2012 yuche.tsai * NULL * Compile no error before trial run. * * 06 25 2012 cp.wu * [WCXRP00001258] [MT6620][MT5931][MT6628][Driver] Do not use stale scan result for deciding connection target * drop off scan result which is older than 5 seconds when choosing which BSS to join * * 03 02 2012 terry.wu * NULL * Sync CFG80211 modification from branch 2,2. * * 01 16 2012 cp.wu * [WCXRP00001169] [MT6620 Wi-Fi][Driver] API and behavior modification for preferred band configuration with corresponding network configuration * correct typo. * * 01 16 2012 cp.wu * [MT6620 Wi-Fi][Driver] API and behavior modification for preferred band configuration with corresponding network configuration * add wlanSetPreferBandByNetwork() for glue layer to invoke for setting preferred band configuration corresponding to network type. 
* * 12 05 2011 cp.wu * [WCXRP00001131] [MT6620 Wi-Fi][Driver][AIS] Implement connect-by-BSSID path * add CONNECT_BY_BSSID policy * * 11 23 2011 cp.wu * [WCXRP00001123] [MT6620 Wi-Fi][Driver] Add option to disable beacon content change detection * add compile option to disable beacon content change detection. * * 11 04 2011 cp.wu * [WCXRP00001085] [MT6628 Wi-Fi][Driver] deprecate old BSS-DESC if timestamp is reset with received beacon/probe response frames * deprecate old BSS-DESC when timestamp in received beacon/probe response frames showed a smaller value than before * * 10 11 2011 cm.chang * [WCXRP00001031] [All Wi-Fi][Driver] Check HT IE length to avoid wrong SCO parameter * Ignore HT OP IE if its length field is not valid * * 09 30 2011 cp.wu * [WCXRP00001021] [MT5931][Driver] Correct scan result generation for conversion between BSS type and operation mode * correct type casting issue. * * 08 23 2011 yuche.tsai * NULL * Fix multicast address list issue. * * 08 11 2011 cp.wu * [WCXRP00000830] [MT6620 Wi-Fi][Firmware] Use MDRDY counter to detect empty channel for shortening scan time * sparse channel detection: * driver: collect sparse channel information with scan-done event * * 08 10 2011 cp.wu * [WCXRP00000922] [MT6620 Wi-Fi][Driver] traverse whole BSS-DESC list for removing * traverse whole BSS-DESC list because BSSID is not unique anymore. 
* * 07 12 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * for multiple BSS descriptior detecting issue: * 1) check BSSID for infrastructure network * 2) check SSID for AdHoc network * * 07 12 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * check for BSSID for beacons used to update DTIM * * 07 12 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * do not check BSS descriptor for connected flag due to linksys's hidden SSID will use another BSS descriptor and never connected * * 07 11 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * just pass beacons with the same BSSID. * * 07 11 2011 wh.su * [WCXRP00000849] [MT6620 Wi-Fi][Driver] Remove some of the WAPI define for make sure the value is initialize, for customer not enable WAPI * For make sure wapi initial value is set. * * 06 28 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * Do not check for SSID as beacon content change due to the existence of single BSSID with multiple SSID AP configuration * * 06 27 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * 1. correct logic * 2. replace only BSS-DESC which doesn't have a valid SSID. 
* * 06 27 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * remove unused temporal variable reference. * * 06 27 2011 cp.wu * [WCXRP00000815] [MT6620 Wi-Fi][Driver] allow single BSSID with multiple SSID settings to work around some tricky AP which use space character as hidden SSID * allow to have a single BSSID with multiple SSID to be presented in scanning result * * 06 02 2011 cp.wu * [WCXRP00000757] [MT6620 Wi-Fi][Driver][SCN] take use of RLM API to filter out BSS in disallowed channels * filter out BSS in disallowed channel by * 1. do not add to scan result array if BSS is at disallowed channel * 2. do not allow to search for BSS-DESC in disallowed channels * * 05 02 2011 cm.chang * [WCXRP00000691] [MT6620 Wi-Fi][Driver] Workaround about AP's wrong HT capability IE to have wrong channel number * Refine range of valid channel number * * 05 02 2011 cp.wu * [MT6620 Wi-Fi][Driver] Take parsed result for channel information instead of hardware channel number passed from firmware domain * take parsed result for generating scanning result with channel information. * * 05 02 2011 cm.chang * [WCXRP00000691] [MT6620 Wi-Fi][Driver] Workaround about AP's wrong HT capability IE to have wrong channel number * Check if channel is valided before record ing BSS channel * * 04 18 2011 terry.wu * [WCXRP00000660] [MT6620 Wi-Fi][Driver] Remove flag CFG_WIFI_DIRECT_MOVED * Remove flag CFG_WIFI_DIRECT_MOVED. * * 04 14 2011 cm.chang * [WCXRP00000634] [MT6620 Wi-Fi][Driver][FW] 2nd BSS will not support 40MHz bandwidth for concurrency * . * * 04 12 2011 eddie.chen * [WCXRP00000617] [MT6620 Wi-Fi][DRV/FW] Fix for sigma * Fix the sta index in processing security frame * Simple flow control for TC4 to avoid mgt frames for PS STA to occupy the TC4 * Add debug message. 
* * 03 25 2011 yuche.tsai * NULL * Always update Bss Type, for Bss Type for P2P Network is changing every time. * * 03 23 2011 yuche.tsai * NULL * Fix concurrent issue when AIS scan result would overwrite p2p scan result. * * 03 14 2011 cp.wu * [WCXRP00000535] [MT6620 Wi-Fi][Driver] Fixed channel operation when AIS and Tethering are operating concurrently * filtering out other BSS coming from adjacent channels * * 03 11 2011 chinglan.wang * [WCXRP00000537] [MT6620 Wi-Fi][Driver] Can not connect to 802.11b/g/n mixed AP with WEP security. * . * * 03 11 2011 cp.wu * [WCXRP00000535] [MT6620 Wi-Fi][Driver] Fixed channel operation when AIS and Tethering are operating concurrently * When fixed channel operation is necessary, AIS-FSM would scan and only connect for BSS on the specific channel * * 02 24 2011 cp.wu * [WCXRP00000490] [MT6620 Wi-Fi][Driver][Win32] modify kalMsleep() implementation because NdisMSleep() won't sleep long enough for specified interval such as 500ms * implement beacon change detection by checking SSID and supported rate. * * 02 22 2011 yuche.tsai * [WCXRP00000480] [Volunteer Patch][MT6620][Driver] WCS IE format issue * Fix WSC big endian issue. * * 02 21 2011 terry.wu * [WCXRP00000476] [MT6620 Wi-Fi][Driver] Clean P2P scan list while removing P2P * Clean P2P scan list while removing P2P. * * 01 27 2011 yuche.tsai * [WCXRP00000399] [Volunteer Patch][MT6620/MT5931][Driver] Fix scan side effect after P2P module separate. * Fix scan channel extension issue when p2p module is not registered. * * 01 26 2011 cm.chang * [WCXRP00000395] [MT6620 Wi-Fi][Driver][FW] Search STA_REC with additional net type index argument * . 
* * 01 21 2011 cp.wu * [WCXRP00000380] [MT6620 Wi-Fi][Driver] SSID information should come from buffered BSS_DESC_T rather than using beacon-carried information * SSID should come from buffered prBssDesc rather than beacon-carried information * * 01 14 2011 yuche.tsai * [WCXRP00000352] [Volunteer Patch][MT6620][Driver] P2P Statsion Record Client List Issue * Fix compile error. * * 01 14 2011 yuche.tsai * [WCXRP00000352] [Volunteer Patch][MT6620][Driver] P2P Statsion Record Client List Issue * Memfree for P2P Descriptor & P2P Descriptor List. * * 01 14 2011 yuche.tsai * [WCXRP00000352] [Volunteer Patch][MT6620][Driver] P2P Statsion Record Client List Issue * Free P2P Descriptor List & Descriptor under BSS Descriptor. * * 01 04 2011 cp.wu * [WCXRP00000338] [MT6620 Wi-Fi][Driver] Separate kalMemAlloc into kmalloc and vmalloc implementations to ease physically continous memory demands * 1) correct typo in scan.c * 2) TX descriptors, RX descriptos and management buffer should use virtually continous buffer instead of physically contineous one * * 01 04 2011 cp.wu * [WCXRP00000338] [MT6620 Wi-Fi][Driver] Separate kalMemAlloc into kmalloc and vmalloc implementations to ease physically continous memory demands * separate kalMemAlloc() into virtually-continous and physically-continous type to ease slab system pressure * * 12 31 2010 cp.wu * [WCXRP00000327] [MT6620 Wi-Fi][Driver] Improve HEC WHQA 6972 workaround coverage in driver side * while being unloaded, clear all pending interrupt then set LP-own to firmware * * 12 21 2010 cp.wu * [WCXRP00000280] [MT6620 Wi-Fi][Driver] Enable BSS selection with best RCPI policy in SCN module * SCN: enable BEST RSSI selection policy support * * 11 29 2010 cp.wu * [WCXRP00000210] [MT6620 Wi-Fi][Driver][FW] Set RCPI value in STA_REC for initial TX rate selection of auto-rate algorithm * update ucRcpi of STA_RECORD_T for AIS when * 1) Beacons for IBSS merge is received * 2) Associate Response for a connecting peer is received * * 11 03 
2010 wh.su * [WCXRP00000124] [MT6620 Wi-Fi] [Driver] Support the dissolve P2P Group * Refine the HT rate disallow TKIP pairwise cipher . * * 10 12 2010 cp.wu * [WCXRP00000091] [MT6620 Wi-Fi][Driver] Add scanning logic to filter out beacons which is received on the folding frequency * trust HT IE if available for 5GHz band * * 10 11 2010 cp.wu * [WCXRP00000091] [MT6620 Wi-Fi][Driver] Add scanning logic to filter out beacons which is received on the folding frequency * add timing and strenght constraint for filtering out beacons with same SSID/TA but received on different channels * * 10 08 2010 wh.su * [WCXRP00000085] [MT6620 Wif-Fi] [Driver] update the modified p2p state machine * update the frog's new p2p state machine. * * 10 01 2010 yuche.tsai * NULL * [MT6620 P2P] Fix Big Endian Issue when parse P2P device name TLV. * * 09 24 2010 cp.wu * [WCXRP00000052] [MT6620 Wi-Fi][Driver] Eliminate Linux Compile Warning * eliminate unused variables which lead gcc to argue * * 09 08 2010 cp.wu * NULL * use static memory pool for storing IEs of scanning result. * * 09 07 2010 yuche.tsai * NULL * When indicate scan result, append IE buffer information in the scan result. * * 09 03 2010 yuche.tsai * NULL * 1. Update Beacon RX count when running SLT. * 2. Ignore Beacon when running SLT, would not update information from Beacon. * * 09 03 2010 kevin.huang * NULL * Refine #include sequence and solve recursive/nested #include issue * * 08 31 2010 kevin.huang * NULL * Use LINK LIST operation to process SCAN result * * 08 29 2010 yuche.tsai * NULL * 1. Fix P2P Descriptor List to be a link list, to avoid link corrupt after Bss Descriptor Free. * 2.. Fix P2P Device Name Length BE issue. * * 08 23 2010 yuche.tsai * NULL * Add P2P Device Found Indication to supplicant * * 08 20 2010 cp.wu * NULL * reset BSS_DESC_T variables before parsing IE due to peer might have been reconfigured. * * 08 20 2010 yuche.tsai * NULL * Workaround for P2P Descriptor Infinite loop issue. 
* * 08 16 2010 cp.wu * NULL * Replace CFG_SUPPORT_BOW by CFG_ENABLE_BT_OVER_WIFI. * There is no CFG_SUPPORT_BOW in driver domain source. * * 08 16 2010 yuche.tsai * NULL * Modify code of processing Probe Resonse frame for P2P. * * 08 12 2010 yuche.tsai * NULL * Add function to get P2P descriptor of BSS descriptor directly. * * 08 11 2010 yuche.tsai * NULL * Modify Scan result processing for P2P module. * * 08 05 2010 yuche.tsai * NULL * Update P2P Device Discovery result add function. * * 08 03 2010 cp.wu * NULL * surpress compilation warning. * * 07 26 2010 yuche.tsai * * Add support for Probe Request & Response parsing. * * 07 21 2010 cp.wu * * 1) change BG_SCAN to ONLINE_SCAN for consistent term * 2) only clear scanning result when scan is permitted to do * * 07 21 2010 yuche.tsai * * Fix compile error for SCAN module while disabling P2P feature. * * 07 21 2010 yuche.tsai * * Add P2P Scan & Scan Result Parsing & Saving. * * 07 19 2010 wh.su * * update for security supporting. * * 07 19 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration. * Add Ad-Hoc support to AIS-FSM * * 07 19 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration. * SCN module is now able to handle multiple concurrent scanning requests * * 07 15 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration. * driver no longer generates probe request frames * * 07 14 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration. * remove timer in DRV-SCN. * * 07 09 2010 cp.wu * * 1) separate AIS_FSM state for two kinds of scanning. (OID triggered scan, and scan-for-connection) * 2) eliminate PRE_BSS_DESC_T, Beacon/PrebResp is now parsed in single pass * 3) implment DRV-SCN module, currently only accepts single scan request, other request will be directly dropped by returning BUSY * * 07 08 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration - move to new repository. 
* * 07 08 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * take use of RLM module for parsing/generating HT IEs for 11n capability * * 07 05 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * 1) ignore RSN checking when RSN is not turned on. * 2) set STA-REC deactivation callback as NULL * 3) add variable initialization API based on PHY configuration * * 07 05 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * correct BSS_DESC_T initialization after allocated. * * 07 02 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * 1) for event packet, no need to fill RFB. * 2) when wlanAdapterStart() failed, no need to initialize state machines * 3) after Beacon/ProbeResp parsing, corresponding BSS_DESC_T should be marked as IE-parsed * * 07 01 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * add scan uninitialization procedure * * 06 30 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * if beacon/probe-resp is received in 2.4GHz bands and there is ELEM_ID_DS_PARAM_SET IE available, * trust IE instead of RMAC information * * 06 29 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * 1) sync to. CMD/EVENT document v0.03 * 2) simplify DTIM period parsing in scan.c only, bss.c no longer parses it again. * 3) send command packet to indicate FW-PM after * a) 1st beacon is received after AIS has connected to an AP * b) IBSS-ALONE has been created * c) IBSS-MERGE has occured * * 06 28 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * send MMPDU in basic rate. * * 06 25 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * modify Beacon/ProbeResp to complete parsing, * because host software has looser memory usage restriction * * 06 23 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * integrate . * * 06 22 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * comment out RLM APIs by CFG_RLM_MIGRATION. 
* * 06 21 2010 yuche.tsai * [WPD00003839][MT6620 5931][P2P] Feature migration * Update P2P Function call. * * 06 21 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * RSN/PRIVACY compilation flag awareness correction * * 06 21 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * specify correct value for management frames. * * 06 18 2010 cm.chang * [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver * Provide cnmMgtPktAlloc() and alloc/free function of msg/buf * * 06 18 2010 wh.su * [WPD00003840][MT6620 5931] Security migration * migration from MT6620 firmware. * * 06 17 2010 yuche.tsai * [WPD00003839][MT6620 5931][P2P] Feature migration * Fix compile error when enable P2P function. * * 06 15 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * correct when ADHOC support is turned on. * * 06 15 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * add scan.c. * * 06 04 2010 george.huang * [BORA00000678][MT6620]WiFi LP integration * [PM] Support U-APSD for STA mode * * 05 28 2010 wh.su * [BORA00000680][MT6620] Support the statistic for Microsoft os query * adding the TKIP disallow join a HT AP code. * * 05 14 2010 kevin.huang * [BORA00000794][WIFISYS][New Feature]Power Management Support * Add more chance of JOIN retry for BG_SCAN * * 05 12 2010 kevin.huang * [BORA00000794][WIFISYS][New Feature]Power Management Support * Add Power Management - Legacy PS-POLL support. * * 04 29 2010 wh.su * [BORA00000637][MT6620 Wi-Fi] [Bug] WPA2 pre-authentication timer not correctly initialize * adjsut the pre-authentication code. 
* * 04 27 2010 kevin.huang * [BORA00000663][WIFISYS][New Feature] AdHoc Mode Support * Add Set Slot Time and Beacon Timeout Support for AdHoc Mode * * 04 24 2010 cm.chang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * g_aprBssInfo[] depends on CFG_SUPPORT_P2P and CFG_SUPPORT_BOW * * 04 19 2010 kevin.huang * [BORA00000714][WIFISYS][New Feature]Beacon Timeout Support * Add Beacon Timeout Support and will send Null frame to diagnose connection * * 04 13 2010 kevin.huang * [BORA00000663][WIFISYS][New Feature] AdHoc Mode Support * Add new HW CH macro support * * 04 06 2010 wh.su * [BORA00000680][MT6620] Support the statistic for Microsoft os query * fixed the firmware return the broadcast frame at wrong tc. * * 03 29 2010 wh.su * [BORA00000605][WIFISYS] Phase3 Integration * let the rsn wapi IE always parsing. * * 03 24 2010 cm.chang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * Not carry HT cap when being associated with b/g only AP * * 03 18 2010 kevin.huang * [BORA00000663][WIFISYS][New Feature] AdHoc Mode Support * Solve the compile warning for 'return non-void' function * * 03 16 2010 kevin.huang * [BORA00000663][WIFISYS][New Feature] AdHoc Mode Support * Add AdHoc Mode * * 03 10 2010 kevin.huang * [BORA00000654][WIFISYS][New Feature] CNM Module - Ch Manager Support * * * * * * * * * * * * * * * * * Add Channel Manager for arbitration of JOIN and SCAN Req * * 03 03 2010 wh.su * [BORA00000637][MT6620 Wi-Fi] [Bug] WPA2 pre-authentication timer not correctly initialize * move the AIS specific variable for security to AIS specific structure. * * 03 01 2010 wh.su * [BORA00000605][WIFISYS] Phase3 Integration * Refine the variable and parameter for security. 
 *
 * 02 26 2010 kevin.huang
 * [BORA00000603][WIFISYS] [New Feature] AAA Module Support
 * Fix No PKT_INFO_T issue
 *
 * 02 26 2010 kevin.huang
 * [BORA00000603][WIFISYS] [New Feature] AAA Module Support
 * Update outgoing ProbeRequest Frame's TX data rate
 *
 * 02 23 2010 wh.su
 * [BORA00000592][MT6620 Wi-Fi] Adding the security related code for driver
 * refine the scan procedure, reduce the WPA and WAPI IE parsing, and move the parsing to the time for join.
 *
 * 02 23 2010 kevin.huang
 * [BORA00000603][WIFISYS] [New Feature] AAA Module Support
 * Add support scan channel 1~14 and update scan result's frequency info
 *
 * 02 04 2010 kevin.huang
 * [BORA00000603][WIFISYS] [New Feature] AAA Module Support
 * Add AAA Module Support, Revise Net Type to Net Type Index for array lookup
 *
 * 01 27 2010 wh.su
 * [BORA00000476][Wi-Fi][firmware] Add the security module initialize code
 * add and fixed some security function.
 *
 * 01 22 2010 cm.chang
 * [BORA00000018]Integrate WIFI part into BORA for the 1st time
 * Support protection and bandwidth switch
 *
 * 01 20 2010 kevin.huang
 * [BORA00000569][WIFISYS] Phase 2 Integration Test
 * Add PHASE_2_INTEGRATION_WORK_AROUND and CFG_SUPPORT_BCM flags
 *
 * 01 11 2010 kevin.huang
 * [BORA00000018]Integrate WIFI part into BORA for the 1st time
 * Add Deauth and Disassoc Handler
 *
 * 01 08 2010 kevin.huang
 * [BORA00000018]Integrate WIFI part into BORA for the 1st time
 *
 * Refine Beacon processing, add read RF channel from RX Status
 *
 * 01 04 2010 tehuang.liu
 * [BORA00000018]Integrate WIFI part into BORA for the 1st time
 * For working out the first connection Chariot-verified version
 *
 * 12 18 2009 cm.chang
 * [BORA00000018]Integrate WIFI part into BORA for the 1st time
 * .
* * Dec 12 2009 mtk01104 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Modify u2EstimatedExtraIELen for probe request * * Dec 9 2009 mtk01104 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Add HT cap IE to probe request * * Dec 7 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Fix lint warning * * * Dec 3 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Update the process of SCAN Result by adding more Phy Attributes * * Dec 1 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * adjust the function and code for meet the new define * * Nov 30 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Rename u4RSSI to i4RSSI * * Nov 30 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Report event of scan result to host * * Nov 26 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Fix SCAN Record update * * Nov 24 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Revise MGMT Handler with Retain Status and Integrate with TXM * * Nov 23 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Add (Ext)Support Rate Set IE to ProbeReq * * Nov 20 2009 mtk02468 * [BORA00000337] To check in codes for FPGA emulation * Removed the use of SW_RFB->u2FrameLength * * Nov 20 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Fix uninitial aucMacAddress[] for ProbeReq * * Nov 16 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Add scanSearchBssDescByPolicy() * * Nov 5 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * Add Send Probe Request Frame * * Oct 30 2009 mtk01461 * [BORA00000018] Integrate WIFI part into BORA for the 1st time * */ /******************************************************************************* * C O M P 
I L E R F L A G S ******************************************************************************** */ /******************************************************************************* * E X T E R N A L R E F E R E N C E S ******************************************************************************** */ #include "precomp.h" /******************************************************************************* * C O N S T A N T S ******************************************************************************** */ #define REPLICATED_BEACON_TIME_THRESHOLD (3000) #define REPLICATED_BEACON_FRESH_PERIOD (10000) #define REPLICATED_BEACON_STRENGTH_THRESHOLD (32) #define ROAMING_NO_SWING_RCPI_STEP (10) /******************************************************************************* * D A T A T Y P E S ******************************************************************************** */ /******************************************************************************* * P U B L I C D A T A ******************************************************************************** */ /******************************************************************************* * P R I V A T E D A T A ******************************************************************************** */ /******************************************************************************* * M A C R O S ******************************************************************************** */ /******************************************************************************* * F U N C T I O N D E C L A R A T I O N S ******************************************************************************** */ /******************************************************************************* * F U N C T I O N S ******************************************************************************** */ /*----------------------------------------------------------------------------*/ /*! 
* @brief This function is used by SCN to initialize its variables
 *
 * @param (none)
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
VOID
scnInit (
    IN P_ADAPTER_T prAdapter
    )
{
    P_SCAN_INFO_T prScanInfo;
    P_BSS_DESC_T prBSSDesc;
    PUINT_8 pucBSSBuff;
    UINT_32 i;

    ASSERT(prAdapter);

    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);
    pucBSSBuff = &prScanInfo->aucScanBuffer[0];

    DBGLOG(SCN, INFO, ("->scnInit()\n"));

    //4 <1> Reset STATE and Message List
    prScanInfo->eCurrentState = SCAN_STATE_IDLE;

    prScanInfo->rLastScanCompletedTime = (OS_SYSTIME)0;

    LINK_INITIALIZE(&prScanInfo->rPendingMsgList);

    //4 <2> Reset link list of BSS_DESC_T
    /* Carve the static scan buffer into CFG_MAX_NUM_BSS_LIST fixed-size
     * BSS_DESC_T slots and queue every slot on the free list. */
    kalMemZero((PVOID) pucBSSBuff, SCN_MAX_BUFFER_SIZE);

    LINK_INITIALIZE(&prScanInfo->rFreeBSSDescList);
    LINK_INITIALIZE(&prScanInfo->rBSSDescList);

    for (i = 0; i < CFG_MAX_NUM_BSS_LIST; i++) {
        prBSSDesc = (P_BSS_DESC_T)pucBSSBuff;

        LINK_INSERT_TAIL(&prScanInfo->rFreeBSSDescList, &prBSSDesc->rLinkEntry);

        /* advance to the next 4-byte aligned slot */
        pucBSSBuff += ALIGN_4(sizeof(BSS_DESC_T));
    }

    /* Check if the memory allocation consist with this initialization function */
    ASSERT(((UINT_32)pucBSSBuff - (UINT_32)&prScanInfo->aucScanBuffer[0]) == SCN_MAX_BUFFER_SIZE);

    /* reset freest channel information */
    prScanInfo->fgIsSparseChannelValid = FALSE;

    return;
} /* end of scnInit() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief This function is used by SCN to uninitialize its variables
 *
 * @param (none)
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
VOID
scnUninit (
    IN P_ADAPTER_T prAdapter
    )
{
    P_SCAN_INFO_T prScanInfo;

    ASSERT(prAdapter);
    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);

    DBGLOG(SCN, INFO, ("->scnUninit()\n"));

    //4 <1> Reset STATE and Message List
    prScanInfo->eCurrentState = SCAN_STATE_IDLE;

    prScanInfo->rLastScanCompletedTime = (OS_SYSTIME)0;

    /* NOTE(Kevin): Check rPendingMsgList ?
 */

    //4 <2> Reset link list of BSS_DESC_T
    LINK_INITIALIZE(&prScanInfo->rFreeBSSDescList);
    LINK_INITIALIZE(&prScanInfo->rBSSDescList);

    return;
} /* end of scnUninit() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief Find the corresponding BSS Descriptor according to given BSSID
 *
 * @param[in] prAdapter Pointer to the Adapter structure.
 * @param[in] aucBSSID  Given BSSID.
 *
 * @return Pointer to BSS Descriptor, if found. NULL, if not found
 */
/*----------------------------------------------------------------------------*/
P_BSS_DESC_T
scanSearchBssDescByBssid (
    IN P_ADAPTER_T prAdapter,
    IN UINT_8 aucBSSID[]
    )
{
    /* Thin wrapper: BSSID-only lookup, no SSID matching. */
    return scanSearchBssDescByBssidAndSsid(prAdapter, aucBSSID, FALSE, NULL);
}


/*----------------------------------------------------------------------------*/
/*!
 * @brief Find the corresponding BSS Descriptor according to given BSSID
 *
 * @param[in] prAdapter   Pointer to the Adapter structure.
 * @param[in] aucBSSID    Given BSSID.
 * @param[in] fgCheckSsid Need to check SSID or not. (for multiple SSID with
 *                        single BSSID cases)
 * @param[in] prSsid      Specified SSID
 *
 * @return Pointer to BSS Descriptor, if found. NULL, if not found
 */
/*----------------------------------------------------------------------------*/
P_BSS_DESC_T
scanSearchBssDescByBssidAndSsid (
    IN P_ADAPTER_T prAdapter,
    IN UINT_8 aucBSSID[],
    IN BOOLEAN fgCheckSsid,
    IN P_PARAM_SSID_T prSsid
    )
{
    P_SCAN_INFO_T prScanInfo;
    P_LINK_T prBSSDescList;
    P_BSS_DESC_T prBssDesc;
    P_BSS_DESC_T prDstBssDesc = (P_BSS_DESC_T)NULL;

    ASSERT(prAdapter);
    ASSERT(aucBSSID);

    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);

    prBSSDescList = &prScanInfo->rBSSDescList;

    /* Search BSS Desc from current SCAN result list.
 */
    LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) {

        if (EQUAL_MAC_ADDR(prBssDesc->aucBSSID, aucBSSID)) {
            if(fgCheckSsid == FALSE || prSsid == NULL) {
                return prBssDesc;
            }
            else {
                if(EQUAL_SSID(prBssDesc->aucSSID,
                              prBssDesc->ucSSIDLen,
                              prSsid->aucSsid,
                              prSsid->u4SsidLen)) {
                    return prBssDesc;
                }
                /* Remember the first hidden-SSID entry as a fallback result. */
                else if(prDstBssDesc == NULL && prBssDesc->fgIsHiddenSSID == TRUE) {
                    prDstBssDesc = prBssDesc;
                }
                else {
                    /* 20120206 frog: Equal BSSID but not SSID, SSID not hidden, SSID must be updated. */
                    COPY_SSID(prBssDesc->aucSSID,
                              prBssDesc->ucSSIDLen,
                              prSsid->aucSsid,
                              prSsid->u4SsidLen);
                    return prBssDesc;
                }
            }
        }
    }

    return prDstBssDesc;

} /* end of scanSearchBssDescByBssidAndSsid() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief Find the corresponding BSS Descriptor according to given Transmitter Address.
 *
 * @param[in] prAdapter  Pointer to the Adapter structure.
 * @param[in] aucSrcAddr Given Source Address(TA).
 *
 * @return Pointer to BSS Descriptor, if found. NULL, if not found
 */
/*----------------------------------------------------------------------------*/
P_BSS_DESC_T
scanSearchBssDescByTA (
    IN P_ADAPTER_T prAdapter,
    IN UINT_8 aucSrcAddr[]
    )
{
    /* Thin wrapper: TA-only lookup, no SSID matching. */
    return scanSearchBssDescByTAAndSsid(prAdapter, aucSrcAddr, FALSE, NULL);
}


/*----------------------------------------------------------------------------*/
/*!
 * @brief Find the corresponding BSS Descriptor according to given Transmitter Address.
 *
 * @param[in] prAdapter   Pointer to the Adapter structure.
 * @param[in] aucSrcAddr  Given Source Address(TA).
 * @param[in] fgCheckSsid Need to check SSID or not. (for multiple SSID with
 *                        single BSSID cases)
 * @param[in] prSsid      Specified SSID
 *
 * @return Pointer to BSS Descriptor, if found.
 *         NULL, if not found
 */
/*----------------------------------------------------------------------------*/
P_BSS_DESC_T
scanSearchBssDescByTAAndSsid (
    IN P_ADAPTER_T prAdapter,
    IN UINT_8 aucSrcAddr[],
    IN BOOLEAN fgCheckSsid,
    IN P_PARAM_SSID_T prSsid
    )
{
    P_SCAN_INFO_T prScanInfo;
    P_LINK_T prBSSDescList;
    P_BSS_DESC_T prBssDesc;
    P_BSS_DESC_T prDstBssDesc = (P_BSS_DESC_T)NULL;

    ASSERT(prAdapter);
    ASSERT(aucSrcAddr);

    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);

    prBSSDescList = &prScanInfo->rBSSDescList;

    /* Search BSS Desc from current SCAN result list. */
    LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) {

        if (EQUAL_MAC_ADDR(prBssDesc->aucSrcAddr, aucSrcAddr)) {
            if(fgCheckSsid == FALSE || prSsid == NULL) {
                return prBssDesc;
            }
            else {
                if(EQUAL_SSID(prBssDesc->aucSSID,
                              prBssDesc->ucSSIDLen,
                              prSsid->aucSsid,
                              prSsid->u4SsidLen)) {
                    return prBssDesc;
                }
                /* Remember the first hidden-SSID entry as a fallback result. */
                else if(prDstBssDesc == NULL && prBssDesc->fgIsHiddenSSID == TRUE) {
                    prDstBssDesc = prBssDesc;
                }
            }
        }
    }

    return prDstBssDesc;

} /* end of scanSearchBssDescByTAAndSsid() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief Find the corresponding BSS Descriptor according to
 *        given eBSSType, BSSID and Transmitter Address
 *
 * @param[in] prAdapter  Pointer to the Adapter structure.
 * @param[in] eBSSType   BSS Type of incoming Beacon/ProbeResp frame.
 * @param[in] aucBSSID   Given BSSID of Beacon/ProbeResp frame.
 * @param[in] aucSrcAddr Given source address (TA) of Beacon/ProbeResp frame.
 *
 * @return Pointer to BSS Descriptor, if found. NULL, if not found
 */
/*----------------------------------------------------------------------------*/
P_BSS_DESC_T
scanSearchExistingBssDesc (
    IN P_ADAPTER_T prAdapter,
    IN ENUM_BSS_TYPE_T eBSSType,
    IN UINT_8 aucBSSID[],
    IN UINT_8 aucSrcAddr[]
    )
{
    /* Thin wrapper: existence lookup without SSID matching. */
    return scanSearchExistingBssDescWithSsid(prAdapter,
                                             eBSSType,
                                             aucBSSID,
                                             aucSrcAddr,
                                             FALSE,
                                             NULL);
}


/*----------------------------------------------------------------------------*/
/*!
* @brief Find the corresponding BSS Descriptor according to
 *        given eBSSType, BSSID and Transmitter Address
 *
 * @param[in] prAdapter   Pointer to the Adapter structure.
 * @param[in] eBSSType    BSS Type of incoming Beacon/ProbeResp frame.
 * @param[in] aucBSSID    Given BSSID of Beacon/ProbeResp frame.
 * @param[in] aucSrcAddr  Given source address (TA) of Beacon/ProbeResp frame.
 * @param[in] fgCheckSsid Need to check SSID or not. (for multiple SSID with
 *                        single BSSID cases)
 * @param[in] prSsid      Specified SSID
 *
 * @return Pointer to BSS Descriptor, if found. NULL, if not found
 */
/*----------------------------------------------------------------------------*/
P_BSS_DESC_T
scanSearchExistingBssDescWithSsid (
    IN P_ADAPTER_T prAdapter,
    IN ENUM_BSS_TYPE_T eBSSType,
    IN UINT_8 aucBSSID[],
    IN UINT_8 aucSrcAddr[],
    IN BOOLEAN fgCheckSsid,
    IN P_PARAM_SSID_T prSsid
    )
{
    P_SCAN_INFO_T prScanInfo;
    P_BSS_DESC_T prBssDesc, prIBSSBssDesc;

    ASSERT(prAdapter);
    ASSERT(aucSrcAddr);

    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);

    switch (eBSSType) {
    case BSS_TYPE_P2P_DEVICE:
        /* P2P devices are matched by BSSID only; intentional fallthrough. */
        fgCheckSsid = FALSE;
    case BSS_TYPE_INFRASTRUCTURE:
    case BSS_TYPE_BOW_DEVICE:
        {
            prBssDesc = scanSearchBssDescByBssidAndSsid(prAdapter, aucBSSID, fgCheckSsid, prSsid);

            /* if (eBSSType == prBssDesc->eBSSType) */

            return prBssDesc;
        }

    case BSS_TYPE_IBSS:
        {
            /* For IBSS the peer may change its BSSID over time, so look the
             * entry up both by BSSID and by transmitter address. */
            prIBSSBssDesc = scanSearchBssDescByBssidAndSsid(prAdapter, aucBSSID, fgCheckSsid, prSsid);
            prBssDesc = scanSearchBssDescByTAAndSsid(prAdapter, aucSrcAddr, fgCheckSsid, prSsid);

            /* NOTE(Kevin):
             * Rules to maintain the SCAN Result:
             * For AdHoc -
             *    CASE I    We have TA1(BSSID1), but it change its BSSID to BSSID2
             *              -> Update TA1 entry's BSSID.
             *    CASE II   We have TA1(BSSID1), and get TA1(BSSID1) again
             *              -> Update TA1 entry's contain.
             *    CASE III  We have a SCAN result TA1(BSSID1), and TA2(BSSID2). Sooner or
             *              later, TA2 merge into TA1, we get TA2(BSSID1)
             *              -> Remove TA2 first and then replace TA1 entry's TA with TA2, Still have only one entry of BSSID.
*    CASE IV   We have a SCAN result TA1(BSSID1), and another TA2 also merge into BSSID1.
             *              -> Replace TA1 entry's TA with TA2, Still have only one entry.
             *    CASE V    New IBSS
             *              -> Add this one to SCAN result.
             */
            if (prBssDesc) {

                if ((!prIBSSBssDesc) || // CASE I
                    (prBssDesc == prIBSSBssDesc)) { // CASE II

                    return prBssDesc;
                }
                else { // CASE III
                    P_LINK_T prBSSDescList;
                    P_LINK_T prFreeBSSDescList;

                    prBSSDescList = &prScanInfo->rBSSDescList;
                    prFreeBSSDescList = &prScanInfo->rFreeBSSDescList;

                    /* Remove this BSS Desc from the BSS Desc list */
                    LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc);

                    /* Return this BSS Desc to the free BSS Desc list. */
                    LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry);

                    return prIBSSBssDesc;
                }
            }

            if (prIBSSBssDesc) { // CASE IV
                return prIBSSBssDesc;
            }

            // CASE V
            break; // Return NULL;
        }

    default:
        break;
    }

    return (P_BSS_DESC_T)NULL;

} /* end of scanSearchExistingBssDesc() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief Delete BSS Descriptors from current list according to given Remove Policy.
 *
 * @param[in] u4RemovePolicy Remove Policy.
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
VOID
scanRemoveBssDescsByPolicy (
    IN P_ADAPTER_T prAdapter,
    IN UINT_32 u4RemovePolicy
    )
{
    P_CONNECTION_SETTINGS_T prConnSettings;
    P_SCAN_INFO_T prScanInfo;
    P_LINK_T prBSSDescList;
    P_LINK_T prFreeBSSDescList;
    P_BSS_DESC_T prBssDesc;

    ASSERT(prAdapter);

    prConnSettings = &(prAdapter->rWifiVar.rConnSettings);
    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);
    prBSSDescList = &prScanInfo->rBSSDescList;
    prFreeBSSDescList = &prScanInfo->rFreeBSSDescList;

    //DBGLOG(SCN, TRACE, ("Before Remove - Number Of SCAN Result = %ld\n",
        //prBSSDescList->u4NumElem));

    if (u4RemovePolicy & SCN_RM_POLICY_TIMEOUT) {
        P_BSS_DESC_T prBSSDescNext;
        OS_SYSTIME rCurrentTime;

        GET_CURRENT_SYSTIME(&rCurrentTime);

        /* Search BSS Desc from current SCAN result list.
 */
        /* SAFE iterator: entries may be unlinked while walking the list. */
        LINK_FOR_EACH_ENTRY_SAFE(prBssDesc, prBSSDescNext, prBSSDescList, rLinkEntry, BSS_DESC_T) {

            if ((u4RemovePolicy & SCN_RM_POLICY_EXCLUDE_CONNECTED) &&
                (prBssDesc->fgIsConnected || prBssDesc->fgIsConnecting)) {
                /* Don't remove the one currently we are connected. */
                continue;
            }

            if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime,
                        SEC_TO_SYSTIME(SCN_BSS_DESC_REMOVE_TIMEOUT_SEC)) ) {

                //DBGLOG(SCN, TRACE, ("Remove TIMEOUT BSS DESC(%#x): MAC: "MACSTR", Current Time = %08lx, Update Time = %08lx\n",
                    //prBssDesc, MAC2STR(prBssDesc->aucBSSID), rCurrentTime, prBssDesc->rUpdateTime));

                /* Remove this BSS Desc from the BSS Desc list */
                LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc);

                /* Return this BSS Desc to the free BSS Desc list. */
                LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry);
            }
        }
    }
    else if (u4RemovePolicy & SCN_RM_POLICY_OLDEST_HIDDEN) {
        P_BSS_DESC_T prBssDescOldest = (P_BSS_DESC_T)NULL;

        /* Search BSS Desc from current SCAN result list. */
        LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) {

            if ((u4RemovePolicy & SCN_RM_POLICY_EXCLUDE_CONNECTED) &&
                (prBssDesc->fgIsConnected || prBssDesc->fgIsConnecting)) {
                /* Don't remove the one currently we are connected. */
                continue;
            }

            /* only hidden-SSID entries are candidates under this policy */
            if (!prBssDesc->fgIsHiddenSSID) {
                continue;
            }

            if (!prBssDescOldest) { /* 1st element */
                prBssDescOldest = prBssDesc;
                continue;
            }

            if (TIME_BEFORE(prBssDesc->rUpdateTime, prBssDescOldest->rUpdateTime)) {
                prBssDescOldest = prBssDesc;
            }
        }

        if (prBssDescOldest) {

            //DBGLOG(SCN, TRACE, ("Remove OLDEST HIDDEN BSS DESC(%#x): MAC: "MACSTR", Update Time = %08lx\n",
                //prBssDescOldest, MAC2STR(prBssDescOldest->aucBSSID), prBssDescOldest->rUpdateTime));

            /* Remove this BSS Desc from the BSS Desc list */
            LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDescOldest);

            /* Return this BSS Desc to the free BSS Desc list.
 */
            LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDescOldest->rLinkEntry);
        }
    }
    else if (u4RemovePolicy & SCN_RM_POLICY_SMART_WEAKEST) {
        P_BSS_DESC_T prBssDescWeakest = (P_BSS_DESC_T)NULL;
        P_BSS_DESC_T prBssDescWeakestSameSSID = (P_BSS_DESC_T)NULL;
        UINT_32 u4SameSSIDCount = 0;

        /* Search BSS Desc from current SCAN result list. */
        LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) {

            if ((u4RemovePolicy & SCN_RM_POLICY_EXCLUDE_CONNECTED) &&
                (prBssDesc->fgIsConnected || prBssDesc->fgIsConnecting)) {
                /* Don't remove the one currently we are connected. */
                continue;
            }

            /* Track the weakest (lowest RCPI) entry that carries the SSID
             * from the current connection settings. */
            if ((!prBssDesc->fgIsHiddenSSID) &&
                (EQUAL_SSID(prBssDesc->aucSSID,
                            prBssDesc->ucSSIDLen,
                            prConnSettings->aucSSID,
                            prConnSettings->ucSSIDLen))) {

                u4SameSSIDCount++;

                if (!prBssDescWeakestSameSSID) {
                    prBssDescWeakestSameSSID = prBssDesc;
                }
                else if (prBssDesc->ucRCPI < prBssDescWeakestSameSSID->ucRCPI) {
                    prBssDescWeakestSameSSID = prBssDesc;
                }
            }

            if (!prBssDescWeakest) { /* 1st element */
                prBssDescWeakest = prBssDesc;
                continue;
            }

            if (prBssDesc->ucRCPI < prBssDescWeakest->ucRCPI) {
                prBssDescWeakest = prBssDesc;
            }
        }

        /* Prefer evicting the weakest entry of the configured SSID once that
         * SSID already has enough entries in the result list. */
        if ((u4SameSSIDCount >= SCN_BSS_DESC_SAME_SSID_THRESHOLD) &&
            (prBssDescWeakestSameSSID)) {
            prBssDescWeakest = prBssDescWeakestSameSSID;
        }

        if (prBssDescWeakest) {

            //DBGLOG(SCN, TRACE, ("Remove WEAKEST BSS DESC(%#x): MAC: "MACSTR", Update Time = %08lx\n",
                //prBssDescOldest, MAC2STR(prBssDescOldest->aucBSSID), prBssDescOldest->rUpdateTime));

            /* Remove this BSS Desc from the BSS Desc list */
            LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDescWeakest);

            /* Return this BSS Desc to the free BSS Desc list.
 */
            LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDescWeakest->rLinkEntry);
        }
    }
    else if (u4RemovePolicy & SCN_RM_POLICY_ENTIRE) {
        P_BSS_DESC_T prBSSDescNext;

        /* SAFE iterator: entries may be unlinked while walking the list. */
        LINK_FOR_EACH_ENTRY_SAFE(prBssDesc, prBSSDescNext, prBSSDescList, rLinkEntry, BSS_DESC_T) {

            if ((u4RemovePolicy & SCN_RM_POLICY_EXCLUDE_CONNECTED) &&
                (prBssDesc->fgIsConnected || prBssDesc->fgIsConnecting)) {
                /* Don't remove the one currently we are connected. */
                continue;
            }

            /* Remove this BSS Desc from the BSS Desc list */
            LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc);

            /* Return this BSS Desc to the free BSS Desc list. */
            LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry);
        }
    }

    return;

} /* end of scanRemoveBssDescsByPolicy() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief Delete BSS Descriptors from current list according to given BSSID.
 *
 * @param[in] prAdapter Pointer to the Adapter structure.
 * @param[in] aucBSSID  Given BSSID.
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
VOID
scanRemoveBssDescByBssid (
    IN P_ADAPTER_T prAdapter,
    IN UINT_8 aucBSSID[]
    )
{
    P_SCAN_INFO_T prScanInfo;
    P_LINK_T prBSSDescList;
    P_LINK_T prFreeBSSDescList;
    P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T)NULL;
    P_BSS_DESC_T prBSSDescNext;

    ASSERT(prAdapter);
    ASSERT(aucBSSID);

    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);
    prBSSDescList = &prScanInfo->rBSSDescList;
    prFreeBSSDescList = &prScanInfo->rFreeBSSDescList;

    /* Check if such BSS Descriptor exists in a valid list */
    LINK_FOR_EACH_ENTRY_SAFE(prBssDesc, prBSSDescNext, prBSSDescList, rLinkEntry, BSS_DESC_T) {

        if (EQUAL_MAC_ADDR(prBssDesc->aucBSSID, aucBSSID)) {

            /* Remove this BSS Desc from the BSS Desc list */
            LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc);

            /* Return this BSS Desc to the free BSS Desc list.
 */
            LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry);

            /* BSSID is not unique, so need to traverse the whole link-list */
        }
    }

    return;
} /* end of scanRemoveBssDescByBssid() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief Delete BSS Descriptors from current list according to given band configuration
 *
 * @param[in] prAdapter     Pointer to the Adapter structure.
 * @param[in] eBand         Given band
 * @param[in] eNetTypeIndex AIS - Remove IBSS/Infrastructure BSS
 *                          BOW - Remove BOW BSS
 *                          P2P - Remove P2P BSS
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
VOID
scanRemoveBssDescByBandAndNetwork (
    IN P_ADAPTER_T prAdapter,
    IN ENUM_BAND_T eBand,
    IN ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex
    )
{
    P_SCAN_INFO_T prScanInfo;
    P_LINK_T prBSSDescList;
    P_LINK_T prFreeBSSDescList;
    P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T)NULL;
    P_BSS_DESC_T prBSSDescNext;
    BOOLEAN fgToRemove;

    ASSERT(prAdapter);
    ASSERT(eBand <= BAND_NUM);
    ASSERT(eNetTypeIndex <= NETWORK_TYPE_INDEX_NUM);

    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);
    prBSSDescList = &prScanInfo->rBSSDescList;
    prFreeBSSDescList = &prScanInfo->rFreeBSSDescList;

    if(eBand == BAND_NULL) {
        return; /* no need to do anything, keep all scan result */
    }

    /* Check if such BSS Descriptor exists in a valid list */
    LINK_FOR_EACH_ENTRY_SAFE(prBssDesc, prBSSDescNext, prBSSDescList, rLinkEntry, BSS_DESC_T) {
        fgToRemove = FALSE;

        if(prBssDesc->eBand == eBand) {
            /* Map the network-type index onto the BSS types it owns. */
            switch (eNetTypeIndex) {
            case NETWORK_TYPE_AIS_INDEX:
                if((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE) ||
                   (prBssDesc->eBSSType == BSS_TYPE_IBSS)) {
                    fgToRemove = TRUE;
                }
                break;

            case NETWORK_TYPE_P2P_INDEX:
                if(prBssDesc->eBSSType == BSS_TYPE_P2P_DEVICE) {
                    fgToRemove = TRUE;
                }
                break;

            case NETWORK_TYPE_BOW_INDEX:
                if(prBssDesc->eBSSType == BSS_TYPE_BOW_DEVICE) {
                    fgToRemove = TRUE;
                }
                break;

            default:
                ASSERT(0);
                break;
            }
        }

        if(fgToRemove == TRUE) {
            /* Remove this BSS Desc from the BSS Desc list */
            LINK_REMOVE_KNOWN_ENTRY(prBSSDescList, prBssDesc);

            /* Return this BSS Desc to the free BSS Desc list. */
            LINK_INSERT_TAIL(prFreeBSSDescList, &prBssDesc->rLinkEntry);
        }
    }

    return;
} /* end of scanRemoveBssDescByBand() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief Clear the CONNECTION FLAG of a specified BSS Descriptor.
 *
 * @param[in] aucBSSID Given BSSID.
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
VOID
scanRemoveConnFlagOfBssDescByBssid (
    IN P_ADAPTER_T prAdapter,
    IN UINT_8 aucBSSID[]
    )
{
    P_SCAN_INFO_T prScanInfo;
    P_LINK_T prBSSDescList;
    P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T)NULL;

    ASSERT(prAdapter);
    ASSERT(aucBSSID);

    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);
    prBSSDescList = &prScanInfo->rBSSDescList;

    /* Search BSS Desc from current SCAN result list. */
    LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) {

        if (EQUAL_MAC_ADDR(prBssDesc->aucBSSID, aucBSSID)) {
            prBssDesc->fgIsConnected = FALSE;
            prBssDesc->fgIsConnecting = FALSE;

            /* BSSID is not unique, so need to traverse the whole link-list */
        }
    }

    return;
} /* end of scanRemoveConnectionFlagOfBssDescByBssid() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief Allocate new BSS_DESC_T
 *
 * @param[in] prAdapter Pointer to the Adapter structure.
 *
 * @return Pointer to BSS Descriptor, if has free space. NULL, if has no space.
*/ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanAllocateBssDesc ( IN P_ADAPTER_T prAdapter ) { P_SCAN_INFO_T prScanInfo; P_LINK_T prFreeBSSDescList; P_BSS_DESC_T prBssDesc; ASSERT(prAdapter); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prFreeBSSDescList = &prScanInfo->rFreeBSSDescList; LINK_REMOVE_HEAD(prFreeBSSDescList, prBssDesc, P_BSS_DESC_T); if (prBssDesc) { P_LINK_T prBSSDescList; kalMemZero(prBssDesc, sizeof(BSS_DESC_T)); #if CFG_ENABLE_WIFI_DIRECT LINK_INITIALIZE(&(prBssDesc->rP2pDeviceList)); prBssDesc->fgIsP2PPresent = FALSE; #endif /* CFG_ENABLE_WIFI_DIRECT */ prBSSDescList = &prScanInfo->rBSSDescList; /* NOTE(Kevin): In current design, this new empty BSS_DESC_T will be * inserted to BSSDescList immediately. */ LINK_INSERT_TAIL(prBSSDescList, &prBssDesc->rLinkEntry); } return prBssDesc; } /* end of scanAllocateBssDesc() */ /*----------------------------------------------------------------------------*/ /*! * @brief This API parses Beacon/ProbeResp frame and insert extracted BSS_DESC_T * with IEs into prAdapter->rWifiVar.rScanInfo.aucScanBuffer * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] prSwRfb Pointer to the receiving frame buffer. 
 *
 * @return Pointer to BSS Descriptor
 *         NULL if the Beacon/ProbeResp frame is invalid
 */
/*----------------------------------------------------------------------------*/
P_BSS_DESC_T
scanAddToBssDesc (
    IN P_ADAPTER_T prAdapter,
    IN P_SW_RFB_T prSwRfb
    )
{
    P_BSS_DESC_T prBssDesc = NULL;
    UINT_16 u2CapInfo;
    ENUM_BSS_TYPE_T eBSSType = BSS_TYPE_INFRASTRUCTURE;

    PUINT_8 pucIE;
    UINT_16 u2IELength;
    UINT_16 u2Offset = 0;

    P_WLAN_BEACON_FRAME_T prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T)NULL;
    P_IE_SSID_T prIeSsid = (P_IE_SSID_T)NULL;
    P_IE_SUPPORTED_RATE_T prIeSupportedRate = (P_IE_SUPPORTED_RATE_T)NULL;
    P_IE_EXT_SUPPORTED_RATE_T prIeExtSupportedRate = (P_IE_EXT_SUPPORTED_RATE_T)NULL;
    P_HIF_RX_HEADER_T prHifRxHdr;
    UINT_8 ucHwChannelNum = 0;
    UINT_8 ucIeDsChannelNum = 0;
    UINT_8 ucIeHtChannelNum = 0;
    BOOLEAN fgIsValidSsid = FALSE, fgEscape = FALSE;
    PARAM_SSID_T rSsid;
    UINT_64 u8Timestamp;
    BOOLEAN fgIsNewBssDesc = FALSE;

    UINT_32 i;
    UINT_8 ucSSIDChar;

    ASSERT(prAdapter);
    ASSERT(prSwRfb);

    prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T)prSwRfb->pvHeader;

    WLAN_GET_FIELD_16(&prWlanBeaconFrame->u2CapInfo, &u2CapInfo);
    WLAN_GET_FIELD_64(&prWlanBeaconFrame->au4Timestamp[0], &u8Timestamp);

    /* decide BSS type from the ESS/IBSS capability bits */
    switch (u2CapInfo & CAP_INFO_BSS_TYPE) {
    case CAP_INFO_ESS:
        /* It can also be Group Owner of P2P Group. */
        eBSSType = BSS_TYPE_INFRASTRUCTURE;
        break;

    case CAP_INFO_IBSS:
        eBSSType = BSS_TYPE_IBSS;
        break;

    case 0:
        /* The P2P Device shall set the ESS bit of the Capabilities field
         * in the Probe Response frame to 0 and IBSS bit to 0. (3.1.2.1.1)
         */
        eBSSType = BSS_TYPE_P2P_DEVICE;
        break;

#if CFG_ENABLE_BT_OVER_WIFI
        // @TODO: add rule to identify BOW beacons
#endif

    default:
        return NULL;
    }

    //4 <1.1> Pre-parse SSID IE
    pucIE = prWlanBeaconFrame->aucInfoElem;
    u2IELength = (prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) -
        (UINT_16)OFFSET_OF(WLAN_BEACON_FRAME_BODY_T, aucInfoElem[0]);

    if (u2IELength > CFG_IE_BUFFER_SIZE) {
        u2IELength = CFG_IE_BUFFER_SIZE;
    }

    IE_FOR_EACH(pucIE, u2IELength, u2Offset) {
        switch (IE_ID(pucIE)) {
        case ELEM_ID_SSID:
            if (IE_LEN(pucIE) <= ELEM_MAX_LEN_SSID) {
                ucSSIDChar = '\0';

                /* D-Link DWL-900AP+ */
                if (IE_LEN(pucIE) == 0) {
                    fgIsValidSsid = FALSE;
                }
                /* Cisco AP1230A -
                 *   (IE_LEN(pucIE) == 1) && (SSID_IE(pucIE)->aucSSID[0] == '\0')
                 * Linksys WRK54G/ASUS WL520g -
                 *   (IE_LEN(pucIE) == n) && (SSID_IE(pucIE)->aucSSID[0~(n-1)] == '\0')
                 */
                else {
                    /* OR all SSID bytes together: stays 0 only if every byte
                     * is NUL, i.e. a hidden/blanked SSID. */
                    for (i = 0; i < IE_LEN(pucIE); i++) {
                        ucSSIDChar |= SSID_IE(pucIE)->aucSSID[i];
                    }

                    if (ucSSIDChar) {
                        fgIsValidSsid = TRUE;
                    }
                }

                /* Update SSID to BSS Descriptor only if SSID is not hidden. */
                if (fgIsValidSsid == TRUE) {
                    COPY_SSID(rSsid.aucSsid,
                              rSsid.u4SsidLen,
                              SSID_IE(pucIE)->aucSSID,
                              SSID_IE(pucIE)->ucLength);
                }
            }
            fgEscape = TRUE;
            break;

        default:
            break;
        }

        if(fgEscape == TRUE) {
            break;
        }
    }

    //4 <1.2> Replace existing BSS_DESC_T or allocate a new one
    prBssDesc = scanSearchExistingBssDescWithSsid(prAdapter,
                    eBSSType,
                    (PUINT_8)prWlanBeaconFrame->aucBSSID,
                    (PUINT_8)prWlanBeaconFrame->aucSrcAddr,
                    fgIsValidSsid,
                    fgIsValidSsid == TRUE ? &rSsid : NULL);

    if (prBssDesc == (P_BSS_DESC_T)NULL) {
        fgIsNewBssDesc = TRUE;

        do {
            //4 <1.2.1> First trial of allocation
            prBssDesc = scanAllocateBssDesc(prAdapter);
            if (prBssDesc) {
                break;
            }

            //4 <1.2.2> Hidden is useless, remove the oldest hidden ssid. (for passive scan)
            scanRemoveBssDescsByPolicy(prAdapter,
                    (SCN_RM_POLICY_EXCLUDE_CONNECTED | SCN_RM_POLICY_OLDEST_HIDDEN));

            //4 <1.2.3> Second trial of allocation
            prBssDesc = scanAllocateBssDesc(prAdapter);
            if (prBssDesc) {
                break;
            }

            //4 <1.2.4> Remove the weakest one
            /* If there are more than half of BSS which has the same ssid as connection
             * setting, remove the weakest one from them.
             * Else remove the weakest one.
             */
            scanRemoveBssDescsByPolicy(prAdapter,
                    (SCN_RM_POLICY_EXCLUDE_CONNECTED | SCN_RM_POLICY_SMART_WEAKEST));

            //4 <1.2.5> reallocation
            prBssDesc = scanAllocateBssDesc(prAdapter);
            if (prBssDesc) {
                break;
            }

            //4 <1.2.6> no space, should not happen
            //ASSERT(0); // still no space available ?
            return NULL;

        } while(FALSE);
    }
    else {
        OS_SYSTIME rCurrentTime;

        // WCXRP00000091
        // if the received strength is much weaker than the original one,
        // ignore it due to it might be received on the folding frequency
        GET_CURRENT_SYSTIME(&rCurrentTime);

        if (prBssDesc->eBSSType != eBSSType) {
            prBssDesc->eBSSType = eBSSType;
        }
        else if(HIF_RX_HDR_GET_CHNL_NUM(prSwRfb->prHifRxHdr) != prBssDesc->ucChannelNum &&
                prBssDesc->ucRCPI > prSwRfb->prHifRxHdr->ucRcpi) {

            // for signal strength is too much weaker and previous beacon is not stale
            if((prBssDesc->ucRCPI - prSwRfb->prHifRxHdr->ucRcpi) >= REPLICATED_BEACON_STRENGTH_THRESHOLD &&
                    rCurrentTime - prBssDesc->rUpdateTime <= REPLICATED_BEACON_FRESH_PERIOD) {
                return prBssDesc;
            }
            // for received beacons too close in time domain
            else if(rCurrentTime - prBssDesc->rUpdateTime <= REPLICATED_BEACON_TIME_THRESHOLD) {
                return prBssDesc;
            }
        }

        /* if Timestamp has been reset, re-generate BSS DESC 'cause AP should have reset itself */
        if(prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE &&
                u8Timestamp < prBssDesc->u8TimeStamp.QuadPart) {
            BOOLEAN fgIsConnected, fgIsConnecting;

            /* set flag for indicating this is a new BSS-DESC */
            fgIsNewBssDesc = TRUE;

            /* backup 2 flags for APs which reset timestamp unexpectedly */
            fgIsConnected = prBssDesc->fgIsConnected;
            fgIsConnecting = prBssDesc->fgIsConnecting;
            scanRemoveBssDescByBssid(prAdapter, prBssDesc->aucBSSID);

            prBssDesc = scanAllocateBssDesc(prAdapter);
            if (!prBssDesc) {
                return NULL;
            }

            /* restore */
            prBssDesc->fgIsConnected = fgIsConnected;
            prBssDesc->fgIsConnecting = fgIsConnecting;
        }
    }

    /* NOTE: Keep consistency of Scan Record during JOIN process */
    if (fgIsNewBssDesc == FALSE && prBssDesc->fgIsConnecting) {
        return prBssDesc;
    }

    //4 <2> Get information from Fixed Fields
    prBssDesc->eBSSType = eBSSType; /* Update the latest BSS type information. */

    COPY_MAC_ADDR(prBssDesc->aucSrcAddr, prWlanBeaconFrame->aucSrcAddr);

    COPY_MAC_ADDR(prBssDesc->aucBSSID, prWlanBeaconFrame->aucBSSID);

    prBssDesc->u8TimeStamp.QuadPart = u8Timestamp;

    WLAN_GET_FIELD_16(&prWlanBeaconFrame->u2BeaconInterval, &prBssDesc->u2BeaconInterval);

    prBssDesc->u2CapInfo = u2CapInfo;

    //4 <2.1> Retrieve IEs for later parsing
    u2IELength = (prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) -
        (UINT_16)OFFSET_OF(WLAN_BEACON_FRAME_BODY_T, aucInfoElem[0]);

    if (u2IELength > CFG_IE_BUFFER_SIZE) {
        u2IELength = CFG_IE_BUFFER_SIZE;
        prBssDesc->fgIsIEOverflow = TRUE;
    }
    else {
        prBssDesc->fgIsIEOverflow = FALSE;
    }
    prBssDesc->u2IELength = u2IELength;

    kalMemCopy(prBssDesc->aucIEBuf, prWlanBeaconFrame->aucInfoElem, u2IELength);

    //4 <2.2> reset prBssDesc variables in case that AP has been reconfigured
    prBssDesc->fgIsERPPresent = FALSE;
    prBssDesc->fgIsHTPresent = FALSE;
    prBssDesc->eSco = CHNL_EXT_SCN;
    prBssDesc->fgIEWAPI = FALSE;
#if CFG_RSN_MIGRATION
    prBssDesc->fgIERSN = FALSE;
#endif
#if CFG_PRIVACY_MIGRATION
    prBssDesc->fgIEWPA = FALSE;
#endif

    //4 <3.1> Full IE parsing on SW_RFB_T
    pucIE = prWlanBeaconFrame->aucInfoElem;

    IE_FOR_EACH(pucIE, u2IELength, u2Offset) {

        switch (IE_ID(pucIE)) {
        case ELEM_ID_SSID:
            if ((!prIeSsid) && /* NOTE(Kevin): for Atheros IOT #1 */
                (IE_LEN(pucIE) <= ELEM_MAX_LEN_SSID)) {
                BOOLEAN fgIsHiddenSSID = FALSE;

                ucSSIDChar = '\0';

                prIeSsid = (P_IE_SSID_T)pucIE;

                /* D-Link DWL-900AP+ */
                if (IE_LEN(pucIE) == 0) {
                    fgIsHiddenSSID = TRUE;
                }
                /* Cisco AP1230A -
                 *   (IE_LEN(pucIE) == 1) && (SSID_IE(pucIE)->aucSSID[0] == '\0')
                 * Linksys WRK54G/ASUS WL520g -
                 *   (IE_LEN(pucIE) == n) && (SSID_IE(pucIE)->aucSSID[0~(n-1)] == '\0')
                 */
                else {
                    for (i = 0; i < IE_LEN(pucIE); i++) {
                        ucSSIDChar |= SSID_IE(pucIE)->aucSSID[i];
                    }

                    if (!ucSSIDChar) {
                        fgIsHiddenSSID = TRUE;
                    }
                }

                /* Update SSID to BSS Descriptor only if SSID is not hidden. */
                if (!fgIsHiddenSSID) {
                    COPY_SSID(prBssDesc->aucSSID,
                              prBssDesc->ucSSIDLen,
                              SSID_IE(pucIE)->aucSSID,
                              SSID_IE(pucIE)->ucLength);
                }

            }
            break;

        case ELEM_ID_SUP_RATES:
            /* NOTE(Kevin): Buffalo WHR-G54S's supported rate set IE exceed 8.
             * IE_LEN(pucIE) == 12, "1(B), 2(B), 5.5(B), 6(B), 9(B), 11(B),
             * 12(B), 18(B), 24(B), 36(B), 48(B), 54(B)"
             */
            /* TP-LINK will set extra and incorrect ie with ELEM_ID_SUP_RATES */
            if ((!prIeSupportedRate) && (IE_LEN(pucIE) <= RATE_NUM)) {
                prIeSupportedRate = SUP_RATES_IE(pucIE);
            }
            break;

        case ELEM_ID_DS_PARAM_SET:
            if (IE_LEN(pucIE) == ELEM_MAX_LEN_DS_PARAMETER_SET) {
                ucIeDsChannelNum = DS_PARAM_IE(pucIE)->ucCurrChnl;
            }
            break;

        case ELEM_ID_TIM:
            if (IE_LEN(pucIE) <= ELEM_MAX_LEN_TIM) {
                prBssDesc->ucDTIMPeriod = TIM_IE(pucIE)->ucDTIMPeriod;
            }
            break;

        case ELEM_ID_IBSS_PARAM_SET:
            if (IE_LEN(pucIE) == ELEM_MAX_LEN_IBSS_PARAMETER_SET){
                prBssDesc->u2ATIMWindow = IBSS_PARAM_IE(pucIE)->u2ATIMWindow;
            }
            break;

#if 0 //CFG_SUPPORT_802_11D
        case ELEM_ID_COUNTRY_INFO:
            prBssDesc->prIECountry = (P_IE_COUNTRY_T)pucIE;
            break;
#endif

        case ELEM_ID_ERP_INFO:
            if (IE_LEN(pucIE) == ELEM_MAX_LEN_ERP) {
                prBssDesc->fgIsERPPresent = TRUE;
            }
            break;

        case ELEM_ID_EXTENDED_SUP_RATES:
            if (!prIeExtSupportedRate) {
                prIeExtSupportedRate = EXT_SUP_RATES_IE(pucIE);
            }
            break;

#if CFG_RSN_MIGRATION
        case ELEM_ID_RSN:
            if (rsnParseRsnIE(prAdapter, RSN_IE(pucIE), &prBssDesc->rRSNInfo)) {
                prBssDesc->fgIERSN = TRUE;
                prBssDesc->u2RsnCap = prBssDesc->rRSNInfo.u2RsnCap;
                if (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2) {
                    rsnCheckPmkidCache(prAdapter, prBssDesc);
                }
            }
            break;
#endif

        case ELEM_ID_HT_CAP:
            prBssDesc->fgIsHTPresent = TRUE;
            break;

        case ELEM_ID_HT_OP:
            if (IE_LEN(pucIE) != (sizeof(IE_HT_OP_T) - 2)) {
                break;
            }

            if ((((P_IE_HT_OP_T) pucIE)->ucInfo1 & HT_OP_INFO1_SCO) != CHNL_EXT_RES) {
                prBssDesc->eSco = (ENUM_CHNL_EXT_T)
                    (((P_IE_HT_OP_T) pucIE)->ucInfo1 & HT_OP_INFO1_SCO);
            }
            ucIeHtChannelNum = ((P_IE_HT_OP_T) pucIE)->ucPrimaryChannel;

            break;

#if CFG_SUPPORT_WAPI
        case ELEM_ID_WAPI:
            if (wapiParseWapiIE(WAPI_IE(pucIE), &prBssDesc->rIEWAPI)) {
                prBssDesc->fgIEWAPI = TRUE;
            }
            break;
#endif

        case ELEM_ID_VENDOR: // ELEM_ID_P2P, ELEM_ID_WMM
            {
                UINT_8 ucOuiType;
                UINT_16 u2SubTypeVersion;
#if CFG_PRIVACY_MIGRATION
                if (rsnParseCheckForWFAInfoElem(prAdapter, pucIE, &ucOuiType, &u2SubTypeVersion)) {
                    if ((ucOuiType == VENDOR_OUI_TYPE_WPA) &&
                        (u2SubTypeVersion == VERSION_WPA)) {
                        if (rsnParseWpaIE(prAdapter, WPA_IE(pucIE), &prBssDesc->rWPAInfo)) {
                            prBssDesc->fgIEWPA = TRUE;
                        }
                    }
                }
#endif

#if CFG_ENABLE_WIFI_DIRECT
                if(prAdapter->fgIsP2PRegistered) {
                    if (p2pFuncParseCheckForP2PInfoElem(prAdapter, pucIE, &ucOuiType)) {
                        if (ucOuiType == VENDOR_OUI_TYPE_P2P) {
                            prBssDesc->fgIsP2PPresent = TRUE;
                        }
                    }
                }
#endif /* CFG_ENABLE_WIFI_DIRECT */
            }
            break;

        /* no default */
        }
    }

    //4 <3.2> Save information from IEs - SSID
    /* Update Flag of Hidden SSID for used in SEARCH STATE. */

    /* NOTE(Kevin): in current driver, the ucSSIDLen == 0 represent
     * all cases of hidden SSID.
     * If the fgIsHiddenSSID == TRUE, it means we didn't get the ProbeResp with
     * valid SSID.
     */
    if (prBssDesc->ucSSIDLen == 0) {
        prBssDesc->fgIsHiddenSSID = TRUE;
    }
    else {
        prBssDesc->fgIsHiddenSSID = FALSE;
    }

    //4 <3.3> Check rate information in related IEs.
    if (prIeSupportedRate || prIeExtSupportedRate) {
        rateGetRateSetFromIEs(prIeSupportedRate,
                              prIeExtSupportedRate,
                              &prBssDesc->u2OperationalRateSet,
                              &prBssDesc->u2BSSBasicRateSet,
                              &prBssDesc->fgIsUnknownBssBasicRate);
    }

    //4 <4> Update information from HIF RX Header
    {
        prHifRxHdr = prSwRfb->prHifRxHdr;

        ASSERT(prHifRxHdr);

        //4 <4.1> Get TSF comparison result
        prBssDesc->fgIsLargerTSF = HIF_RX_HDR_GET_TCL_FLAG(prHifRxHdr);

        //4 <4.2> Get Band information
        prBssDesc->eBand = HIF_RX_HDR_GET_RF_BAND(prHifRxHdr);

        //4 <4.2> Get channel and RCPI information
        ucHwChannelNum = HIF_RX_HDR_GET_CHNL_NUM(prHifRxHdr);

        if (BAND_2G4 == prBssDesc->eBand) {
            /* Update RCPI if in right channel */
            if (ucIeDsChannelNum >= 1 && ucIeDsChannelNum <= 14) {
                // Receive Beacon/ProbeResp frame from adjacent channel.
                if ((ucIeDsChannelNum == ucHwChannelNum) ||
                    (prHifRxHdr->ucRcpi > prBssDesc->ucRCPI)) {
                    prBssDesc->ucRCPI = prHifRxHdr->ucRcpi;
                }
                // trust channel information brought by IE
                prBssDesc->ucChannelNum = ucIeDsChannelNum;
            }
            else if(ucIeHtChannelNum >= 1 && ucIeHtChannelNum <= 14) {
                // Receive Beacon/ProbeResp frame from adjacent channel.
                if ((ucIeHtChannelNum == ucHwChannelNum) ||
                    (prHifRxHdr->ucRcpi > prBssDesc->ucRCPI)) {
                    prBssDesc->ucRCPI = prHifRxHdr->ucRcpi;
                }
                // trust channel information brought by IE
                prBssDesc->ucChannelNum = ucIeHtChannelNum;
            }
            else {
                prBssDesc->ucRCPI = prHifRxHdr->ucRcpi;
                prBssDesc->ucChannelNum = ucHwChannelNum;
            }
        }
        // 5G Band
        else {
            if(ucIeHtChannelNum >= 1 && ucIeHtChannelNum < 200) {
                // Receive Beacon/ProbeResp frame from adjacent channel.
                if ((ucIeHtChannelNum == ucHwChannelNum) ||
                    (prHifRxHdr->ucRcpi > prBssDesc->ucRCPI)) {
                    prBssDesc->ucRCPI = prHifRxHdr->ucRcpi;
                }
                // trust channel information brought by IE
                prBssDesc->ucChannelNum = ucIeHtChannelNum;
            }
            else {
                /* Always update RCPI */
                prBssDesc->ucRCPI = prHifRxHdr->ucRcpi;
                prBssDesc->ucChannelNum = ucHwChannelNum;
            }
        }
    }

    //4 <5> PHY type setting
    prBssDesc->ucPhyTypeSet = 0;

    if (BAND_2G4 == prBssDesc->eBand) {
        /* check if support 11n */
        if (prBssDesc->fgIsHTPresent) {
            prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_HT;
        }

        /* if not 11n only */
        if (!(prBssDesc->u2BSSBasicRateSet & RATE_SET_BIT_HT_PHY)) {
            /* check if support 11g */
            if ((prBssDesc->u2OperationalRateSet & RATE_SET_OFDM) ||
                prBssDesc->fgIsERPPresent) {
                prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_ERP;
            }

            /* if not 11g only */
            if (!(prBssDesc->u2BSSBasicRateSet & RATE_SET_OFDM)) {
                /* check if support 11b */
                if ((prBssDesc->u2OperationalRateSet & RATE_SET_HR_DSSS)) {
                    prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_HR_DSSS;
                }
            }
        }
    }
    else { /* (BAND_5G == prBssDesc->eBand) */
        /* check if support 11n */
        if (prBssDesc->fgIsHTPresent) {
            prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_HT;
        }

        /* if not 11n only */
        if (!(prBssDesc->u2BSSBasicRateSet & RATE_SET_BIT_HT_PHY)) {
            /* Support 11a definitely */
            prBssDesc->ucPhyTypeSet |= PHY_TYPE_BIT_OFDM;

            ASSERT(!(prBssDesc->u2OperationalRateSet & RATE_SET_HR_DSSS));
        }
    }

    //4 <6> Update BSS_DESC_T's Last Update TimeStamp.
    GET_CURRENT_SYSTIME(&prBssDesc->rUpdateTime);

    return prBssDesc;
}


/*----------------------------------------------------------------------------*/
/*!
 * @brief Convert the Beacon or ProbeResp Frame in SW_RFB_T to scan result for query
 *
 * @param[in] prSwRfb Pointer to the receiving SW_RFB_T structure.
 *
 * @retval WLAN_STATUS_SUCCESS   It is a valid Scan Result and been sent to the host.
 * @retval WLAN_STATUS_FAILURE   It is not a valid Scan Result.
 */
/*----------------------------------------------------------------------------*/
WLAN_STATUS
scanAddScanResult (
    IN P_ADAPTER_T prAdapter,
    IN P_BSS_DESC_T prBssDesc,
    IN P_SW_RFB_T prSwRfb
    )
{
    P_SCAN_INFO_T prScanInfo;
    UINT_8 aucRatesEx[PARAM_MAX_LEN_RATES_EX];
    P_WLAN_BEACON_FRAME_T prWlanBeaconFrame;
    PARAM_MAC_ADDRESS rMacAddr;
    PARAM_SSID_T rSsid;
    ENUM_PARAM_NETWORK_TYPE_T eNetworkType;
    PARAM_802_11_CONFIG_T rConfiguration;
    ENUM_PARAM_OP_MODE_T eOpMode;
    UINT_8 ucRateLen = 0;
    UINT_32 i;

    ASSERT(prAdapter);
    ASSERT(prSwRfb);

    prScanInfo = &(prAdapter->rWifiVar.rScanInfo);

    /* Map the descriptor's band/rates onto the host-facing network type. */
    if (prBssDesc->eBand == BAND_2G4) {
        if ((prBssDesc->u2OperationalRateSet & RATE_SET_OFDM) ||
            prBssDesc->fgIsERPPresent) {
            eNetworkType = PARAM_NETWORK_TYPE_OFDM24;
        }
        else {
            eNetworkType = PARAM_NETWORK_TYPE_DS;
        }
    }
    else {
        ASSERT(prBssDesc->eBand == BAND_5G);
        eNetworkType = PARAM_NETWORK_TYPE_OFDM5;
    }

    if(prBssDesc->eBSSType == BSS_TYPE_P2P_DEVICE) {
        /* NOTE(Kevin): Not supported by WZC(TBD) */
        return WLAN_STATUS_FAILURE;
    }

    prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T)prSwRfb->pvHeader;
    COPY_MAC_ADDR(rMacAddr, prWlanBeaconFrame->aucBSSID);
    COPY_SSID(rSsid.aucSsid,
              rSsid.u4SsidLen,
              prBssDesc->aucSSID,
              prBssDesc->ucSSIDLen);

    rConfiguration.u4Length = sizeof(PARAM_802_11_CONFIG_T);
    rConfiguration.u4BeaconPeriod = (UINT_32) prWlanBeaconFrame->u2BeaconInterval;
    rConfiguration.u4ATIMWindow = prBssDesc->u2ATIMWindow;
    rConfiguration.u4DSConfig = nicChannelNum2Freq(prBssDesc->ucChannelNum);
    rConfiguration.rFHConfig.u4Length = sizeof(PARAM_802_11_CONFIG_FH_T);

    rateGetDataRatesFromRateSet(prBssDesc->u2OperationalRateSet,
                                0,
                                aucRatesEx,
                                &ucRateLen);

    /* NOTE(Kevin): Set unused entries, if any, at the end of the array to 0.
     * from OID_802_11_BSSID_LIST
     */
    for (i = ucRateLen; i < sizeof(aucRatesEx) / sizeof(aucRatesEx[0]) ; i++) {
        aucRatesEx[i] = 0;
    }

    switch(prBssDesc->eBSSType) {
    case BSS_TYPE_IBSS:
        eOpMode = NET_TYPE_IBSS;
        break;

    case BSS_TYPE_INFRASTRUCTURE:
    case BSS_TYPE_P2P_DEVICE:
    case BSS_TYPE_BOW_DEVICE:
    default:
        eOpMode = NET_TYPE_INFRA;
        break;
    }

    kalIndicateBssInfo(prAdapter->prGlueInfo,
                       (PUINT_8)prSwRfb->pvHeader,
                       prSwRfb->u2PacketLen,
                       prBssDesc->ucChannelNum,
                       RCPI_TO_dBm(prBssDesc->ucRCPI));

    nicAddScanResult(prAdapter,
                     rMacAddr,
                     &rSsid,
                     prWlanBeaconFrame->u2CapInfo & CAP_INFO_PRIVACY ? 1 : 0,
                     RCPI_TO_dBm(prBssDesc->ucRCPI),
                     eNetworkType,
                     &rConfiguration,
                     eOpMode,
                     aucRatesEx,
                     prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen,
                     (PUINT_8)((UINT_32)(prSwRfb->pvHeader) + WLAN_MAC_MGMT_HEADER_LEN));

    return WLAN_STATUS_SUCCESS;

} /* end of scanAddScanResult() */


/*----------------------------------------------------------------------------*/
/*!
 * @brief Parse the content of given Beacon or ProbeResp Frame.
 *
 * @param[in] prSwRfb Pointer to the receiving SW_RFB_T structure.
 *
 * @retval WLAN_STATUS_SUCCESS   if not report this SW_RFB_T to host
 * @retval WLAN_STATUS_PENDING   if report this SW_RFB_T to host as scan result
 */
/*----------------------------------------------------------------------------*/
WLAN_STATUS
scanProcessBeaconAndProbeResp (
    IN P_ADAPTER_T prAdapter,
    IN P_SW_RFB_T prSwRfb
    )
{
    P_CONNECTION_SETTINGS_T prConnSettings;
    P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T)NULL;
    WLAN_STATUS rStatus = WLAN_STATUS_SUCCESS;
    P_BSS_INFO_T prAisBssInfo;
    P_WLAN_BEACON_FRAME_T prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T)NULL;
#if CFG_SLT_SUPPORT
    P_SLT_INFO_T prSltInfo = (P_SLT_INFO_T)NULL;
#endif

    ASSERT(prAdapter);
    ASSERT(prSwRfb);

    //4 <0> Ignore invalid Beacon Frame
    if ((prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) <
        (TIMESTAMP_FIELD_LEN + BEACON_INTERVAL_FIELD_LEN + CAP_INFO_FIELD_LEN)) {
#ifndef _lint
        ASSERT(0);
#endif /* _lint */
        return rStatus;
    }

#if CFG_SLT_SUPPORT
    /* In SLT (production test) mode only the beacon counter is maintained;
     * normal scan processing below is skipped entirely. */
    prSltInfo = &prAdapter->rWifiVar.rSltInfo;

    if (prSltInfo->fgIsDUT) {
        DBGLOG(P2P, INFO, ("\n\rBCN: RX\n"));
        prSltInfo->u4BeaconReceiveCnt++;
        return WLAN_STATUS_SUCCESS;
    }
    else {
        return WLAN_STATUS_SUCCESS;
    }
#endif

    prConnSettings = &(prAdapter->rWifiVar.rConnSettings);
    prAisBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX]);
    prWlanBeaconFrame = (P_WLAN_BEACON_FRAME_T)prSwRfb->pvHeader;

    /* ALPS01475157: don't show SSID on scan list for multicast MAC AP */
    if (IS_BMCAST_MAC_ADDR(prWlanBeaconFrame->aucSrcAddr))
        return rStatus;

    //4 <1> Parse and add into BSS_DESC_T
    prBssDesc = scanAddToBssDesc(prAdapter, prSwRfb);

    if (prBssDesc) {

        //4 <1.1> Beacon Change Detection for Connected BSS
        if(prAisBssInfo->eConnectionState == PARAM_MEDIA_STATE_CONNECTED &&
                ((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE && prConnSettings->eOPMode != NET_TYPE_IBSS) ||
                 (prBssDesc->eBSSType == BSS_TYPE_IBSS && prConnSettings->eOPMode != NET_TYPE_INFRA)) &&
                EQUAL_MAC_ADDR(prBssDesc->aucBSSID, prAisBssInfo->aucBSSID) &&
                EQUAL_SSID(prBssDesc->aucSSID,
                           prBssDesc->ucSSIDLen,
                           prAisBssInfo->aucSSID,
                           prAisBssInfo->ucSSIDLen)) {
            BOOLEAN fgNeedDisconnect = FALSE;

#if CFG_SUPPORT_BEACON_CHANGE_DETECTION
            // <1.1.2> check if supported rate differs
            if(prAisBssInfo->u2OperationalRateSet != prBssDesc->u2OperationalRateSet) {
                fgNeedDisconnect = TRUE;
            }
#endif

            // <1.1.3> beacon content change detected, disconnect immediately
            if(fgNeedDisconnect == TRUE) {
                aisBssBeaconTimeout(prAdapter);
            }
        }

        //4 <1.1> Update AIS_BSS_INFO
        if(((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE && prConnSettings->eOPMode != NET_TYPE_IBSS) ||
            (prBssDesc->eBSSType == BSS_TYPE_IBSS && prConnSettings->eOPMode != NET_TYPE_INFRA))) {

            if (prAisBssInfo->eConnectionState == PARAM_MEDIA_STATE_CONNECTED) {

                /* *not* checking prBssDesc->fgIsConnected anymore,
                 * due to Linksys AP uses " " as hidden SSID, and would have different BSS descriptor
                 */
                if ((!prAisBssInfo->ucDTIMPeriod) &&
                     EQUAL_MAC_ADDR(prBssDesc->aucBSSID, prAisBssInfo->aucBSSID) &&
                     (prAisBssInfo->eCurrentOPMode == OP_MODE_INFRASTRUCTURE) &&
                     ((prWlanBeaconFrame->u2FrameCtrl & MASK_FRAME_TYPE) == MAC_FRAME_BEACON)) {

                    prAisBssInfo->ucDTIMPeriod = prBssDesc->ucDTIMPeriod;

                    /* sync with firmware for beacon information */
                    nicPmIndicateBssConnected(prAdapter, NETWORK_TYPE_AIS_INDEX);
                }
            }

#if CFG_SUPPORT_ADHOC
            if (EQUAL_SSID(prBssDesc->aucSSID,
                           prBssDesc->ucSSIDLen,
                           prConnSettings->aucSSID,
                           prConnSettings->ucSSIDLen) &&
                (prBssDesc->eBSSType == BSS_TYPE_IBSS) &&
                (prAisBssInfo->eCurrentOPMode == OP_MODE_IBSS)) {

                ibssProcessMatchedBeacon(prAdapter,
                                         prAisBssInfo,
                                         prBssDesc,
                                         prSwRfb->prHifRxHdr->ucRcpi);
            }
#endif /* CFG_SUPPORT_ADHOC */
        }

        rlmProcessBcn(prAdapter,
                      prSwRfb,
                      ((P_WLAN_BEACON_FRAME_T)(prSwRfb->pvHeader))->aucInfoElem,
                      (prSwRfb->u2PacketLen - prSwRfb->u2HeaderLen) -
                          (UINT_16)(OFFSET_OF(WLAN_BEACON_FRAME_BODY_T, aucInfoElem[0])));

        //4 <3> Send SW_RFB_T to HIF when we perform SCAN for HOST
        if(prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE ||
           prBssDesc->eBSSType == BSS_TYPE_IBSS) {
            /* for AIS, send to host */
            if (prConnSettings->fgIsScanReqIssued &&
                rlmDomainIsLegalChannel(prAdapter,
                                        prBssDesc->eBand,
                                        prBssDesc->ucChannelNum) == TRUE) {
                ENUM_BAND_T eBand;
                UINT_8 ucChannel;
                BOOLEAN fgAddToScanResult;

                /* check ucChannelNum/eBand for adjacent channel filtering */
                if(cnmAisInfraChannelFixed(prAdapter, &eBand, &ucChannel) == TRUE &&
                        (eBand != prBssDesc->eBand || ucChannel != prBssDesc->ucChannelNum)) {
                    fgAddToScanResult = FALSE;
                }
                else {
                    fgAddToScanResult = TRUE;
                }

                if(fgAddToScanResult == TRUE) {
                    rStatus = scanAddScanResult(prAdapter, prBssDesc, prSwRfb);
                }
            }
        }

#if CFG_ENABLE_WIFI_DIRECT
        if(prAdapter->fgIsP2PRegistered) {
            scanP2pProcessBeaconAndProbeResp(
                    prAdapter,
                    prSwRfb,
                    &rStatus,
                    prBssDesc,
                    prWlanBeaconFrame);
        }
#endif
    }

    return rStatus;

} /* end of scanProcessBeaconAndProbeResp() */


/*----------------------------------------------------------------------------*/
/*!
 * \brief Search the Candidate of BSS Descriptor for JOIN(Infrastructure) or
 *        MERGE(AdHoc) according to current Connection Policy.
 *
 * \return Pointer to BSS Descriptor, if found.
NULL, if not found */ /*----------------------------------------------------------------------------*/ P_BSS_DESC_T scanSearchBssDescByPolicy ( IN P_ADAPTER_T prAdapter, IN ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex ) { P_CONNECTION_SETTINGS_T prConnSettings; P_BSS_INFO_T prBssInfo; P_AIS_SPECIFIC_BSS_INFO_T prAisSpecBssInfo; P_SCAN_INFO_T prScanInfo; P_LINK_T prBSSDescList; P_BSS_DESC_T prBssDesc = (P_BSS_DESC_T)NULL; P_BSS_DESC_T prPrimaryBssDesc = (P_BSS_DESC_T)NULL; P_BSS_DESC_T prCandidateBssDesc = (P_BSS_DESC_T)NULL; P_STA_RECORD_T prStaRec = (P_STA_RECORD_T)NULL; P_STA_RECORD_T prPrimaryStaRec; P_STA_RECORD_T prCandidateStaRec = (P_STA_RECORD_T)NULL; OS_SYSTIME rCurrentTime; /* The first one reach the check point will be our candidate */ BOOLEAN fgIsFindFirst = (BOOLEAN)FALSE; BOOLEAN fgIsFindBestRSSI = (BOOLEAN)FALSE; BOOLEAN fgIsFindBestEncryptionLevel = (BOOLEAN)FALSE; //BOOLEAN fgIsFindMinChannelLoad = (BOOLEAN)FALSE; /* TODO(Kevin): Support Min Channel Load */ //UINT_8 aucChannelLoad[CHANNEL_NUM] = {0}; BOOLEAN fgIsFixedChannel; ENUM_BAND_T eBand; UINT_8 ucChannel; ASSERT(prAdapter); prConnSettings = &(prAdapter->rWifiVar.rConnSettings); prBssInfo = &(prAdapter->rWifiVar.arBssInfo[eNetTypeIndex]); prAisSpecBssInfo = &(prAdapter->rWifiVar.rAisSpecificBssInfo); prScanInfo = &(prAdapter->rWifiVar.rScanInfo); prBSSDescList = &prScanInfo->rBSSDescList; GET_CURRENT_SYSTIME(&rCurrentTime); /* check for fixed channel operation */ if(eNetTypeIndex == NETWORK_TYPE_AIS_INDEX) { fgIsFixedChannel = cnmAisInfraChannelFixed(prAdapter, &eBand, &ucChannel); } else { fgIsFixedChannel = FALSE; } #if DBG if (prConnSettings->ucSSIDLen < ELEM_MAX_LEN_SSID) { prConnSettings->aucSSID[prConnSettings->ucSSIDLen] = '\0'; } #endif DBGLOG(SCN, INFO, ("SEARCH: Num Of BSS_DESC_T = %lu, Look for SSID: %s\n", prBSSDescList->u4NumElem, prConnSettings->aucSSID)); //4 <1> The outer loop to search for a candidate. 
LINK_FOR_EACH_ENTRY(prBssDesc, prBSSDescList, rLinkEntry, BSS_DESC_T) { /* TODO(Kevin): Update Minimum Channel Load Information here */ DBGLOG(SCN, INFO, ("SEARCH: ["MACSTR"], SSID:%s\n", MAC2STR(prBssDesc->aucBSSID), prBssDesc->aucSSID)); //4 <2> Check PHY Type and attributes //4 <2.1> Check Unsupported BSS PHY Type if (!(prBssDesc->ucPhyTypeSet & (prAdapter->rWifiVar.ucAvailablePhyTypeSet))) { DBGLOG(SCN, INFO, ("SEARCH: Ignore unsupported ucPhyTypeSet = %x\n", prBssDesc->ucPhyTypeSet)); continue; } //4 <2.2> Check if has unknown NonHT BSS Basic Rate Set. if (prBssDesc->fgIsUnknownBssBasicRate) { continue; } //4 <2.3> Check if fixed operation cases should be aware if (fgIsFixedChannel == TRUE && (prBssDesc->eBand != eBand || prBssDesc->ucChannelNum != ucChannel)) { continue; } //4 <2.4> Check if the channel is legal under regulatory domain if(rlmDomainIsLegalChannel(prAdapter, prBssDesc->eBand, prBssDesc->ucChannelNum) == FALSE) { continue; } //4 <2.5> Check if this BSS_DESC_T is stale if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime, SEC_TO_SYSTIME(SCN_BSS_DESC_REMOVE_TIMEOUT_SEC)) ) { continue; } //4 <3> Check if reach the excessive join retry limit /* NOTE(Kevin): STA_RECORD_T is recorded by TA. */ prStaRec = cnmGetStaRecByAddress(prAdapter, (UINT_8) eNetTypeIndex, prBssDesc->aucSrcAddr); if (prStaRec) { /* NOTE(Kevin): * The Status Code is the result of a Previous Connection Request, we use this as SCORE for choosing a proper * candidate (Also used for compare see <6>) * The Reason Code is an indication of the reason why AP reject us, we use this Code for "Reject" * a SCAN result to become our candidate(Like a blacklist). 
*/ #if 0 /* TODO(Kevin): */ if (prStaRec->u2ReasonCode != REASON_CODE_RESERVED) { DBGLOG(SCN, INFO, ("SEARCH: Ignore BSS with previous Reason Code = %d\n", prStaRec->u2ReasonCode)); continue; } else #endif if (prStaRec->u2StatusCode != STATUS_CODE_SUCCESSFUL) { /* NOTE(Kevin): greedy association - after timeout, we'll still * try to associate to the AP whose STATUS of conection attempt * was not success. * We may also use (ucJoinFailureCount x JOIN_RETRY_INTERVAL_SEC) for * time bound. */ if ((prStaRec->ucJoinFailureCount < JOIN_MAX_RETRY_FAILURE_COUNT) || (CHECK_FOR_TIMEOUT(rCurrentTime, prStaRec->rLastJoinTime, SEC_TO_SYSTIME(JOIN_RETRY_INTERVAL_SEC)))) { /* NOTE(Kevin): Every JOIN_RETRY_INTERVAL_SEC interval, we can retry * JOIN_MAX_RETRY_FAILURE_COUNT times. */ if (prStaRec->ucJoinFailureCount >= JOIN_MAX_RETRY_FAILURE_COUNT) { prStaRec->ucJoinFailureCount = 0; } DBGLOG(SCN, INFO, ("SEARCH: Try to join BSS again which has Status Code = %d (Curr = %ld/Last Join = %ld)\n", prStaRec->u2StatusCode, rCurrentTime, prStaRec->rLastJoinTime)); } else { DBGLOG(SCN, INFO, ("SEARCH: Ignore BSS which reach maximum Join Retry Count = %d \n", JOIN_MAX_RETRY_FAILURE_COUNT)); continue; } } } //4 <4> Check for various NETWORK conditions if (eNetTypeIndex == NETWORK_TYPE_AIS_INDEX) { //4 <4.1> Check BSS Type for the corresponding Operation Mode in Connection Setting /* NOTE(Kevin): For NET_TYPE_AUTO_SWITCH, we will always pass following check. */ if (((prConnSettings->eOPMode == NET_TYPE_INFRA) && (prBssDesc->eBSSType != BSS_TYPE_INFRASTRUCTURE)) || ((prConnSettings->eOPMode == NET_TYPE_IBSS || prConnSettings->eOPMode == NET_TYPE_DEDICATED_IBSS) && (prBssDesc->eBSSType != BSS_TYPE_IBSS))) { DBGLOG(SCN, INFO, ("SEARCH: Ignore eBSSType = %s\n", ((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE) ? "INFRASTRUCTURE" : "IBSS"))); continue; } //4 <4.2> Check AP's BSSID if OID_802_11_BSSID has been set. 
if ((prConnSettings->fgIsConnByBssidIssued) && (prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE)) { if (UNEQUAL_MAC_ADDR(prConnSettings->aucBSSID, prBssDesc->aucBSSID)) { DBGLOG(SCN, INFO, ("SEARCH: Ignore due to BSSID was not matched!\n")); continue; } } #if CFG_SUPPORT_ADHOC //4 <4.3> Check for AdHoc Mode if (prBssDesc->eBSSType == BSS_TYPE_IBSS) { OS_SYSTIME rCurrentTime; //4 <4.3.1> Check if this SCAN record has been updated recently for IBSS. /* NOTE(Kevin): Because some STA may change its BSSID frequently after it * create the IBSS - e.g. IPN2220, so we need to make sure we get the new one. * For BSS, if the old record was matched, however it won't be able to pass * the Join Process later. */ GET_CURRENT_SYSTIME(&rCurrentTime); if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime, SEC_TO_SYSTIME(SCN_ADHOC_BSS_DESC_TIMEOUT_SEC))) { DBGLOG(SCN, LOUD, ("SEARCH: Skip old record of BSS Descriptor - BSSID:["MACSTR"]\n\n", MAC2STR(prBssDesc->aucBSSID))); continue; } //4 <4.3.2> Check Peer's capability if (ibssCheckCapabilityForAdHocMode(prAdapter, prBssDesc) == WLAN_STATUS_FAILURE) { DBGLOG(SCN, INFO, ("SEARCH: Ignore BSS DESC MAC: "MACSTR", Capability is not supported for current AdHoc Mode.\n", MAC2STR(prPrimaryBssDesc->aucBSSID))); continue; } //4 <4.3.3> Compare TSF if (prBssInfo->fgIsBeaconActivated && UNEQUAL_MAC_ADDR(prBssInfo->aucBSSID, prBssDesc->aucBSSID)) { DBGLOG(SCN, LOUD, ("SEARCH: prBssDesc->fgIsLargerTSF = %d\n", prBssDesc->fgIsLargerTSF)); if (!prBssDesc->fgIsLargerTSF) { DBGLOG(SCN, INFO, ("SEARCH: Ignore BSS DESC MAC: ["MACSTR"], Smaller TSF\n", MAC2STR(prBssDesc->aucBSSID))); continue; } } } #endif /* CFG_SUPPORT_ADHOC */ } #if 0 /* TODO(Kevin): For IBSS */ //4 <2.c> Check if this SCAN record has been updated recently for IBSS. /* NOTE(Kevin): Because some STA may change its BSSID frequently after it * create the IBSS, so we need to make sure we get the new one. 
* For BSS, if the old record was matched, however it won't be able to pass * the Join Process later. */ if (prBssDesc->eBSSType == BSS_TYPE_IBSS) { OS_SYSTIME rCurrentTime; GET_CURRENT_SYSTIME(&rCurrentTime); if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime, SEC_TO_SYSTIME(BSS_DESC_TIMEOUT_SEC))) { DBGLOG(SCAN, TRACE, ("Skip old record of BSS Descriptor - BSSID:["MACSTR"]\n\n", MAC2STR(prBssDesc->aucBSSID))); continue; } } if ((prBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE) && (prAdapter->eConnectionState == MEDIA_STATE_CONNECTED)) { OS_SYSTIME rCurrentTime; GET_CURRENT_SYSTIME(&rCurrentTime); if (CHECK_FOR_TIMEOUT(rCurrentTime, prBssDesc->rUpdateTime, SEC_TO_SYSTIME(BSS_DESC_TIMEOUT_SEC))) { DBGLOG(SCAN, TRACE, ("Skip old record of BSS Descriptor - BSSID:["MACSTR"]\n\n", MAC2STR(prBssDesc->aucBSSID))); continue; } } //4 <4B> Check for IBSS AdHoc Mode. /* Skip if one or more BSS Basic Rate are not supported by current AdHocMode */ if (prPrimaryBssDesc->eBSSType == BSS_TYPE_IBSS) { //4 <4B.1> Check if match the Capability of current IBSS AdHoc Mode. if (ibssCheckCapabilityForAdHocMode(prAdapter, prPrimaryBssDesc) == WLAN_STATUS_FAILURE) { DBGLOG(SCAN, TRACE, ("Ignore BSS DESC MAC: "MACSTR", Capability is not supported for current AdHoc Mode.\n", MAC2STR(prPrimaryBssDesc->aucBSSID))); continue; } //4 <4B.2> IBSS Merge Decision Flow for SEARCH STATE. 
if (prAdapter->fgIsIBSSActive && UNEQUAL_MAC_ADDR(prBssInfo->aucBSSID, prPrimaryBssDesc->aucBSSID)) { if (!fgIsLocalTSFRead) { NIC_GET_CURRENT_TSF(prAdapter, &rCurrentTsf); DBGLOG(SCAN, TRACE, ("\n\nCurrent TSF : %08lx-%08lx\n\n", rCurrentTsf.u.HighPart, rCurrentTsf.u.LowPart)); } if (rCurrentTsf.QuadPart > prPrimaryBssDesc->u8TimeStamp.QuadPart) { DBGLOG(SCAN, TRACE, ("Ignore BSS DESC MAC: ["MACSTR"], Current BSSID: ["MACSTR"].\n", MAC2STR(prPrimaryBssDesc->aucBSSID), MAC2STR(prBssInfo->aucBSSID))); DBGLOG(SCAN, TRACE, ("\n\nBSS's TSF : %08lx-%08lx\n\n", prPrimaryBssDesc->u8TimeStamp.u.HighPart, prPrimaryBssDesc->u8TimeStamp.u.LowPart)); prPrimaryBssDesc->fgIsLargerTSF = FALSE; continue; } else { prPrimaryBssDesc->fgIsLargerTSF = TRUE; } } } //4 <5> Check the Encryption Status. if (rsnPerformPolicySelection(prPrimaryBssDesc)) { if (prPrimaryBssDesc->ucEncLevel > 0) { fgIsFindBestEncryptionLevel = TRUE; fgIsFindFirst = FALSE; } } else { /* Can't pass the Encryption Status Check, get next one */ continue; } /* For RSN Pre-authentication, update the PMKID canidate list for same SSID and encrypt status */ /* Update PMKID candicate list. */ if (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2) { rsnUpdatePmkidCandidateList(prPrimaryBssDesc); if (prAdapter->rWifiVar.rAisBssInfo.u4PmkidCandicateCount) { prAdapter->rWifiVar.rAisBssInfo.fgIndicatePMKID = rsnCheckPmkidCandicate(); } } #endif prPrimaryBssDesc = (P_BSS_DESC_T)NULL; //4 <6> Check current Connection Policy. switch (prConnSettings->eConnectionPolicy) { case CONNECT_BY_SSID_BEST_RSSI: /* Choose Hidden SSID to join only if the `fgIsEnableJoin...` is TRUE */ if (prAdapter->rWifiVar.fgEnableJoinToHiddenSSID && prBssDesc->fgIsHiddenSSID) { /* NOTE(Kevin): following if () statement means that * If Target is hidden, then we won't connect when user specify SSID_ANY policy. 
*/ if (prConnSettings->ucSSIDLen) { prPrimaryBssDesc = prBssDesc; fgIsFindBestRSSI = TRUE; } } else if (EQUAL_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen, prConnSettings->aucSSID, prConnSettings->ucSSIDLen)) { prPrimaryBssDesc = prBssDesc; fgIsFindBestRSSI = TRUE; } break; case CONNECT_BY_SSID_ANY: /* NOTE(Kevin): In this policy, we don't know the desired * SSID from user, so we should exclude the Hidden SSID from scan list. * And because we refuse to connect to Hidden SSID node at the beginning, so * when the JOIN Module deal with a BSS_DESC_T which has fgIsHiddenSSID == TRUE, * then the Connection Settings must be valid without doubt. */ if (!prBssDesc->fgIsHiddenSSID) { prPrimaryBssDesc = prBssDesc; fgIsFindFirst = TRUE; } break; case CONNECT_BY_BSSID: if(EQUAL_MAC_ADDR(prBssDesc->aucBSSID, prConnSettings->aucBSSID)) { prPrimaryBssDesc = prBssDesc; } break; default: break; } /* Primary Candidate was not found */ if (prPrimaryBssDesc == NULL) { continue; } //4 <7> Check the Encryption Status. if (prPrimaryBssDesc->eBSSType == BSS_TYPE_INFRASTRUCTURE) { #if CFG_SUPPORT_WAPI if (prAdapter->rWifiVar.rConnSettings.fgWapiMode) { if (wapiPerformPolicySelection(prAdapter, prPrimaryBssDesc)) { fgIsFindFirst = TRUE; } else { /* Can't pass the Encryption Status Check, get next one */ continue; } } else #endif #if CFG_RSN_MIGRATION if (rsnPerformPolicySelection(prAdapter, prPrimaryBssDesc)) { if (prAisSpecBssInfo->fgCounterMeasure) { DBGLOG(RSN, INFO, ("Skip while at counter measure period!!!\n")); continue; } if (prPrimaryBssDesc->ucEncLevel > 0) { fgIsFindBestEncryptionLevel = TRUE; fgIsFindFirst = FALSE; } #if 0 /* Update PMKID candicate list. 
*/ if (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2) { rsnUpdatePmkidCandidateList(prPrimaryBssDesc); if (prAisSpecBssInfo->u4PmkidCandicateCount) { if (rsnCheckPmkidCandicate()) { DBGLOG(RSN, WARN, ("Prepare a timer to indicate candidate "MACSTR"\n", MAC2STR(prAisSpecBssInfo->arPmkidCache[prAisSpecBssInfo->u4PmkidCacheCount].rBssidInfo.aucBssid))); cnmTimerStopTimer(&prAisSpecBssInfo->rPreauthenticationTimer); cnmTimerStartTimer(&prAisSpecBssInfo->rPreauthenticationTimer, SEC_TO_MSEC(WAIT_TIME_IND_PMKID_CANDICATE_SEC)); } } } #endif } else { /* Can't pass the Encryption Status Check, get next one */ continue; } #endif } else { /* Todo:: P2P and BOW Policy Selection */ } prPrimaryStaRec = prStaRec; //4 <8> Compare the Candidate and the Primary Scan Record. if (!prCandidateBssDesc) { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; //4 <8.1> Condition - Get the first matched one. if (fgIsFindFirst) { break; } } else { #if 0 /* TODO(Kevin): For security(TBD) */ //4 <6B> Condition - Choose the one with best Encryption Score. if (fgIsFindBestEncryptionLevel) { if (prCandidateBssDesc->ucEncLevel < prPrimaryBssDesc->ucEncLevel) { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } /* If reach here, that means they have the same Encryption Score. */ //4 <6C> Condition - Give opportunity to the one we didn't connect before. // For roaming, only compare the candidates other than current associated BSSID. 
if (!prCandidateBssDesc->fgIsConnected && !prPrimaryBssDesc->fgIsConnected) { if ((prCandidateStaRec != (P_STA_RECORD_T)NULL) && (prCandidateStaRec->u2StatusCode != STATUS_CODE_SUCCESSFUL)) { DBGLOG(SCAN, TRACE, ("So far -BSS DESC MAC: "MACSTR" has nonzero Status Code = %d\n", MAC2STR(prCandidateBssDesc->aucBSSID), prCandidateStaRec->u2StatusCode)); if (prPrimaryStaRec != (P_STA_RECORD_T)NULL) { if (prPrimaryStaRec->u2StatusCode != STATUS_CODE_SUCCESSFUL) { /* Give opportunity to the one with smaller rLastJoinTime */ if (TIME_BEFORE(prCandidateStaRec->rLastJoinTime, prPrimaryStaRec->rLastJoinTime)) { continue; } /* We've connect to CANDIDATE recently, let us try PRIMARY now */ else { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } /* PRIMARY's u2StatusCode = 0 */ else { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } /* PRIMARY has no StaRec - We didn't connet to PRIMARY before */ else { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } else { if ((prPrimaryStaRec != (P_STA_RECORD_T)NULL) && (prPrimaryStaRec->u2StatusCode != STATUS_CODE_SUCCESSFUL)) { continue; } } } #endif //4 <6D> Condition - Visible SSID win Hidden SSID. if (prCandidateBssDesc->fgIsHiddenSSID) { if (!prPrimaryBssDesc->fgIsHiddenSSID) { prCandidateBssDesc = prPrimaryBssDesc; /* The non Hidden SSID win. */ prCandidateStaRec = prPrimaryStaRec; continue; } } else { if (prPrimaryBssDesc->fgIsHiddenSSID) { continue; } } //4 <6E> Condition - Choose the one with better RCPI(RSSI). if (fgIsFindBestRSSI) { /* TODO(Kevin): We shouldn't compare the actual value, we should * allow some acceptable tolerance of some RSSI percentage here. 
*/ DBGLOG(SCN, TRACE, ("Candidate ["MACSTR"]: RCPI = %d, Primary ["MACSTR"]: RCPI = %d\n", MAC2STR(prCandidateBssDesc->aucBSSID), prCandidateBssDesc->ucRCPI, MAC2STR(prPrimaryBssDesc->aucBSSID), prPrimaryBssDesc->ucRCPI)); ASSERT(!(prCandidateBssDesc->fgIsConnected && prPrimaryBssDesc->fgIsConnected)); /* NOTE: To prevent SWING, we do roaming only if target AP has at least 5dBm larger than us. */ if (prCandidateBssDesc->fgIsConnected) { if (prCandidateBssDesc->ucRCPI + ROAMING_NO_SWING_RCPI_STEP <= prPrimaryBssDesc->ucRCPI) { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } else if (prPrimaryBssDesc->fgIsConnected) { if (prCandidateBssDesc->ucRCPI < prPrimaryBssDesc->ucRCPI + ROAMING_NO_SWING_RCPI_STEP) { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } else if (prCandidateBssDesc->ucRCPI < prPrimaryBssDesc->ucRCPI) { prCandidateBssDesc = prPrimaryBssDesc; prCandidateStaRec = prPrimaryStaRec; continue; } } #if 0 /* If reach here, that means they have the same Encryption Score, and * both RSSI value are close too. */ //4 <6F> Seek the minimum Channel Load for less interference. if (fgIsFindMinChannelLoad) { /* TODO(Kevin): Check which one has minimum channel load in its channel */ } #endif } } return prCandidateBssDesc; } /* end of scanSearchBssDescByPolicy() */
gpl-2.0
tyler6389/count_kernel_grand
arch/arm/mach-exynos/setup-fb-s5p.c
264
22001
/* linux/arch/arm/mach-exynos/setup-fb-s5p.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * Base FIMD controller configuration * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/gcd.h> #include <mach/regs-clock.h> #include <mach/regs-gpio.h> #include <mach/map.h> #include <mach/gpio.h> #include <mach/board_rev.h> #include <plat/clock.h> #include <plat/gpio-cfg.h> #include <plat/cpu.h> #include <plat/clock-clksrc.h> #if defined(CONFIG_S5P_DSIM_SWITCHABLE_DUAL_LCD) #include <../../../drivers/video/samsung_duallcd/s3cfb.h> #else #include <../../../drivers/video/samsung/s3cfb.h> /* should be fixed */ #endif struct platform_device; /* don't need the contents */ #ifdef CONFIG_FB_S5P void s3cfb_set_display_path(void) { u32 reg; #ifdef CONFIG_FB_S5P_MDNIE reg = __raw_readl(S3C_VA_SYS + 0x0210); reg &= ~(1<<13); reg &= ~(1<<12); reg &= ~(3<<10); reg |= (1<<0); reg &= ~(1<<1); __raw_writel(reg, S3C_VA_SYS + 0x0210); #else reg = __raw_readl(S3C_VA_SYS + 0x0210); reg |= (1<<1); __raw_writel(reg, S3C_VA_SYS + 0x0210); #endif } #if !defined(CONFIG_FB_S5P_MIPI_DSIM) static void s3cfb_gpio_setup_24bpp(unsigned int start, unsigned int size, unsigned int cfg, s5p_gpio_drvstr_t drvstr) { s3c_gpio_cfgrange_nopull(start, size, cfg); for (; size > 0; size--, start++) s5p_gpio_set_drvstr(start, drvstr); } #endif #if defined(CONFIG_FB_S5P_WA101S) || defined(CONFIG_FB_S5P_LTE480WV) void s3cfb_cfg_gpio(struct platform_device *pdev) { s3cfb_gpio_setup_24bpp(EXYNOS4_GPF0(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV4); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF1(0), 8, S3C_GPIO_SFN(2), 
S5P_GPIO_DRVSTR_LV4); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF2(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV4); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF3(0), 4, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV4); } #elif defined(CONFIG_FB_S5P_AMS369FG06) void s3cfb_cfg_gpio(struct platform_device *pdev) { s3cfb_gpio_setup_24bpp(EXYNOS4_GPF0(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF1(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF2(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF3(0), 4, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); } #elif defined(CONFIG_FB_S5P_LMS501KF03) void s3cfb_cfg_gpio(struct platform_device *pdev) { s3cfb_gpio_setup_24bpp(EXYNOS4_GPF0(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV4); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF1(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF2(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF3(0), 4, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); } #elif defined(CONFIG_FB_S5P_HT101HD1) void s3cfb_cfg_gpio(struct platform_device *pdev) { s3cfb_gpio_setup_24bpp(EXYNOS4_GPF0(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF1(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF2(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF3(0), 4, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV1); } #elif defined(CONFIG_FB_S5P_LD9040) || defined(CONFIG_FB_S5P_S6F1202A) void s3cfb_cfg_gpio(struct platform_device *pdev) { s3cfb_gpio_setup_24bpp(EXYNOS4_GPF0(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV4); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF1(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV4); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF2(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV4); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF3(0), 4, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV4); } #elif defined(CONFIG_FB_S5P_S6C1372) void s3cfb_cfg_gpio(struct platform_device *pdev) { 
s3cfb_gpio_setup_24bpp(EXYNOS4_GPF0(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV2); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF1(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV2); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF2(0), 8, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV2); s3cfb_gpio_setup_24bpp(EXYNOS4_GPF3(0), 4, S3C_GPIO_SFN(2), S5P_GPIO_DRVSTR_LV2); } #else void s3cfb_cfg_gpio(struct platform_device *pdev) { /* do not modify this #else function, if you want another rgb gpio configuration plz add another one */ } #endif #endif #if defined(CONFIG_FB_S5P_WA101S) int s3cfb_backlight_on(struct platform_device *pdev) { #if !defined(CONFIG_BACKLIGHT_PWM) int err; err = gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_HIGH, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for " "lcd backlight control\n"); return err; } gpio_free(EXYNOS4_GPD0(1)); #endif return 0; } int s3cfb_backlight_off(struct platform_device *pdev) { #if !defined(CONFIG_BACKLIGHT_PWM) int err; err = gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_LOW, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for " "lcd backlight control\n"); return err; } gpio_free(EXYNOS4_GPD0(1)); #endif return 0; } int s3cfb_lcd_on(struct platform_device *pdev) { return 0; } int s3cfb_lcd_off(struct platform_device *pdev) { return 0; } #elif defined(CONFIG_FB_S5P_LTE480WV) int s3cfb_backlight_on(struct platform_device *pdev) { #if !defined(CONFIG_BACKLIGHT_PWM) int err; err = gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_HIGH, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for " "lcd backlight control\n"); return err; } gpio_free(EXYNOS4_GPD0(1)); #endif return 0; } int s3cfb_backlight_off(struct platform_device *pdev) { #if !defined(CONFIG_BACKLIGHT_PWM) int err; err = gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_LOW, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for " "lcd backlight control\n"); return err; } gpio_free(EXYNOS4_GPD0(1)); #endif return 0; } int s3cfb_lcd_on(struct 
platform_device *pdev) { int err; err = gpio_request_one(EXYNOS4_GPX0(6), GPIOF_OUT_INIT_HIGH, "GPX0"); if (err) { printk(KERN_ERR "failed to request GPX0 for " "lcd reset control\n"); return err; } msleep(100); gpio_set_value(EXYNOS4_GPX0(6), 0); msleep(10); gpio_set_value(EXYNOS4_GPX0(6), 1); msleep(10); gpio_free(EXYNOS4_GPX0(6)); return 0; } int s3cfb_lcd_off(struct platform_device *pdev) { return 0; } #elif defined(CONFIG_FB_S5P_HT101HD1) int s3cfb_backlight_on(struct platform_device *pdev) { #if !defined(CONFIG_BACKLIGHT_PWM) int err; /* Backlight High */ err = gpio_request_one(EXYNOS4_GPD0(0), GPIOF_OUT_INIT_HIGH, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for " "lcd backlight control\n"); return err; } gpio_free(EXYNOS4_GPD0(0)); /* LED_EN (SPI1_MOSI) High */ err = gpio_request_one(EXYNOS4_GPB(2), GPIOF_OUT_INIT_HIGH, "GPB"); if (err) { printk(KERN_ERR "failed to request GPB for " "lcd LED_EN control\n"); return err; } gpio_free(EXYNOS4_GPB(2)); #endif return 0; } int s3cfb_backlight_off(struct platform_device *pdev) { #if !defined(CONFIG_BACKLIGHT_PWM) int err; /* Backlight Low */ err = gpio_request_one(EXYNOS4_GPD0(0), GPIOF_OUT_INIT_LOW, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for " "lcd backlight control\n"); return err; } gpio_free(EXYNOS4_GPD0(0)); /* LED_EN (SPI1_MOSI) Low */ err = gpio_request_one(EXYNOS4_GPB(2), GPIOF_OUT_INIT_LOW, "GPB"); if (err) { printk(KERN_ERR "failed to request GPB for " "lcd LED_EN control\n"); return err; } gpio_free(EXYNOS4_GPB(2)); #endif return 0; } int s3cfb_lcd_on(struct platform_device *pdev) { int err; err = gpio_request_one(EXYNOS4_GPH0(1), GPIOF_OUT_INIT_HIGH, "GPH0"); if (err) { printk(KERN_ERR "failed to request GPH0 for " "lcd reset control\n"); return err; } gpio_set_value(EXYNOS4_GPH0(1), 0); gpio_set_value(EXYNOS4_GPH0(1), 1); gpio_free(EXYNOS4_GPH0(1)); return 0; } int s3cfb_lcd_off(struct platform_device *pdev) { return 0; } #elif defined(CONFIG_FB_S5P_AMS369FG06) || 
defined(CONFIG_FB_S5P_LMS501KF03) int s3cfb_backlight_on(struct platform_device *pdev) { #if !defined(CONFIG_BACKLIGHT_PWM) int err; err = gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_HIGH, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for " "lcd backlight control\n"); return err; } gpio_free(EXYNOS4_GPD0(1)); #endif return 0; } int s3cfb_backlight_off(struct platform_device *pdev) { #if !defined(CONFIG_BACKLIGHT_PWM) int err; err = gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_LOW, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for " "lcd backlight control\n"); return err; } gpio_free(EXYNOS4_GPD0(1)); #endif return 0; } int s3cfb_lcd_on(struct platform_device *pdev) { int err; #ifdef CONFIG_MACH_SMDKC210 err = gpio_request_one(EXYNOS4_GPX0(6), GPIOF_OUT_INIT_HIGH, "GPX0"); if (err) { printk(KERN_ERR "failed to request GPX0 for " "lcd reset control\n"); return err; } gpio_set_value(EXYNOS4_GPX0(6), 0); mdelay(1); gpio_set_value(EXYNOS4_GPX0(6), 1); gpio_free(EXYNOS4_GPX0(6)); #elif defined(CONFIG_MACH_SMDK4X12) if (samsung_board_rev_is_0_1()) { err = gpio_request_one(EXYNOS4212_GPM3(6), GPIOF_OUT_INIT_HIGH, "GPM3"); if (err) { printk(KERN_ERR "failed to request GPM3 for " "lcd reset control\n"); return err; } gpio_set_value(EXYNOS4212_GPM3(6), 0); mdelay(1); gpio_set_value(EXYNOS4212_GPM3(6), 1); gpio_free(EXYNOS4212_GPM3(6)); } else { err = gpio_request_one(EXYNOS4_GPX1(5), GPIOF_OUT_INIT_HIGH, "GPX0"); if (err) { printk(KERN_ERR "failed to request GPX0 for " "lcd reset control\n"); return err; } gpio_set_value(EXYNOS4_GPX1(5), 0); mdelay(1); gpio_set_value(EXYNOS4_GPX1(5), 1); gpio_free(EXYNOS4_GPX1(5)); } #endif return 0; } int s3cfb_lcd_off(struct platform_device *pdev) { return 0; } #elif defined(CONFIG_FB_S5P_S6C1372) && !defined(CONFIG_FB_MDNIE_PWM) int s3cfb_backlight_on(struct platform_device *pdev) { gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_HIGH, "GPD0"); gpio_free(EXYNOS4_GPD0(1)); return 0; } int 
s3cfb_backlight_off(struct platform_device *pdev) { gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_LOW, "GPD0"); gpio_free(EXYNOS4_GPD0(1)); return 0; } int s3cfb_lcd_on(struct platform_device *pdev) { int err; err = gpio_request_one(EXYNOS4_GPC0(1), GPIOF_OUT_INIT_LOW, "GPC0"); if (err) { printk(KERN_ERR "failed to request GPC0 for " "lcd backlight control\n"); return err; } gpio_set_value(EXYNOS4_GPC0(1), GPIO_LEVEL_HIGH); msleep(40); /* LVDS_N_SHDN to low */ err = gpio_request_one(EXYNOS4212_GPM0(5), GPIOF_OUT_INIT_LOW, "GPM0"); if (err) { printk(KERN_ERR "failed to request GPM0 for " "lcd backlight control\n"); return err; } gpio_set_value(EXYNOS4212_GPM0(5), GPIO_LEVEL_HIGH); msleep(300); err = gpio_request_one(EXYNOS4212_GPM0(1), GPIOF_OUT_INIT_LOW, "GPM0"); if (err) { printk(KERN_ERR "failed to request GPM0 for " "lcd backlight control\n"); return err; } gpio_set_value(EXYNOS4212_GPM0(1), GPIO_LEVEL_HIGH); mdelay(2); return 0; } int s3cfb_lcd_off(struct platform_device *pdev) { gpio_set_value(EXYNOS4212_GPM0(1), GPIO_LEVEL_LOW); mdelay(200); /* LVDS_N_SHDN to low */ gpio_set_value(EXYNOS4212_GPM0(5), GPIO_LEVEL_LOW); msleep(40); gpio_set_value(EXYNOS4_GPC0(1), GPIO_LEVEL_LOW); msleep(400); return 0; } #elif defined(CONFIG_FB_S5P_S6C1372) || defined(CONFIG_FB_S5P_S6F1202A) int s3cfb_backlight_on(struct platform_device *pdev) { return 0; } int s3cfb_backlight_off(struct platform_device *pdev) { return 0; } int s3cfb_lcd_on(struct platform_device *pdev) { #if !defined(CONFIG_FB_MDNIE_PWM) int err; err = gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_HIGH, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for lcd reset control\n"); return err; } gpio_set_value(GPIO_LCD_EN, GPIO_LEVEL_HIGH); msleep(40); /* LVDS_N_SHDN to high*/ gpio_set_value(GPIO_LVDS_NSHDN, GPIO_LEVEL_HIGH); msleep(300); #if defined(CONFIG_FB_S5P_S6C1372) gpio_set_value(GPIO_LED_BACKLIGHT_RESET, GPIO_LEVEL_HIGH); mdelay(2); #else gpio_set_value(GPIO_LCD_LDO_EN, GPIO_LEVEL_HIGH); 
msleep(200); #endif gpio_set_value(EXYNOS4_GPD0(1), GPIO_LEVEL_HIGH); gpio_free(EXYNOS4_GPD0(1)); #endif return 0; } int s3cfb_lcd_off(struct platform_device *pdev) { #if !defined(CONFIG_FB_MDNIE_PWM) int err; err = gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_LOW, "GPD0"); if (err) { printk(KERN_ERR "failed to request GPD0 for " "lcd reset control\n"); return err; } /* LVDS_nSHDN low*/ gpio_set_value(EXYNOS4_GPD0(1), GPIO_LEVEL_LOW); gpio_free(EXYNOS4_GPD0(1)); #if defined(CONFIG_FB_S5P_S6C1372) gpio_set_value(GPIO_LED_BACKLIGHT_RESET, GPIO_LEVEL_LOW); msleep(200); #else gpio_set_value(GPIO_LCD_LDO_EN, GPIO_LEVEL_LOW); msleep(200); #endif /* LVDS_nSHDN low*/ gpio_set_value(GPIO_LVDS_NSHDN, GPIO_LEVEL_LOW); msleep(40); /* Disable LVDS Panel Power, 1.2, 1.8, display 3.3V */ gpio_set_value(GPIO_LCD_EN, GPIO_LEVEL_LOW); msleep(400); #endif return 0; } #else int s3cfb_backlight_on(struct platform_device *pdev) { return 0; } int s3cfb_backlight_off(struct platform_device *pdev) { return 0; } int s3cfb_lcd_on(struct platform_device *pdev) { return 0; } int s3cfb_lcd_off(struct platform_device *pdev) { return 0; } #endif #ifdef CONFIG_FB_S5P_MIPI_DSIM int s3cfb_mipi_clk_enable(int enable) { struct clk *dsim_clk = NULL; dsim_clk = clk_get(NULL, "dsim0"); if (IS_ERR(dsim_clk)) { printk(KERN_ERR "failed to get ip clk for dsim0\n"); goto err_clk0; } if (enable) clk_enable(dsim_clk); else clk_disable(dsim_clk); clk_put(dsim_clk); return 0; err_clk0: clk_put(dsim_clk); return -EINVAL; } #endif int s3cfb_mdnie_clk_on(u32 rate) { struct clk *sclk = NULL; struct clk *mout_mpll = NULL; struct clk *mdnie_clk = NULL; int ret = 0; mdnie_clk = clk_get(NULL, "mdnie0"); /* CLOCK GATE IP ENABLE */ if (IS_ERR(mdnie_clk)) { printk(KERN_ERR "failed to get ip clk for mdnie0\n"); goto err_clk0; } clk_enable(mdnie_clk); clk_put(mdnie_clk); sclk = clk_get(NULL, "sclk_mdnie"); if (IS_ERR(sclk)) { printk(KERN_ERR "failed to get sclk for mdnie\n"); goto err_clk1; } if (soc_is_exynos4210()) 
mout_mpll = clk_get(NULL, "mout_mpll"); else mout_mpll = clk_get(NULL, "mout_mpll_user"); if (IS_ERR(mout_mpll)) { printk(KERN_ERR "failed to get mout_mpll\n"); goto err_clk2; } clk_set_parent(sclk, mout_mpll); if (!rate) rate = 800 * MHZ; ret = clk_set_rate(sclk, rate); clk_put(mout_mpll); clk_enable(sclk); return 0; err_clk1: clk_put(mout_mpll); err_clk2: clk_put(sclk); err_clk0: clk_put(mdnie_clk); return -EINVAL; } int s3cfb_mdnie_pwm_clk_on(void) { struct clk *sclk = NULL; struct clk *sclk_pre = NULL; struct clk *mout_mpll = NULL; u32 rate = 0; sclk = clk_get(NULL, "sclk_mdnie_pwm"); if (IS_ERR(sclk)) { printk(KERN_ERR "failed to get sclk for mdnie_pwm\n"); goto err_clk1; } sclk_pre = clk_get(NULL, "sclk_mdnie_pwm_pre"); if (IS_ERR(sclk_pre)) { printk(KERN_ERR "failed to get sclk for mdnie_pwm_pre\n"); goto err_clk2; } #if defined(CONFIG_FB_S5P_S6C1372) mout_mpll = clk_get(NULL, "xusbxti"); if (IS_ERR(mout_mpll)) { printk(KERN_ERR "failed to get mout_mpll\n"); goto err_clk3; } clk_set_parent(sclk, mout_mpll); rate = clk_round_rate(sclk, 2200000); if (!rate) rate = 2200000; clk_set_rate(sclk, rate); printk(KERN_INFO "set mdnie_pwm sclk rate to %d\n", rate); clk_set_parent(sclk_pre, mout_mpll); rate = clk_round_rate(sclk_pre, 22000000); if (!rate) rate = 22000000; clk_set_rate(sclk_pre, rate); #elif defined(CONFIG_FB_S5P_S6F1202A) if (soc_is_exynos4210()) mout_mpll = clk_get(NULL, "mout_mpll"); else mout_mpll = clk_get(NULL, "mout_mpll_user"); if (IS_ERR(mout_mpll)) { printk(KERN_ERR "failed to get mout_mpll\n"); goto err_clk3; } clk_set_parent(sclk, mout_mpll); rate = clk_round_rate(sclk, 50000000); if (!rate) rate = 50000000; clk_set_rate(sclk, rate); printk(KERN_INFO "set mdnie_pwm sclk rate to %d\n", rate); clk_set_parent(sclk_pre, mout_mpll); rate = clk_round_rate(sclk_pre, 160000000); if (!rate) rate = 160000000; clk_set_rate(sclk_pre, rate); #else if (soc_is_exynos4210()) mout_mpll = clk_get(NULL, "mout_mpll"); else mout_mpll = clk_get(NULL, 
"mout_mpll_user"); if (IS_ERR(mout_mpll)) { printk(KERN_ERR "failed to get mout_mpll\n"); goto err_clk3; } clk_set_parent(sclk, mout_mpll); rate = 57500000; clk_set_rate(sclk, rate); #endif printk(KERN_INFO "set mdnie_pwm sclk rate to %d\n", rate); clk_put(mout_mpll); clk_enable(sclk); return 0; err_clk3: clk_put(mout_mpll); err_clk2: clk_put(sclk_pre); err_clk1: clk_put(sclk); return -EINVAL; } unsigned int get_clk_rate(struct platform_device *pdev, struct clk *sclk) { struct s3c_platform_fb *pdata = pdev->dev.platform_data; struct s3cfb_lcd *lcd = (struct s3cfb_lcd *)pdata->lcd; struct s3cfb_lcd_timing *timing = &lcd->timing; u32 src_clk, vclk, div, rate; u32 vclk_limit, div_limit, fimd_div; src_clk = clk_get_rate(sclk); vclk = (lcd->freq * (timing->h_bp + timing->h_fp + timing->h_sw + lcd->width) * (timing->v_bp + timing->v_fp + timing->v_sw + lcd->height)); if (!vclk) vclk = src_clk; div = DIV_ROUND_CLOSEST(src_clk, vclk); if (lcd->freq_limit) { vclk_limit = (lcd->freq_limit * (timing->h_bp + timing->h_fp + timing->h_sw + lcd->width) * (timing->v_bp + timing->v_fp + timing->v_sw + lcd->height)); div_limit = DIV_ROUND_CLOSEST(src_clk, vclk_limit); fimd_div = gcd(div, div_limit); div /= fimd_div; } if (!div) { dev_err(&pdev->dev, "div(%d) should be non-zero\n", div); div = 1; } else if (div > 16) { dev_err(&pdev->dev, "div(%d) max should be 16\n", div); for (fimd_div = 2; fimd_div < div; div++) { if (div%fimd_div == 0) break; } div /= fimd_div; div = (div > 16) ? 
16 : div; } rate = src_clk / div; if ((src_clk % rate) && (div != 1)) { div--; rate = src_clk / div; if (!(src_clk % rate)) rate--; } dev_info(&pdev->dev, "vclk=%d, div=%d(%d), rate=%d\n", vclk, DIV_ROUND_CLOSEST(src_clk, vclk), div, rate); return rate; } int s3cfb_clk_on(struct platform_device *pdev, struct clk **s3cfb_clk) { struct clk *sclk = NULL; struct clk *mout_mpll = NULL; struct clk *lcd_clk = NULL; struct clksrc_clk *src_clk = NULL; u32 clkdiv = 0; struct s3c_platform_fb *pdata = pdev->dev.platform_data; struct s3cfb_lcd *lcd = (struct s3cfb_lcd *)pdata->lcd; u32 rate = 0; int ret = 0; lcd_clk = clk_get(&pdev->dev, "lcd"); if (IS_ERR(lcd_clk)) { dev_err(&pdev->dev, "failed to get operation clk for fimd\n"); goto err_clk0; } ret = clk_enable(lcd_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to clk_enable of lcd clk for fimd\n"); goto err_clk0; } clk_put(lcd_clk); sclk = clk_get(&pdev->dev, "sclk_fimd"); if (IS_ERR(sclk)) { dev_err(&pdev->dev, "failed to get sclk for fimd\n"); goto err_clk1; } if (soc_is_exynos4210()) mout_mpll = clk_get(&pdev->dev, "mout_mpll"); else mout_mpll = clk_get(&pdev->dev, "mout_mpll_user"); if (IS_ERR(mout_mpll)) { dev_err(&pdev->dev, "failed to get mout_mpll for fimd\n"); goto err_clk2; } ret = clk_set_parent(sclk, mout_mpll); if (ret < 0) { dev_err(&pdev->dev, "failed to clk_set_parent for fimd\n"); goto err_clk2; } if (!lcd->vclk) { rate = get_clk_rate(pdev, mout_mpll); if (!rate) rate = 800 * MHZ; /* MOUT PLL */ lcd->vclk = rate; } else rate = lcd->vclk; ret = clk_set_rate(sclk, rate); if (ret < 0) { dev_err(&pdev->dev, "failed to clk_set_rate of sclk for fimd\n"); goto err_clk2; } dev_dbg(&pdev->dev, "set fimd sclk rate to %d\n", rate); clk_put(mout_mpll); ret = clk_enable(sclk); if (ret < 0) { dev_err(&pdev->dev, "failed to clk_enable of sclk for fimd\n"); goto err_clk2; } *s3cfb_clk = sclk; #ifdef CONFIG_FB_S5P_MIPI_DSIM s3cfb_mipi_clk_enable(1); #endif #ifdef CONFIG_FB_S5P_MDNIE s3cfb_mdnie_clk_on(rate); #ifdef 
CONFIG_FB_MDNIE_PWM s3cfb_mdnie_pwm_clk_on(); #endif #endif src_clk = container_of(sclk, struct clksrc_clk, clk); clkdiv = __raw_readl(src_clk->reg_div.reg); dev_info(&pdev->dev, "fimd sclk rate %ld, clkdiv 0x%x\n", clk_get_rate(sclk), clkdiv); return 0; err_clk2: clk_put(mout_mpll); err_clk1: clk_put(sclk); err_clk0: clk_put(lcd_clk); return -EINVAL; } int s3cfb_mdnie_clk_off(void) { struct clk *sclk = NULL; struct clk *mdnie_clk = NULL; mdnie_clk = clk_get(NULL, "mdnie0"); /* CLOCK GATE IP ENABLE */ if (IS_ERR(mdnie_clk)) { printk(KERN_ERR "failed to get ip clk for fimd0\n"); goto err_clk0; } clk_disable(mdnie_clk); clk_put(mdnie_clk); sclk = clk_get(NULL, "sclk_mdnie"); if (IS_ERR(sclk)) printk(KERN_ERR "failed to get sclk for mdnie\n"); clk_disable(sclk); clk_put(sclk); return 0; err_clk0: clk_put(mdnie_clk); return -EINVAL; } int s3cfb_mdnie_pwm_clk_off(void) { struct clk *sclk = NULL; sclk = clk_get(NULL, "sclk_mdnie_pwm"); if (IS_ERR(sclk)) printk(KERN_ERR "failed to get sclk for mdnie_pwm\n"); clk_disable(sclk); clk_put(sclk); return 0; } int s3cfb_clk_off(struct platform_device *pdev, struct clk **clk) { struct clk *lcd_clk = NULL; lcd_clk = clk_get(&pdev->dev, "lcd"); if (IS_ERR(lcd_clk)) { printk(KERN_ERR "failed to get ip clk for fimd0\n"); goto err_clk0; } clk_disable(lcd_clk); clk_put(lcd_clk); clk_disable(*clk); clk_put(*clk); *clk = NULL; #ifdef CONFIG_FB_S5P_MIPI_DSIM s3cfb_mipi_clk_enable(0); #endif #ifdef CONFIG_FB_S5P_MDNIE s3cfb_mdnie_clk_off(); s3cfb_mdnie_pwm_clk_off(); #endif return 0; err_clk0: clk_put(lcd_clk); return -EINVAL; } void s3cfb_get_clk_name(char *clk_name) { strcpy(clk_name, "sclk_fimd"); }
gpl-2.0
wozgeass/Raspberry-RT
drivers/edac/edac_device.c
264
19845
/* * edac_device.c * (C) 2007 www.douglaskthompson.com * * This file may be distributed under the terms of the * GNU General Public License. * * Written by Doug Thompson <norsk5@xmission.com> * * edac_device API implementation * 19 Jan 2007 */ #include <linux/module.h> #include <linux/types.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/highmem.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/ctype.h> #include <linux/workqueue.h> #include <asm/uaccess.h> #include <asm/page.h> #include "edac_core.h" #include "edac_module.h" /* lock for the list: 'edac_device_list', manipulation of this list * is protected by the 'device_ctls_mutex' lock */ static DEFINE_MUTEX(device_ctls_mutex); static LIST_HEAD(edac_device_list); #ifdef CONFIG_EDAC_DEBUG static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) { edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n", edac_dev, edac_dev->dev_idx); edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check); edac_dbg(3, "\tdev = %p\n", edac_dev->dev); edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n", edac_dev->mod_name, edac_dev->ctl_name); edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info); } #endif /* CONFIG_EDAC_DEBUG */ /* * edac_device_alloc_ctl_info() * Allocate a new edac device control info structure * * The control structure is allocated in complete chunk * from the OS. It is in turn sub allocated to the * various objects that compose the structure * * The structure has a 'nr_instance' array within itself. 
 *	Each instance represents a major component
 *		Example: L1 cache and L2 cache are 2 instance components
 *
 *	Within each instance is an array of 'nr_blocks' blockoffsets
 */
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
	unsigned sz_private,
	char *edac_device_name, unsigned nr_instances,
	char *edac_block_name, unsigned nr_blocks,
	unsigned offset_value,		/* zero, 1, or other based offset */
	struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
	int device_index)
{
	struct edac_device_ctl_info *dev_ctl;
	struct edac_device_instance *dev_inst, *inst;
	struct edac_device_block *dev_blk, *blk_p, *blk;
	struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
	unsigned total_size;
	unsigned count;
	unsigned instance, block, attr;
	void *pvt, *p;
	int err;

	edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);

	/* Calculate the size of memory we need to allocate AND
	 * determine the offsets of the various item arrays
	 * (instance,block,attrib) from the start of an allocated structure.
	 * We want the alignment of each item (instance,block,attrib)
	 * to be at least as stringent as what the compiler would
	 * provide if we could simply hardcode everything into a single struct.
	 */
	p = NULL;
	dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);

	/* Calc the 'end' offset past end of ONE ctl_info structure
	 * which will become the start of the 'instance' array
	 */
	dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);

	/* Calc the 'end' offset past the instance array within the ctl_info
	 * which will become the start of the block array
	 */
	count = nr_instances * nr_blocks;
	dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);

	/* Calc the 'end' offset past the dev_blk array
	 * which will become the start of the attrib array, if any.
	 */
	/* calc how many nr_attrib we need */
	if (nr_attrib > 0)
		count *= nr_attrib;
	dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);

	/* Calc the 'end' offset past the attributes array */
	pvt = edac_align_ptr(&p, sz_private, 1);

	/* 'pvt' now points to where the private data area is.
	 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
	 * is baselined at ZERO
	 */
	total_size = ((unsigned long)pvt) + sz_private;

	/* Allocate the amount of memory for the set of control structures */
	dev_ctl = kzalloc(total_size, GFP_KERNEL);
	if (dev_ctl == NULL)
		return NULL;

	/* Adjust pointers so they point within the actual memory we
	 * just allocated rather than an imaginary chunk of memory
	 * located at address 0.
	 * 'dev_ctl' points to REAL memory, while the others are
	 * ZERO based and thus need to be adjusted to point within
	 * the allocated memory.
	 */
	dev_inst = (struct edac_device_instance *)
		(((char *)dev_ctl) + ((unsigned long)dev_inst));
	dev_blk = (struct edac_device_block *)
		(((char *)dev_ctl) + ((unsigned long)dev_blk));
	dev_attrib = (struct edac_dev_sysfs_block_attribute *)
		(((char *)dev_ctl) + ((unsigned long)dev_attrib));
	pvt = sz_private ?
		(((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;

	/* Begin storing the information into the control info structure */
	dev_ctl->dev_idx = device_index;
	dev_ctl->nr_instances = nr_instances;
	dev_ctl->instances = dev_inst;
	dev_ctl->pvt_info = pvt;

	/* Default logging of CEs and UEs */
	dev_ctl->log_ce = 1;
	dev_ctl->log_ue = 1;

	/* Name of this edac device */
	snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);

	edac_dbg(4, "edac_dev=%p next after end=%p\n",
		 dev_ctl, pvt + sz_private);

	/* Initialize every Instance */
	for (instance = 0; instance < nr_instances; instance++) {
		inst = &dev_inst[instance];
		inst->ctl = dev_ctl;
		inst->nr_blocks = nr_blocks;
		blk_p = &dev_blk[instance * nr_blocks];
		inst->blocks = blk_p;

		/* name of this instance */
		snprintf(inst->name, sizeof(inst->name),
			 "%s%u", edac_device_name, instance);

		/* Initialize every block in each instance */
		for (block = 0; block < nr_blocks; block++) {
			blk = &blk_p[block];
			blk->instance = inst;
			snprintf(blk->name, sizeof(blk->name),
				 "%s%d", edac_block_name, block+offset_value);

			edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
				 instance, inst, block, blk, blk->name);

			/* if there are NO attributes OR no attribute pointer
			 * then continue on to next block iteration
			 */
			if ((nr_attrib == 0) || (attrib_spec == NULL))
				continue;

			/* setup the attribute array for this block */
			blk->nr_attribs = nr_attrib;
			/* NOTE(review): this index does not involve 'instance',
			 * so the same block number in different instances maps
			 * to the same attribute slots of the
			 * nr_instances*nr_blocks*nr_attrib array allocated
			 * above — looks suspect; confirm intended layout. */
			attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
			blk->block_attributes = attrib_p;

			edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
				 blk->block_attributes);

			/* Initialize every user specified attribute in this
			 * block with the data the caller passed in
			 * Each block gets its own copy of pointers,
			 * and its unique 'value'
			 */
			for (attr = 0; attr < nr_attrib; attr++) {
				attrib = &attrib_p[attr];

				/* populate the unique per attrib
				 * with the code pointers and info
				 */
				attrib->attr = attrib_spec[attr].attr;
				attrib->show = attrib_spec[attr].show;
				attrib->store = attrib_spec[attr].store;

				attrib->block = blk;	/* up link */

				edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
					 attrib, attrib->attr.name,
					 &attrib_spec[attr],
					 attrib_spec[attr].attr.name);
			}
		}
	}

	/* Mark this instance as merely ALLOCATED */
	dev_ctl->op_state = OP_ALLOC;

	/*
	 * Initialize the 'root' kobj for the edac_device controller
	 */
	err = edac_device_register_sysfs_main_kobj(dev_ctl);
	if (err) {
		kfree(dev_ctl);
		return NULL;
	}

	/* at this point, the root kobj is valid, and in order to
	 * 'free' the object, then the function:
	 *	edac_device_unregister_sysfs_main_kobj() must be called
	 * which will perform kobj unregistration and the actual free
	 * will occur during the kobject callback operation
	 */

	return dev_ctl;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);

/*
 * edac_device_free_ctl_info()
 *	frees the memory allocated by the edac_device_alloc_ctl_info()
 *	function
 *
 * The actual kfree() happens in the kobject release callback, which is
 * why only the sysfs unregister is invoked here (see the note at the end
 * of edac_device_alloc_ctl_info()).
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
	edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);

/*
 * find_edac_device_by_dev
 *	scans the edac_device list for a specific 'struct device *'
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		pointer to control structure managing 'dev'
 *		NULL if not found on list
 */
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;
	struct list_head *item;

	edac_dbg(0, "\n");

	list_for_each(item, &edac_device_list) {
		edac_dev = list_entry(item, struct edac_device_ctl_info, link);

		if (edac_dev->dev == dev)
			return edac_dev;
	}

	return NULL;
}

/*
 * add_edac_dev_to_global_list
 *	Before calling this function, caller must
 *	assign a unique value to edac_dev->dev_idx.
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		0 on success
 *		1 on failure.
 */
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
	struct list_head *item, *insert_before;
	struct edac_device_ctl_info *rover;

	/* default insertion point: list tail */
	insert_before = &edac_device_list;

	/* Determine if already on the list */
	rover = find_edac_device_by_dev(edac_dev->dev);
	if (unlikely(rover != NULL))
		goto fail0;

	/* Insert in ascending order by 'dev_idx', so find position */
	list_for_each(item, &edac_device_list) {
		rover = list_entry(item, struct edac_device_ctl_info, link);

		if (rover->dev_idx >= edac_dev->dev_idx) {
			/* a duplicate index is a driver bug */
			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	/* RCU insert so readers may traverse without the mutex */
	list_add_tail_rcu(&edac_dev->link, insert_before);
	return 0;

fail0:
	/* 'dev' already managed by another control structure */
	edac_printk(KERN_WARNING, EDAC_MC,
			"%s (%s) %s %s already assigned %d\n",
			dev_name(rover->dev), edac_dev_name(rover),
			rover->mod_name, rover->ctl_name, rover->dev_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
			"bug in low-level driver: attempt to assign\n"
			" duplicate dev_idx %d in %s()\n",
			rover->dev_idx, __func__);
	return 1;
}

/*
 * del_edac_device_from_global_list
 *	RCU-safe removal of a control structure from 'edac_device_list'.
 */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
						*edac_device)
{
	list_del_rcu(&edac_device->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&edac_device->link);
}

/*
 * edac_device_workq_function
 *	performs the operation scheduled by a workq request
 *
 *	this workq is embedded within an edac_device_ctl_info
 *	structure, that needs to be polled for possible error events.
 *
 *	This operation is to acquire the list mutex lock
 *	(thus preventing insertion or deletion)
 *	and then call the device's poll function IFF this device is
 *	running polled and there is a poll function defined.
 */
static void edac_device_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

	mutex_lock(&device_ctls_mutex);

	/* If we are being removed, bail out immediately */
	if (edac_dev->op_state == OP_OFFLINE) {
		mutex_unlock(&device_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
	    (edac_dev->edac_check != NULL)) {
		edac_dev->edac_check(edac_dev);
	}

	mutex_unlock(&device_ctls_mutex);

	/* Reschedule the workq for the next time period to start again
	 * if the number of msec is for 1 sec, then adjust to the next
	 * whole one second to save timers firing all over the period
	 * between integral seconds
	 */
	if (edac_dev->poll_msec == 1000)
		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
	else
		edac_queue_work(&edac_dev->work, edac_dev->delay);
}

/*
 * edac_device_workq_setup
 *	initialize a workq item for this edac_device instance
 *	passing in the new delay period in msec
 */
static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
				    unsigned msec)
{
	edac_dbg(0, "\n");

	/* take the arg 'msec' and set it into the control structure
	 * to used in the time period calculation
	 * then calc the number of jiffies that represents
	 */
	edac_dev->poll_msec = msec;
	edac_dev->delay = msecs_to_jiffies(msec);

	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);

	/* optimize here for the 1 second case, which will be normal value, to
	 * fire ON the 1 second time event.  This helps reduce all sorts of
	 * timers firing on sub-second basis, while they are happy
	 * to fire together on the 1 second exactly
	 */
	if (edac_dev->poll_msec == 1000)
		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
	else
		edac_queue_work(&edac_dev->work, edac_dev->delay);
}

/*
 * edac_device_workq_teardown
 *	stop the workq processing on this edac_dev
 *
 * No-op for interrupt-driven devices (no edac_check routine means no
 * polling work was ever queued).
 */
static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
	if (!edac_dev->edac_check)
		return;

	/* marking OFFLINE makes a concurrently-running work item bail out */
	edac_dev->op_state = OP_OFFLINE;

	edac_stop_work(&edac_dev->work);
}

/*
 * edac_device_reset_delay_period
 *
 *	need to stop any outstanding workq queued up at this time
 *	because we will be resetting the sleep time.
 *	Then restart the workq on the new delay
 */
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
				    unsigned long value)
{
	unsigned long jiffs = msecs_to_jiffies(value);

	/* NOTE(review): 'value' is in msec yet is handed to
	 * round_jiffies_relative() directly instead of 'jiffs' —
	 * confirm this is intentional. */
	if (value == 1000)
		jiffs = round_jiffies_relative(value);

	edac_dev->poll_msec = value;
	edac_dev->delay	    = jiffs;

	edac_mod_work(&edac_dev->work, jiffs);
}

/*
 * edac_device_alloc_index: Allocate a unique device index number
 *
 * Return:
 *	allocated index number
 */
int edac_device_alloc_index(void)
{
	/* monotonically increasing; first caller gets index 0 */
	static atomic_t device_indexes = ATOMIC_INIT(0);

	return atomic_inc_return(&device_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);

/**
 * edac_device_add_device: Insert the 'edac_dev' structure into the
 * edac_device global list and create sysfs entries associated with
 * edac_device structure.
 * @edac_device: pointer to the edac_device structure to be added to the list
 * 'edac_device' structure.
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_device_dump_device(edac_dev);
#endif
	mutex_lock(&device_ctls_mutex);

	/* insert sorted by dev_idx; fails on duplicate dev or dev_idx */
	if (add_edac_dev_to_global_list(edac_dev))
		goto fail0;

	/* set load time so that error rate can be tracked */
	edac_dev->start_time = jiffies;

	/* create this instance's sysfs entries */
	if (edac_device_create_sysfs(edac_dev)) {
		edac_device_printk(edac_dev, KERN_WARNING,
					"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (edac_dev->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		edac_dev->op_state = OP_RUNNING_POLL;

		/*
		 * enable workq processing on this instance,
		 * default = 1000 msec
		 */
		edac_device_workq_setup(edac_dev, 1000);
	} else {
		edac_dev->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_device_printk(edac_dev, KERN_INFO,
		"Giving out device to module %s controller %s: DEV %s (%s)\n",
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name,
		edac_op_state_to_string(edac_dev->op_state));

	mutex_unlock(&device_ctls_mutex);
	return 0;

fail1:
	/* Some error, so remove the entry from the list */
	del_edac_device_from_global_list(edac_dev);

fail0:
	mutex_unlock(&device_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);

/**
 * edac_device_del_device:
 *	Remove sysfs entries for specified edac_device structure and
 *	then remove edac_device structure from global list
 *
 * @dev:
 *	Pointer to 'struct device' representing edac_device
 *	structure to remove.
 *
 * Return:
 *	Pointer to removed edac_device structure,
 *	OR NULL if device not found.
 */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;

	edac_dbg(0, "\n");

	mutex_lock(&device_ctls_mutex);

	/* Find the structure on the list, if not there, then leave */
	edac_dev = find_edac_device_by_dev(dev);
	if (edac_dev == NULL) {
		mutex_unlock(&device_ctls_mutex);
		return NULL;
	}

	/* mark this instance as OFFLINE */
	edac_dev->op_state = OP_OFFLINE;

	/* deregister from global list */
	del_edac_device_from_global_list(edac_dev);

	mutex_unlock(&device_ctls_mutex);

	/* clear workq processing on this instance */
	edac_device_workq_teardown(edac_dev);

	/* Tear down the sysfs entries for this instance */
	edac_device_remove_sysfs(edac_dev);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n",
		edac_dev->dev_idx,
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));

	return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);

/* accessor: non-zero when correctable errors should be logged */
static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ce;
}

/* accessor: non-zero when uncorrectable errors should be logged */
static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ue;
}

/* accessor: non-zero when an uncorrectable error should panic the box */
static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
					*edac_dev)
{
	return edac_dev->panic_on_ue;
}

/*
 * edac_device_handle_ce
 *	perform a common output and handling of an 'edac_dev' CE event
 */
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	/* range-check the instance index supplied by the low-level driver */
	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	/* range-check the block index within this instance */
	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ce_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ce_count++;
	edac_dev->counters.ce_count++;

	if (edac_device_get_log_ce(edac_dev))
		edac_device_printk(edac_dev, KERN_WARNING,
				"CE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);

/*
 * edac_device_handle_ue
 *	perform a common output and handling of an 'edac_dev' UE event
 */
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	/* range-check the instance index supplied by the low-level driver */
	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	/* range-check the block index within this instance */
	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ue_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ue_count++;
	edac_dev->counters.ue_count++;

	if (edac_device_get_log_ue(edac_dev))
		edac_device_printk(edac_dev, KERN_EMERG,
				"UE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);

	/* optionally halt the machine on an uncorrectable error */
	if (edac_device_get_panic_on_ue(edac_dev))
		panic("EDAC %s: UE instance: %s block %s '%s'\n",
			edac_dev->ctl_name, instance->name,
			block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);
gpl-2.0
RadioWar/WIFIPineApple-MKV
package/ead/src/tinysrp/tinysrp.c
776
4695
/* This bit implements a simple API for using the SRP library over sockets. */

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "t_defines.h"
#include "t_pwd.h"
#include "t_server.h"
#include "t_client.h"
#include "tinysrp.h"

#ifndef MSG_WAITALL
#ifdef linux
#define MSG_WAITALL 0x100	/* somehow not defined on my box */
#endif
#endif

/* This is called by the client with a connected socket, username, and
   passphrase.  pass can be NULL in which case the user is queried.

   Wire format (one byte length prefixes throughout):
     -> [len][username]
     <- [prime index][salt len][salt]
     -> [A len - 1][A]
   Returns 1 on success, 0 on any protocol/socket failure. */

int tsrp_client_authenticate(int s, char *user, char *pass, TSRP_SESSION *tsrp)
{
	int i, index;
	unsigned char username[MAXUSERLEN + 1], sbuf[MAXSALTLEN];
	unsigned char msgbuf[MAXPARAMLEN + 1], bbuf[MAXPARAMLEN];
	unsigned char passbuf[128], *skey;
	struct t_client *tc;
	struct t_preconf *tcp;		/* @@@ should go away */
	struct t_num salt, *A, B;

	/* Send the username. */

	i = strlen(user);
	if (i > MAXUSERLEN) {
		i = MAXUSERLEN;		/* silently truncate overlong names */
	}
	msgbuf[0] = i;
	memcpy(msgbuf + 1, user, i);
	if (send(s, msgbuf, i + 1, 0) < 0) {
		return 0;
	}
	memcpy(username, user, i);
	username[i] = '\0';

	/* Get the prime index and salt. */

	i = recv(s, msgbuf, 2, MSG_WAITALL);
	if (i <= 0) {
		return 0;
	}
	index = msgbuf[0];
	if (index <= 0 || index > t_getprecount()) {
		return 0;		/* out-of-range precomputed prime */
	}
	tcp = t_getpreparam(index - 1);
	salt.len = msgbuf[1];
	if (salt.len > MAXSALTLEN) {
		return 0;
	}
	salt.data = sbuf;
	i = recv(s, sbuf, salt.len, MSG_WAITALL);
	if (i <= 0) {
		return 0;
	}

	/* @@@ t_clientopen() needs a variant that takes the index */

	tc = t_clientopen(username, &tcp->modulus, &tcp->generator, &salt);
	if (tc == NULL) {
		return 0;
	}

	/* Calculate A and send it to the server. */

	A = t_clientgenexp(tc);
	msgbuf[0] = A->len - 1;		/* len is max 256 */
	memcpy(msgbuf + 1, A->data, A->len);
	if (send(s, msgbuf, A->len + 1, 0) < 0) {
		return 0;
	}

	/* Ask the user for the passphrase.
*/ if (pass == NULL) { t_getpass(passbuf, sizeof(passbuf), "Enter password:"); pass = passbuf; } t_clientpasswd(tc, pass); /* Get B from the server. */ i = recv(s, msgbuf, 1, 0); if (i <= 0) { return 0; } B.len = msgbuf[0] + 1; B.data = bbuf; i = recv(s, bbuf, B.len, MSG_WAITALL); if (i <= 0) { return 0; } /* Compute the session key. */ skey = t_clientgetkey(tc, &B); if (skey == NULL) { return 0; } /* Send the response. */ if (send(s, t_clientresponse(tc), RESPONSE_LEN, 0) < 0) { return 0; } /* Get the server's response. */ i = recv(s, msgbuf, RESPONSE_LEN, MSG_WAITALL); if (i <= 0) { return 0; } if (t_clientverify(tc, msgbuf) != 0) { return 0; } /* All done. Now copy the key and clean up. */ if (tsrp) { memcpy(tsrp->username, username, strlen(username) + 1); memcpy(tsrp->key, skey, SESSION_KEY_LEN); } t_clientclose(tc); return 1; } /* This is called by the server with a connected socket. */ int tsrp_server_authenticate(int s, TSRP_SESSION *tsrp) { int i, j; unsigned char username[MAXUSERLEN], *skey; unsigned char msgbuf[MAXPARAMLEN + 1], abuf[MAXPARAMLEN]; struct t_server *ts; struct t_num A, *B; /* Get the username. */ i = recv(s, msgbuf, 1, 0); if (i <= 0) { return 0; } j = msgbuf[0]; i = recv(s, username, j, MSG_WAITALL); if (i <= 0) { return 0; } username[j] = '\0'; ts = t_serveropen(username); if (ts == NULL) { return 0; } /* Send the prime index and the salt. */ msgbuf[0] = ts->index; /* max 256 primes... */ i = ts->s.len; msgbuf[1] = i; memcpy(msgbuf + 2, ts->s.data, i); if (send(s, msgbuf, i + 2, 0) < 0) { return 0; } /* Calculate B while we're waiting. */ B = t_servergenexp(ts); /* Get A from the client. */ i = recv(s, msgbuf, 1, 0); if (i <= 0) { return 0; } A.len = msgbuf[0] + 1; A.data = abuf; i = recv(s, abuf, A.len, MSG_WAITALL); if (i <= 0) { return 0; } /* Now send B. */ msgbuf[0] = B->len - 1; memcpy(msgbuf + 1, B->data, B->len); if (send(s, msgbuf, B->len + 1, 0) < 0) { return 0; } /* Calculate the session key while we're waiting. 
*/ skey = t_servergetkey(ts, &A); if (skey == NULL) { return 0; } /* Get the response from the client. */ i = recv(s, msgbuf, RESPONSE_LEN, MSG_WAITALL); if (i <= 0) { return 0; } if (t_serververify(ts, msgbuf) != 0) { return 0; } /* Client authenticated. Now authenticate ourselves to the client. */ if (send(s, t_serverresponse(ts), RESPONSE_LEN, 0) < 0) { return 0; } /* Copy the key and clean up. */ if (tsrp) { memcpy(tsrp->username, username, strlen(username) + 1); memcpy(tsrp->key, skey, SESSION_KEY_LEN); } t_serverclose(ts); return 1; }
gpl-2.0
ThePlayground/android_kernel_htc_shooter
drivers/gpu/drm/i915/i915_dma.c
1288
59217
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- */ /* * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>

/*
 * Program the HWS_PGA register with the bus address of the hardware
 * status page; gen4+ also carries bits 28+ of the address in the low
 * nibble-shifted field.
 */
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;

	I915_WRITE(HWS_PGA, addr);
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}

	memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
		  0, PAGE_SIZE);

	i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

/*
 * Resynchronize the driver's software view (head/tail/space) of the low
 * priority ring with the hardware after the ring may have been touched
 * behind our back (legacy/UMS paths only).
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

/* Tear down DMA state: IRQs, all ring buffers, and the status page. */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

/*
 * Legacy (DRI1) DMA initialization: locate the SAREA, optionally create
 * the render ring from userspace-provided parameters, and record the
 * front/back buffer layout.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* GEM already owns the ring: refuse a second initialization */
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}

/* Re-validate ring mapping and status page after a resume. */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

/* DRM_IOCTL_I915_INIT dispatcher: init / cleanup / resume sub-ops. */
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	/* top 3 bits select the command class */
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

/*
 * Validate each dword of a user command buffer with validate_cmd() and,
 * if all sizes check out, copy it into the low priority ring (padded to
 * an even dword count).
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	/* walk instruction-by-instruction; a zero size means "illegal" */
	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);

		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);	/* keep the ring tail qword-aligned */

	ADVANCE_LP_RING();

	return 0;
}

/*
 * Emit a GFX_OP_DRAWRECT_INFO clip rectangle; layout differs between
 * gen4+ (4 dwords) and earlier hardware (6 dwords).
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* reject empty or negative boxes */
	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	/* counter wraps at 2^31 - 1 */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

/*
 * Emit a validated user command buffer once per clip rectangle (or once
 * if there are none), followed by a breadcrumb.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

/*
 * Dispatch a non-secure batch buffer by reference (MI_BATCH_BUFFER_START
 * or, on i830/845, the bounded MI_BATCH_BUFFER form), once per cliprect.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	/* start and length must be qword aligned */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ?
nbox : 1; for (i = 0; i < count; i++) { if (i < nbox) { ret = i915_emit_box(dev, &cliprects[i], batch->DR1, batch->DR4); if (ret) return ret; } if (!IS_I830(dev) && !IS_845G(dev)) { ret = BEGIN_LP_RING(2); if (ret) return ret; if (INTEL_INFO(dev)->gen >= 4) { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); OUT_RING(batch->start); } else { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); OUT_RING(batch->start | MI_BATCH_NON_SECURE); } } else { ret = BEGIN_LP_RING(4); if (ret) return ret; OUT_RING(MI_BATCH_BUFFER); OUT_RING(batch->start | MI_BATCH_NON_SECURE); OUT_RING(batch->start + batch->used - 4); OUT_RING(0); } ADVANCE_LP_RING(); } if (IS_G4X(dev) || IS_GEN5(dev)) { if (BEGIN_LP_RING(2) == 0) { OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); OUT_RING(MI_NOOP); ADVANCE_LP_RING(); } } i915_emit_breadcrumb(dev); return 0; } static int i915_dispatch_flip(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret; if (!master_priv->sarea_priv) return -EINVAL; DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", __func__, dev_priv->current_page, master_priv->sarea_priv->pf_current_page); i915_kernel_lost_context(dev); ret = BEGIN_LP_RING(10); if (ret) return ret; OUT_RING(MI_FLUSH | MI_READ_FLUSH); OUT_RING(0); OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); OUT_RING(0); if (dev_priv->current_page == 0) { OUT_RING(dev_priv->back_offset); dev_priv->current_page = 1; } else { OUT_RING(dev_priv->front_offset); dev_priv->current_page = 0; } OUT_RING(0); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); OUT_RING(0); ADVANCE_LP_RING(); master_priv->sarea_priv->last_enqueue = dev_priv->counter++; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } master_priv->sarea_priv->pf_current_page = 
dev_priv->current_page;
	return 0;
}

/* Wait for the render ring to go idle after flushing kernel context. */
static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return intel_wait_ring_idle(ring);
}

/* DRM_IOCTL_I915_FLUSH: quiesce the GPU under struct_mutex. */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * DRM_IOCTL_I915_BATCHBUFFER: copy the user's clip rects in, dispatch the
 * batch under struct_mutex, and report the last breadcrumb back through
 * the SAREA.  Rejected when batchbuffers are administratively disabled.
 */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		/* kcalloc checks the count*size multiplication for overflow */
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

/*
 * DRM_IOCTL_I915_CMDBUFFER: copy the user command stream and clip rects
 * into kernel memory, then dispatch under struct_mutex.
 */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	drm_i915_sarea_t
*sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

/* DRM_IOCTL_I915_FLIP: perform a legacy page flip under struct_mutex. */
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * DRM_IOCTL_I915_GETPARAM: report a single integer capability/state value
 * to userspace, copied out through param->value.
 */
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ?
1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	case I915_PARAM_HAS_BLT:
		value = HAS_BLT(dev);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

/*
 * DRM_IOCTL_I915_SETPARAM: accept a single integer tunable from userspace.
 * USE_MI_BATCHBUFFER_START is accepted but deliberately a no-op.
 */
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}

static int
i915_set_status_page(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Only meaningful on chips that keep the HW status page in GTT
	 * memory rather than the dedicated physical page. */
	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* Under KMS the kernel owns the status page; refuse quietly. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr =
		(void __force __iomem *)dev_priv->hws_map.handle;
	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

/* Grab a reference to the host bridge (bus 0, devfn 0) for config pokes. */
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* MCHBAR config-space register offsets on the host bridge. */
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ?
MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	/* Gen4+ has a 64-bit MCHBAR (two config dwords). */
	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	/* Program the newly-allocated base back into the bridge. */
	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	/* i915G/GM gate MCHBAR through DEVEN; others use bit 0 of the BAR. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	/* Remember to turn it back off in intel_teardown_mchbar(). */
	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
				       temp | 1);
	}
}

/* Undo intel_setup_mchbar(): disable the BAR if we enabled it and release
 * any resource we allocated for it. */
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* GTT page-table entry bits (legacy). */
#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)

/**
 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
 * a physical one
 * @dev: drm device
 * @offset: address to translate
 *
 * Some chip functions require allocations from stolen space and need the
 * physical address of the memory in question.
 */
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev_priv->bridge_dev;
	u32 base;

#if 0
	/* On the machines I have tested the Graphics Base of Stolen Memory
	 * is unreliable, so compute the base by subtracting the stolen memory
	 * from the Top of Low Usable DRAM which is where the BIOS places
	 * the graphics stolen memory.
*/
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		/* top 32bits are reserved = 0 */
		pci_read_config_dword(pdev, 0xA4, &base);
	} else {
		/* XXX presume 8xx is the same as i915 */
		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
	}
#else
	/* Derive the stolen base from TOLUD minus the stolen size.
	 * NOTE(review): register offsets 0xb0/0x9c and the shift widths are
	 * chipset-specific — confirm against the bridge datasheet. */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		u16 val;
		pci_read_config_word(pdev, 0xb0, &val);
		base = val >> 4 << 20;
	} else {
		u8 val;
		pci_read_config_byte(pdev, 0x9c, &val);
		base = val >> 3 << 27;
	}
	base -= dev_priv->mm.gtt->stolen_size;
#endif

	return base + offset;
}

/* One-shot complaint when FBC can't get a compressed buffer. */
static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}

/*
 * Carve an FBC compressed framebuffer (and, pre-GM45/non-PCH, a line
 * length buffer) out of stolen memory and point the hardware at it.
 * On failure everything is released and FBC is marked unavailable.
 */
static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
	if (compressed_fb)
		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb)
		goto err;

	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
	if (!cfb_base)
		goto err_fb;

	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
						    4096, 4096, 0);
		if (compressed_llb)
			compressed_llb = drm_mm_get_block(compressed_llb,
							  4096, 4096);
		if (!compressed_llb)
			goto err_fb;

		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
		if (!ll_base)
			goto err_llb;
	}

	dev_priv->cfb_size = size;

	intel_disable_fbc(dev);
	dev_priv->compressed_fb = compressed_fb;
	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
		      cfb_base, ll_base, size >> 20);
	return;

err_llb:
	drm_mm_put_block(compressed_llb);
err_fb:
	drm_mm_put_block(compressed_fb);
err:
	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
	i915_warn_stolen(dev);
}

/* Release the stolen-memory blocks taken by i915_setup_compression(). */
static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (dev_priv->compressed_llb)
		drm_mm_put_block(dev_priv->compressed_llb);
}

/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/* vga_switcheroo callback: resume or suspend the device when the mux
 * switches the discrete/integrated GPU on or off. */
static void i915_switcheroo_set_state(struct pci_dev *pdev,
				      enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		printk(KERN_ERR "i915: switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/* vga_switcheroo callback: only allow a switch while nobody has the
 * device open. */
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

/*
 * GEM bring-up: initialize the stolen-memory allocator and the GTT
 * aperture, start the rings, and optionally set up FBC compression.
 */
static int i915_load_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long prealloc_size, gtt_size, mappable_size;
	int ret;

	prealloc_size = dev_priv->mm.gtt->stolen_size;
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size =
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Leave 1M for line length buffer & misc. */

		/* Try to get a 32M buffer... */
		if (prealloc_size > (36*1024*1024))
			cfb_size = 32*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise. */
	dev_priv->allow_batchbuffer = 1;
	return 0;
}

/*
 * KMS bring-up: parse the VBIOS, register with the VGA arbiter and
 * vga_switcheroo, initialize modesetting, GEM, interrupts and fbdev.
 * Unwinds everything in reverse order via the goto-cleanup chain.
 */
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     NULL,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = i915_load_gem_init(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

/* Allocate the per-master private data (holds the SAREA pointer). */
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

/* Free the per-master private data; safe if none was allocated. */
void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

/* Decode CLKCFG into FSB/memory frequencies and the DDR3 flag (Pineview). */
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /*
200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

/* Decode the DDR and CSI PLL registers into memory/FSB frequencies and
 * derive the c_m index used to pick a row of the cparams table below. */
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}

/* Empirical (i, t) -> (m, c) coefficients used by i915_chipset_val()
 * to convert energy-counter deltas into a power estimate. */
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

/* Estimate chipset power draw from the DMI/DDR/CSI energy counters;
 * rate-limited to one fresh sample per 10ms (returns the cached value
 * when polled faster). */
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m
= 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	/* Pick the coefficients matching the detected memory config. */
	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	dev_priv->chipset_power = ret;

	return ret;
}

/* Read the thermal sensor and apply the TSFS slope/intercept to get a
 * temperature-derived value used by i915_gfx_val(). */
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

/* Map a 7-bit PXVID voltage ID to a voltage in .1mV units, with separate
 * desktop (vd) and mobile (vm) columns. */
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		{ 0, 0, },
		{ 375, 0, },
		{ 500, 0, },
		{ 625, 0, },
		{ 750, 0, },
		{ 875, 0, },
		{ 1000, 0, },
		{ 1125, 0, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4250, 3125, },
		{ 4375, 3250, },
		{ 4500, 3375, },
		{ 4625, 3500, },
		{ 4750, 3625, },
		{ 4875, 3750, },
		{ 5000, 3875, },
		{ 5125, 4000, },
		{ 5250, 4125, },
		{ 5375, 4250, },
		{ 5500, 4375, },
		{ 5625, 4500, },
		{ 5750, 4625, },
		{ 5875, 4750, },
		{ 6000, 4875, },
		{ 6125, 5000, },
		{ 6250, 5125, },
		{ 6375, 5250, },
		{ 6500, 5375, },
		{ 6625, 5500, },
		{ 6750, 5625, },
		{ 6875, 5750, },
		{ 7000, 5875, },
		{ 7125, 6000, },
		{ 7250, 6125, },
		{ 7375, 6250, },
		{ 7500, 6375, },
		{ 7625, 6500, },
		{ 7750, 6625, },
		{ 7875, 6750, },
		{ 8000, 6875, },
		{ 8125, 7000, },
		{ 8250, 7125, },
		{ 8375, 7250, },
		{ 8500, 7375, },
		{ 8625, 7500, },
		{ 8750, 7625, },
		{ 8875, 7750, },
		{ 9000, 7875, },
		{ 9125, 8000, },
		{ 9250, 8125, },
		{ 9375, 8250, },
		{ 9500, 8375, },
		{ 9625, 8500, },
		{ 9750, 8625, },
		{ 9875, 8750, },
		{ 10000, 8875, },
		{ 10125, 9000, },
		{ 10250, 9125, },
		{ 10375, 9250, },
		{ 10500, 9375, },
		{ 10625, 9500, },
		{ 10750, 9625, },
		{ 10875, 9750, },
		{ 11000, 9875, },
		{ 11125, 10000, },
		{ 11250, 10125, },
		{ 11375, 10250, },
		{ 11500, 10375, },
		{ 11625, 10500, },
		{ 11750, 10625, },
		{ 11875, 10750, },
		{ 12000, 10875, },
		{ 12125, 11000, },
		{ 12250, 11125, },
		{ 12375, 11250, },
		{ 12500, 11375, },
		{ 12625, 11500, },
		{ 12750, 11625, },
		{ 12875, 11750, },
		{ 13000, 11875, },
		{ 13125, 12000, },
		{ 13250, 12125, },
		{ 13375, 12250, },
		{ 13500, 12375, },
		{ 13625, 12500, },
		{ 13750, 12625, },
		{ 13875, 12750, },
		{ 14000, 12875, },
		{ 14125, 13000, },
		{ 14250, 13125, },
		{ 14375, 13250, },
		{ 14500, 13375, },
		{ 14625, 13500, },
		{ 14750, 13625, },
		{ 14875, 13750, },
		{ 15000, 13875, },
		{ 15125, 14000, },
		{ 15250, 14125, },
		{ 15375, 14250, },
		{ 15500, 14375, },
		{ 15625, 14500, },
		{ 15750, 14625, },
		{ 15875, 14750, },
		{ 16000, 14875, },
		{ 16125, 15000, },
	};
	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}

/* Sample the GFXEC energy counter and refresh dev_priv->gfx_power; no-op
 * when called again within the same millisecond. */
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms
= diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->last_count2) {
		/* counter wrapped since the last sample */
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}

/* Estimate graphics power draw: combine the current voltage (from
 * PXVFREQ/pvid_to_extvid), the thermal value from i915_mch_val() and a
 * set of empirically derived correction constants, then add the counter-
 * based gfx_power refreshed by i915_update_gfx_val(). */
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}

/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;	/* driver not loaded yet: report 0 */
	dev_priv = i915_mch_dev;

	chipset_val = i915_chipset_val(dev_priv);
	graphics_val = i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	/* NOTE: lower delay value == higher frequency, hence the decrement */
	if (dev_priv->max_delay > dev_priv->fmax)
		dev_priv->max_delay--;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay < dev_priv->min_delay)
		dev_priv->max_delay++;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = false;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	ret = dev_priv->busy;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
*/
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	/* symbol_get() returns NULL when intel_ips is not loaded. */
	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	int ret = 0, mmio_bar;
	uint32_t agp_size;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	/* flags smuggles the intel_device_info pointer from the PCI table */
	dev_priv->info = (struct intel_device_info *) flags;

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit
addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	/* gen2 keeps the register block in BAR 1, later chips in BAR 0 */
	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt = intel_gtt_get();
	if (!dev_priv->mm.gtt) {
		DRM_ERROR("Failed to initialize GTT\n");
		ret = -ENODEV;
		goto out_rmmap;
	}

	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base, agp_size);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 agp_size,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		/* non-fatal: we keep running without the WC MTRR */
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
	 */
	dev_priv->wq = alloc_workqueue("i915",
				       WQ_UNBOUND | WQ_NON_REENTRANT,
				       1);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	intel_irq_init(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	/* Make sure the bios did its job and set up vital registers */
	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret)
			goto out_gem_unload;
	}

	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	spin_lock_init(&dev_priv->rps_lock);

	if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	/* Must be done after probing outputs */
	intel_opregion_init(dev);
	acpi_video_register();

	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long) dev);

	/* Publish ourselves to the intel_ips driver (see mchdev_lock). */
	spin_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

	ips_ping_for_i915_load();

	return 0;

out_gem_unload:
	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}

/* Driver teardown: unpublish from IPS, drop the shrinker, and idle the
 * GPU before the rest of the unload sequence (continues past this view). */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	spin_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock(&mchdev_lock);

	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler,
which should be idle now. */ cancel_delayed_work_sync(&dev_priv->mm.retire_work); io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, dev->agp->agp_info.aper_size * 1024 * 1024); dev_priv->mm.gtt_mtrr = -1; } acpi_video_unregister(); if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_fbdev_fini(dev); intel_modeset_cleanup(dev); /* * free the memory space allocated for the child device * config parsed from VBT */ if (dev_priv->child_dev && dev_priv->child_dev_num) { kfree(dev_priv->child_dev); dev_priv->child_dev = NULL; dev_priv->child_dev_num = 0; } vga_switcheroo_unregister_client(dev->pdev); vga_client_register(dev->pdev, NULL, NULL, NULL); } /* Free error state after interrupts are fully disabled. */ del_timer_sync(&dev_priv->hangcheck_timer); cancel_work_sync(&dev_priv->error_work); i915_destroy_error_state(dev); if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); intel_opregion_fini(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { /* Flush any outstanding unpin_work. 
*/ flush_workqueue(dev_priv->wq); mutex_lock(&dev->struct_mutex); i915_gem_free_all_phys_object(dev); i915_gem_cleanup_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); if (I915_HAS_FBC(dev) && i915_powersave) i915_cleanup_compression(dev); drm_mm_takedown(&dev_priv->mm.stolen); intel_cleanup_overlay(dev); if (!I915_NEED_GFX_HWS(dev)) i915_free_hws(dev); } if (dev_priv->regs != NULL) pci_iounmap(dev->pdev, dev_priv->regs); intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); destroy_workqueue(dev_priv->wq); pci_dev_put(dev_priv->bridge_dev); kfree(dev->dev_private); return 0; } int i915_driver_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv; DRM_DEBUG_DRIVER("\n"); file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); if (!file_priv) return -ENOMEM; file->driver_priv = file_priv; spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); return 0; } /** * i915_driver_lastclose - clean up after all DRM clients have exited * @dev: DRM device * * Take care of cleaning up after all DRM clients have exited. In the * mode setting case, we want to restore the kernel's initial mode (just * in case the last client left us in a bad state). * * Additionally, in the non-mode setting case, we'll tear down the AGP * and DMA structures, since the kernel won't be using them, and clea * up any GEM state. 
*/ void i915_driver_lastclose(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { intel_fb_restore_mode(dev); vga_switcheroo_process_delayed_switch(); return; } i915_gem_lastclose(dev); if (dev_priv->agp_heap) i915_mem_takedown(&(dev_priv->agp_heap)); i915_dma_cleanup(dev); } void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; i915_gem_release(dev, file_priv); if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_mem_release(dev, file_priv, dev_priv->agp_heap); } void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; kfree(file_priv); } struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, 
i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, 
DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); /** * Determine if the device really is AGP or not. * * All Intel graphics chipsets are treated as AGP, even if they are really * PCI-e. * * \param dev The device to be tested. * * \returns * A value of 1 is always retured to indictate every i9x5 is AGP. */ int i915_driver_device_is_agp(struct drm_device * dev) { return 1; }
gpl-2.0
manabian/linux-lpc
drivers/cpufreq/sparc-us2e-cpufreq.c
1800
9054
/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support * * Copyright (C) 2003 David S. Miller (davem@redhat.com) * * Many thanks to Dominik Brodowski for fixing up the cpufreq * infrastructure in order to make this driver easier to implement. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/cpufreq.h> #include <linux/threads.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <asm/asi.h> #include <asm/timer.h> static struct cpufreq_driver *cpufreq_us2e_driver; struct us2e_freq_percpu_info { struct cpufreq_frequency_table table[6]; }; /* Indexed by cpu number. */ static struct us2e_freq_percpu_info *us2e_freq_table; #define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL #define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL /* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled * in the ESTAR mode control register. */ #define ESTAR_MODE_DIV_1 0x0000000000000000UL #define ESTAR_MODE_DIV_2 0x0000000000000001UL #define ESTAR_MODE_DIV_4 0x0000000000000003UL #define ESTAR_MODE_DIV_6 0x0000000000000002UL #define ESTAR_MODE_DIV_8 0x0000000000000004UL #define ESTAR_MODE_DIV_MASK 0x0000000000000007UL #define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL #define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL #define MCTRL0_REFR_COUNT_SHIFT 8 #define MCTRL0_REFR_INTERVAL 7800 #define MCTRL0_REFR_CLKS_P_CNT 64 static unsigned long read_hbreg(unsigned long addr) { unsigned long ret; __asm__ __volatile__("ldxa [%1] %2, %0" : "=&r" (ret) : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); return ret; } static void write_hbreg(unsigned long addr, unsigned long val) { __asm__ __volatile__("stxa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) : "memory"); if (addr == HBIRD_ESTAR_MODE_ADDR) { /* Need to wait 16 clock cycles for the PLL to lock. 
*/ udelay(1); } } static void self_refresh_ctl(int enable) { unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); if (enable) mctrl |= MCTRL0_SREFRESH_ENAB; else mctrl &= ~MCTRL0_SREFRESH_ENAB; write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR); } static void frob_mem_refresh(int cpu_slowing_down, unsigned long clock_tick, unsigned long old_divisor, unsigned long divisor) { unsigned long old_refr_count, refr_count, mctrl; refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK) >> MCTRL0_REFR_COUNT_SHIFT; mctrl &= ~MCTRL0_REFR_COUNT_MASK; mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT; write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) { unsigned long usecs; /* We have to wait for both refresh counts (old * and new) to go to zero. */ usecs = (MCTRL0_REFR_CLKS_P_CNT * (refr_count + old_refr_count) * 1000000UL * old_divisor) / clock_tick; udelay(usecs + 1UL); } } static void us2e_transition(unsigned long estar, unsigned long new_bits, unsigned long clock_tick, unsigned long old_divisor, unsigned long divisor) { unsigned long flags; local_irq_save(flags); estar &= ~ESTAR_MODE_DIV_MASK; /* This is based upon the state transition diagram in the IIe manual. 
*/ if (old_divisor == 2 && divisor == 1) { self_refresh_ctl(0); write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); frob_mem_refresh(0, clock_tick, old_divisor, divisor); } else if (old_divisor == 1 && divisor == 2) { frob_mem_refresh(1, clock_tick, old_divisor, divisor); write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); self_refresh_ctl(1); } else if (old_divisor == 1 && divisor > 2) { us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, 1, 2); us2e_transition(estar, new_bits, clock_tick, 2, divisor); } else if (old_divisor > 2 && divisor == 1) { us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, old_divisor, 2); us2e_transition(estar, new_bits, clock_tick, 2, divisor); } else if (old_divisor < divisor) { frob_mem_refresh(0, clock_tick, old_divisor, divisor); write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); } else if (old_divisor > divisor) { write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); frob_mem_refresh(1, clock_tick, old_divisor, divisor); } else { BUG(); } local_irq_restore(flags); } static unsigned long index_to_estar_mode(unsigned int index) { switch (index) { case 0: return ESTAR_MODE_DIV_1; case 1: return ESTAR_MODE_DIV_2; case 2: return ESTAR_MODE_DIV_4; case 3: return ESTAR_MODE_DIV_6; case 4: return ESTAR_MODE_DIV_8; default: BUG(); } } static unsigned long index_to_divisor(unsigned int index) { switch (index) { case 0: return 1; case 1: return 2; case 2: return 4; case 3: return 6; case 4: return 8; default: BUG(); } } static unsigned long estar_to_divisor(unsigned long estar) { unsigned long ret; switch (estar & ESTAR_MODE_DIV_MASK) { case ESTAR_MODE_DIV_1: ret = 1; break; case ESTAR_MODE_DIV_2: ret = 2; break; case ESTAR_MODE_DIV_4: ret = 4; break; case ESTAR_MODE_DIV_6: ret = 6; break; case ESTAR_MODE_DIV_8: ret = 8; break; default: BUG(); } return ret; } static unsigned int us2e_freq_get(unsigned int cpu) { cpumask_t cpus_allowed; unsigned long clock_tick, estar; cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); 
set_cpus_allowed_ptr(current, cpumask_of(cpu)); clock_tick = sparc64_get_clock_tick(cpu) / 1000; estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); set_cpus_allowed_ptr(current, &cpus_allowed); return clock_tick / estar_to_divisor(estar); } static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index) { unsigned int cpu = policy->cpu; unsigned long new_bits, new_freq; unsigned long clock_tick, divisor, old_divisor, estar; cpumask_t cpus_allowed; cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; new_bits = index_to_estar_mode(index); divisor = index_to_divisor(index); new_freq /= divisor; estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); old_divisor = estar_to_divisor(estar); if (old_divisor != divisor) us2e_transition(estar, new_bits, clock_tick * 1000, old_divisor, divisor); set_cpus_allowed_ptr(current, &cpus_allowed); return 0; } static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; struct cpufreq_frequency_table *table = &us2e_freq_table[cpu].table[0]; table[0].driver_data = 0; table[0].frequency = clock_tick / 1; table[1].driver_data = 1; table[1].frequency = clock_tick / 2; table[2].driver_data = 2; table[2].frequency = clock_tick / 4; table[2].driver_data = 3; table[2].frequency = clock_tick / 6; table[2].driver_data = 4; table[2].frequency = clock_tick / 8; table[2].driver_data = 5; table[3].frequency = CPUFREQ_TABLE_END; policy->cpuinfo.transition_latency = 0; policy->cur = clock_tick; return cpufreq_table_validate_and_show(policy, table); } static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) { if (cpufreq_us2e_driver) us2e_freq_target(policy, 0); return 0; } static int __init us2e_freq_init(void) { unsigned long manuf, impl, ver; int ret; if (tlb_type != spitfire) return -ENODEV; __asm__("rdpr %%ver, %0" : "=r" (ver)); 
manuf = ((ver >> 48) & 0xffff); impl = ((ver >> 32) & 0xffff); if (manuf == 0x17 && impl == 0x13) { struct cpufreq_driver *driver; ret = -ENOMEM; driver = kzalloc(sizeof(*driver), GFP_KERNEL); if (!driver) goto err_out; us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)), GFP_KERNEL); if (!us2e_freq_table) goto err_out; driver->init = us2e_freq_cpu_init; driver->verify = cpufreq_generic_frequency_table_verify; driver->target_index = us2e_freq_target; driver->get = us2e_freq_get; driver->exit = us2e_freq_cpu_exit; strcpy(driver->name, "UltraSPARC-IIe"); cpufreq_us2e_driver = driver; ret = cpufreq_register_driver(driver); if (ret) goto err_out; return 0; err_out: if (driver) { kfree(driver); cpufreq_us2e_driver = NULL; } kfree(us2e_freq_table); us2e_freq_table = NULL; return ret; } return -ENODEV; } static void __exit us2e_freq_exit(void) { if (cpufreq_us2e_driver) { cpufreq_unregister_driver(cpufreq_us2e_driver); kfree(cpufreq_us2e_driver); cpufreq_us2e_driver = NULL; kfree(us2e_freq_table); us2e_freq_table = NULL; } } MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe"); MODULE_LICENSE("GPL"); module_init(us2e_freq_init); module_exit(us2e_freq_exit);
gpl-2.0
bq-dev/android_kernel_bq_common
drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
2312
1875
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <subdev/devinit.h>
#include <subdev/vga.h>

/* Per-instance state for the NV1A flavour of the devinit subdev. */
struct nv1a_devinit_priv {
	struct nouveau_devinit base;	/* common devinit base object */
	u8 owner;	/* not referenced in this file -- presumably VGA
			 * owner bookkeeping shared with the nv04 code;
			 * TODO(review): confirm against nv04 devinit */
};

/*
 * nv1a_devinit_ctor - construct an NV1A devinit object.
 *
 * Thin wrapper around nouveau_devinit_create(); note that *pobject is
 * published before the error check, so the caller can destroy the
 * partially-constructed object if creation failed.
 *
 * Returns 0 on success or the negative error from
 * nouveau_devinit_create().
 */
static int
nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nv1a_devinit_priv *priv;
	int ret;

	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	return 0;
}

/*
 * NV1A only differs from NV04 in how the object is constructed; the
 * destructor, init and fini hooks are reused from the nv04 devinit
 * implementation.
 */
struct nouveau_oclass
nv1a_devinit_oclass = {
	.handle = NV_SUBDEV(DEVINIT, 0x1a),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv1a_devinit_ctor,
		.dtor = nv04_devinit_dtor,
		.init = nv04_devinit_init,
		.fini = nv04_devinit_fini,
	},
};
gpl-2.0
Myself5/android_kernel_sony_msm
drivers/staging/vt6655/michael.c
2568
4631
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: michael.cpp * * Purpose: The implementation of LIST data structure. * * Author: Kyle Hsu * * Date: Sep 4, 2002 * * Functions: * s_dwGetUINT32 - Convert from unsigned char [] to unsigned long in a portable way * s_vPutUINT32 - Convert from unsigned long to unsigned char [] in a portable way * s_vClear - Reset the state to the empty message. * s_vSetKey - Set the key. * MIC_vInit - Set the key. * s_vAppendByte - Append the byte to our word-sized buffer. * MIC_vAppend - call s_vAppendByte. * MIC_vGetMIC - Append the minimum padding and call s_vAppendByte. * * Revision History: * */ #include "tmacro.h" #include "michael.h" /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ /* static unsigned long s_dwGetUINT32(unsigned char *p); // Get unsigned long from 4 bytes LSByte first static void s_vPutUINT32(unsigned char *p, unsigned long val); // Put unsigned long into 4 bytes LSByte first */ static void s_vClear(void); // Clear the internal message, // resets the object to the state just after construction. 
static void s_vSetKey(unsigned long dwK0, unsigned long dwK1);

static void s_vAppendByte(unsigned char b);	// Add a single byte to the internal message

/*--------------------- Export Variables --------------------------*/

/*
 * The entire Michael MIC state lives in these file-scope globals, so at
 * most one MIC computation can be in flight at a time.
 * NOTE(review): nothing here provides locking -- presumably callers
 * serialise access; confirm at the call sites.
 */
static unsigned long L, R;		// Current state
static unsigned long K0, K1;		// Key
static unsigned long M;			// Message accumulator (single word)
static unsigned int nBytesInM;		// # bytes in M

/*--------------------- Export Functions --------------------------*/

/*
static unsigned long s_dwGetUINT32 (unsigned char *p)
// Convert from unsigned char [] to unsigned long in a portable way
{
	unsigned long res = 0;
	unsigned int i;
	for (i=0; i<4; i++) {
		res |= (*p++) << (8 * i);
	}
	return res;
}

static void s_vPutUINT32 (unsigned char *p, unsigned long val)
// Convert from unsigned long to unsigned char [] in a portable way
{
	unsigned int i;
	for (i=0; i<4; i++) {
		*p++ = (unsigned char) (val & 0xff);
		val >>= 8;
	}
}
*/

// Reset the state to the empty message: (L,R) starts from the key and
// the partial-word buffer is cleared.
static void s_vClear(void)
{
	L = K0;
	R = K1;
	nBytesInM = 0;
	M = 0;
}

// Set the key (two 32-bit halves) and reset the message state.
static void s_vSetKey(unsigned long dwK0, unsigned long dwK1)
{
	// Set the key
	K0 = dwK0;
	K1 = dwK1;
	// and reset the message
	s_vClear();
}

// Append one byte to the running MIC.  Bytes accumulate LSByte-first in
// M; once four have been gathered the Michael block function mixes the
// word into (L,R) and the buffer is cleared.
static void s_vAppendByte(unsigned char b)
{
	// Append the byte to our word-sized buffer
	M |= b << (8*nBytesInM);
	nBytesInM++;
	// Process the word if it is full.
	if (nBytesInM >= 4) {
		L ^= M;
		R ^= ROL32(L, 17);
		L += R;
		// byte-swap each 16-bit half of L into R
		R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);
		L += R;
		R ^= ROL32(L, 3);
		L += R;
		R ^= ROR32(L, 2);
		L += R;
		// Clear the buffer
		M = 0;
		nBytesInM = 0;
	}
}

// Public entry point: install the 64-bit MIC key and reset the state.
void MIC_vInit(unsigned long dwK0, unsigned long dwK1)
{
	// Set the key
	s_vSetKey(dwK0, dwK1);
}

// Public entry point: scrub the key material when the MIC is retired.
void MIC_vUnInit(void)
{
	// Wipe the key material
	K0 = 0;
	K1 = 0;

	// And the other fields as well.
	// Note that this sets (L,R) to (K0,K1) which is just fine.
	s_vClear();
}

// Feed nBytes of message data, one byte at a time, into the MIC.
void MIC_vAppend(unsigned char *src, unsigned int nBytes)
{
	// This is simple
	while (nBytes > 0) {
		s_vAppendByte(*src++);
		nBytes--;
	}
}

// Finalize the MIC: append the 0x5a terminator byte plus at least four
// zero bytes, then pad with zeroes to a word boundary so the last block
// is processed, and return the result in (*pdwL, *pdwR).
void MIC_vGetMIC(unsigned long *pdwL, unsigned long *pdwR)
{
	// Append the minimum padding
	s_vAppendByte(0x5a);
	s_vAppendByte(0);
	s_vAppendByte(0);
	s_vAppendByte(0);
	s_vAppendByte(0);
	// and then zeroes until the length is a multiple of 4
	while (nBytesInM != 0) {
		s_vAppendByte(0);
	}
	// The s_vAppendByte function has already computed the result.
	*pdwL = L;
	*pdwR = R;
	// Reset to the empty message.
	s_vClear();
}
gpl-2.0
BlackBox-Kernel/blackbox_tomato_lp
fs/hfsplus/attributes.c
2568
9319
/* * linux/fs/hfsplus/attributes.c * * Vyacheslav Dubeyko <slava@dubeyko.com> * * Handling of records in attributes tree */ #include "hfsplus_fs.h" #include "hfsplus_raw.h" static struct kmem_cache *hfsplus_attr_tree_cachep; int hfsplus_create_attr_tree_cache(void) { if (hfsplus_attr_tree_cachep) return -EEXIST; hfsplus_attr_tree_cachep = kmem_cache_create("hfsplus_attr_cache", sizeof(hfsplus_attr_entry), 0, SLAB_HWCACHE_ALIGN, NULL); if (!hfsplus_attr_tree_cachep) return -ENOMEM; return 0; } void hfsplus_destroy_attr_tree_cache(void) { kmem_cache_destroy(hfsplus_attr_tree_cachep); } int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *k1, const hfsplus_btree_key *k2) { __be32 k1_cnid, k2_cnid; k1_cnid = k1->attr.cnid; k2_cnid = k2->attr.cnid; if (k1_cnid != k2_cnid) return be32_to_cpu(k1_cnid) < be32_to_cpu(k2_cnid) ? -1 : 1; return hfsplus_strcmp( (const struct hfsplus_unistr *)&k1->attr.key_name, (const struct hfsplus_unistr *)&k2->attr.key_name); } int hfsplus_attr_build_key(struct super_block *sb, hfsplus_btree_key *key, u32 cnid, const char *name) { int len; memset(key, 0, sizeof(struct hfsplus_attr_key)); key->attr.cnid = cpu_to_be32(cnid); if (name) { len = strlen(name); if (len > HFSPLUS_ATTR_MAX_STRLEN) { pr_err("invalid xattr name's length\n"); return -EINVAL; } hfsplus_asc2uni(sb, (struct hfsplus_unistr *)&key->attr.key_name, HFSPLUS_ATTR_MAX_STRLEN, name, len); len = be16_to_cpu(key->attr.key_name.length); } else { key->attr.key_name.length = 0; len = 0; } /* The length of the key, as stored in key_len field, does not include * the size of the key_len field itself. * So, offsetof(hfsplus_attr_key, key_name) is a trick because * it takes into consideration key_len field (__be16) of * hfsplus_attr_key structure instead of length field (__be16) of * hfsplus_attr_unistr structure. 
*/ key->key_len = cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) + 2 * len); return 0; } void hfsplus_attr_build_key_uni(hfsplus_btree_key *key, u32 cnid, struct hfsplus_attr_unistr *name) { int ustrlen; memset(key, 0, sizeof(struct hfsplus_attr_key)); ustrlen = be16_to_cpu(name->length); key->attr.cnid = cpu_to_be32(cnid); key->attr.key_name.length = cpu_to_be16(ustrlen); ustrlen *= 2; memcpy(key->attr.key_name.unicode, name->unicode, ustrlen); /* The length of the key, as stored in key_len field, does not include * the size of the key_len field itself. * So, offsetof(hfsplus_attr_key, key_name) is a trick because * it takes into consideration key_len field (__be16) of * hfsplus_attr_key structure instead of length field (__be16) of * hfsplus_attr_unistr structure. */ key->key_len = cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) + ustrlen); } hfsplus_attr_entry *hfsplus_alloc_attr_entry(void) { return kmem_cache_alloc(hfsplus_attr_tree_cachep, GFP_KERNEL); } void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry) { if (entry) kmem_cache_free(hfsplus_attr_tree_cachep, entry); } #define HFSPLUS_INVALID_ATTR_RECORD -1 static int hfsplus_attr_build_record(hfsplus_attr_entry *entry, int record_type, u32 cnid, const void *value, size_t size) { if (record_type == HFSPLUS_ATTR_FORK_DATA) { /* * Mac OS X supports only inline data attributes. * Do nothing */ memset(entry, 0, sizeof(*entry)); return sizeof(struct hfsplus_attr_fork_data); } else if (record_type == HFSPLUS_ATTR_EXTENTS) { /* * Mac OS X supports only inline data attributes. * Do nothing. 
*/ memset(entry, 0, sizeof(*entry)); return sizeof(struct hfsplus_attr_extents); } else if (record_type == HFSPLUS_ATTR_INLINE_DATA) { u16 len; memset(entry, 0, sizeof(struct hfsplus_attr_inline_data)); entry->inline_data.record_type = cpu_to_be32(record_type); if (size <= HFSPLUS_MAX_INLINE_DATA_SIZE) len = size; else return HFSPLUS_INVALID_ATTR_RECORD; entry->inline_data.length = cpu_to_be16(len); memcpy(entry->inline_data.raw_bytes, value, len); /* * Align len on two-byte boundary. * It needs to add pad byte if we have odd len. */ len = round_up(len, 2); return offsetof(struct hfsplus_attr_inline_data, raw_bytes) + len; } else /* invalid input */ memset(entry, 0, sizeof(*entry)); return HFSPLUS_INVALID_ATTR_RECORD; } int hfsplus_find_attr(struct super_block *sb, u32 cnid, const char *name, struct hfs_find_data *fd) { int err = 0; hfs_dbg(ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid); if (!HFSPLUS_SB(sb)->attr_tree) { pr_err("attributes file doesn't exist\n"); return -EINVAL; } if (name) { err = hfsplus_attr_build_key(sb, fd->search_key, cnid, name); if (err) goto failed_find_attr; err = hfs_brec_find(fd, hfs_find_rec_by_key); if (err) goto failed_find_attr; } else { err = hfsplus_attr_build_key(sb, fd->search_key, cnid, NULL); if (err) goto failed_find_attr; err = hfs_brec_find(fd, hfs_find_1st_rec_by_cnid); if (err) goto failed_find_attr; } failed_find_attr: return err; } int hfsplus_attr_exists(struct inode *inode, const char *name) { int err = 0; struct super_block *sb = inode->i_sb; struct hfs_find_data fd; if (!HFSPLUS_SB(sb)->attr_tree) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd); if (err) return 0; err = hfsplus_find_attr(sb, inode->i_ino, name, &fd); if (err) goto attr_not_found; hfs_find_exit(&fd); return 1; attr_not_found: hfs_find_exit(&fd); return 0; } int hfsplus_create_attr(struct inode *inode, const char *name, const void *value, size_t size) { struct super_block *sb = inode->i_sb; struct hfs_find_data fd; 
hfsplus_attr_entry *entry_ptr; int entry_size; int err; hfs_dbg(ATTR_MOD, "create_attr: %s,%ld\n", name ? name : NULL, inode->i_ino); if (!HFSPLUS_SB(sb)->attr_tree) { pr_err("attributes file doesn't exist\n"); return -EINVAL; } entry_ptr = hfsplus_alloc_attr_entry(); if (!entry_ptr) return -ENOMEM; err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd); if (err) goto failed_init_create_attr; if (name) { err = hfsplus_attr_build_key(sb, fd.search_key, inode->i_ino, name); if (err) goto failed_create_attr; } else { err = -EINVAL; goto failed_create_attr; } /* Mac OS X supports only inline data attributes. */ entry_size = hfsplus_attr_build_record(entry_ptr, HFSPLUS_ATTR_INLINE_DATA, inode->i_ino, value, size); if (entry_size == HFSPLUS_INVALID_ATTR_RECORD) { err = -EINVAL; goto failed_create_attr; } err = hfs_brec_find(&fd, hfs_find_rec_by_key); if (err != -ENOENT) { if (!err) err = -EEXIST; goto failed_create_attr; } err = hfs_brec_insert(&fd, entry_ptr, entry_size); if (err) goto failed_create_attr; hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY); failed_create_attr: hfs_find_exit(&fd); failed_init_create_attr: hfsplus_destroy_attr_entry(entry_ptr); return err; } static int __hfsplus_delete_attr(struct inode *inode, u32 cnid, struct hfs_find_data *fd) { int err = 0; __be32 found_cnid, record_type; hfs_bnode_read(fd->bnode, &found_cnid, fd->keyoffset + offsetof(struct hfsplus_attr_key, cnid), sizeof(__be32)); if (cnid != be32_to_cpu(found_cnid)) return -ENOENT; hfs_bnode_read(fd->bnode, &record_type, fd->entryoffset, sizeof(record_type)); switch (be32_to_cpu(record_type)) { case HFSPLUS_ATTR_INLINE_DATA: /* All is OK. Do nothing. 
*/ break; case HFSPLUS_ATTR_FORK_DATA: case HFSPLUS_ATTR_EXTENTS: pr_err("only inline data xattr are supported\n"); return -EOPNOTSUPP; default: pr_err("invalid extended attribute record\n"); return -ENOENT; } err = hfs_brec_remove(fd); if (err) return err; hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY); return err; } int hfsplus_delete_attr(struct inode *inode, const char *name) { int err = 0; struct super_block *sb = inode->i_sb; struct hfs_find_data fd; hfs_dbg(ATTR_MOD, "delete_attr: %s,%ld\n", name ? name : NULL, inode->i_ino); if (!HFSPLUS_SB(sb)->attr_tree) { pr_err("attributes file doesn't exist\n"); return -EINVAL; } err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd); if (err) return err; if (name) { err = hfsplus_attr_build_key(sb, fd.search_key, inode->i_ino, name); if (err) goto out; } else { pr_err("invalid extended attribute name\n"); err = -EINVAL; goto out; } err = hfs_brec_find(&fd, hfs_find_rec_by_key); if (err) goto out; err = __hfsplus_delete_attr(inode, inode->i_ino, &fd); if (err) goto out; out: hfs_find_exit(&fd); return err; } int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid) { int err = 0; struct hfs_find_data fd; hfs_dbg(ATTR_MOD, "delete_all_attrs: %d\n", cnid); if (!HFSPLUS_SB(dir->i_sb)->attr_tree) { pr_err("attributes file doesn't exist\n"); return -EINVAL; } err = hfs_find_init(HFSPLUS_SB(dir->i_sb)->attr_tree, &fd); if (err) return err; for (;;) { err = hfsplus_find_attr(dir->i_sb, cnid, NULL, &fd); if (err) { if (err != -ENOENT) pr_err("xattr search failed\n"); goto end_delete_all; } err = __hfsplus_delete_attr(dir, cnid, &fd); if (err) goto end_delete_all; } end_delete_all: hfs_find_exit(&fd); return err; }
gpl-2.0
MeltedButter/kernel_msm
drivers/media/usb/dvb-usb-v2/au6610.c
4616
5236
/* * DVB USB Linux driver for Alcor Micro AU6610 DVB-T USB2.0. * * Copyright (C) 2006 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "au6610.h" #include "zl10353.h" #include "qt1010.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int au6610_usb_msg(struct dvb_usb_device *d, u8 operation, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { int ret; u16 index; u8 *usb_buf; /* * allocate enough for all known requests, * read returns 5 and write 6 bytes */ usb_buf = kmalloc(6, GFP_KERNEL); if (!usb_buf) return -ENOMEM; switch (wlen) { case 1: index = wbuf[0] << 8; break; case 2: index = wbuf[0] << 8; index += wbuf[1]; break; default: dev_err(&d->udev->dev, "%s: wlen=%d, aborting\n", KBUILD_MODNAME, wlen); ret = -EINVAL; goto error; } ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), operation, USB_TYPE_VENDOR|USB_DIR_IN, addr << 1, index, usb_buf, 6, AU6610_USB_TIMEOUT); dvb_usb_dbg_usb_control_msg(d->udev, operation, (USB_TYPE_VENDOR|USB_DIR_IN), addr << 1, index, usb_buf, 6); if (ret < 0) goto error; switch (operation) { case AU6610_REQ_I2C_READ: case AU6610_REQ_USB_READ: /* requested value is always 5th byte in buffer */ rbuf[0] = usb_buf[4]; } error: kfree(usb_buf); return ret; } static int au6610_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { u8 
request; u8 wo = (rbuf == NULL || rlen == 0); /* write-only */ if (wo) { request = AU6610_REQ_I2C_WRITE; } else { /* rw */ request = AU6610_REQ_I2C_READ; } return au6610_usb_msg(d, request, addr, wbuf, wlen, rbuf, rlen); } /* I2C */ static int au6610_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i; if (num > 2) return -EINVAL; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { /* write/read request */ if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf, msg[i].len, msg[i+1].buf, msg[i+1].len) < 0) break; i++; } else if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf, msg[i].len, NULL, 0) < 0) break; } mutex_unlock(&d->i2c_mutex); return i; } static u32 au6610_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm au6610_i2c_algo = { .master_xfer = au6610_i2c_xfer, .functionality = au6610_i2c_func, }; /* Callbacks for DVB USB */ static struct zl10353_config au6610_zl10353_config = { .demod_address = 0x0f, .no_tuner = 1, .parallel_ts = 1, }; static int au6610_zl10353_frontend_attach(struct dvb_usb_adapter *adap) { adap->fe[0] = dvb_attach(zl10353_attach, &au6610_zl10353_config, &adap_to_d(adap)->i2c_adap); if (adap->fe[0] == NULL) return -ENODEV; return 0; } static struct qt1010_config au6610_qt1010_config = { .i2c_address = 0x62 }; static int au6610_qt1010_tuner_attach(struct dvb_usb_adapter *adap) { return dvb_attach(qt1010_attach, adap->fe[0], &adap_to_d(adap)->i2c_adap, &au6610_qt1010_config) == NULL ? 
-ENODEV : 0; } static int au6610_init(struct dvb_usb_device *d) { /* TODO: this functionality belongs likely to the streaming control */ /* bInterfaceNumber 0, bAlternateSetting 5 */ return usb_set_interface(d->udev, 0, 5); } static struct dvb_usb_device_properties au6610_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .adapter_nr = adapter_nr, .i2c_algo = &au6610_i2c_algo, .frontend_attach = au6610_zl10353_frontend_attach, .tuner_attach = au6610_qt1010_tuner_attach, .init = au6610_init, .num_adapters = 1, .adapter = { { .stream = DVB_USB_STREAM_ISOC(0x82, 5, 40, 942, 1), }, }, }; static const struct usb_device_id au6610_id_table[] = { { DVB_USB_DEVICE(USB_VID_ALCOR_MICRO, USB_PID_SIGMATEK_DVB_110, &au6610_props, "Sigmatek DVB-110", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, au6610_id_table); static struct usb_driver au6610_driver = { .name = KBUILD_MODNAME, .id_table = au6610_id_table, .probe = dvb_usbv2_probe, .disconnect = dvb_usbv2_disconnect, .suspend = dvb_usbv2_suspend, .resume = dvb_usbv2_resume, .reset_resume = dvb_usbv2_reset_resume, .no_dynamic_id = 1, .soft_unbind = 1, }; module_usb_driver(au6610_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Driver for Alcor Micro AU6610 DVB-T USB2.0"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL");
gpl-2.0
TipsyOs-Devices/android_kernel_samsung_trlte
drivers/media/pci/cx18/cx18-av-vbi.c
4616
8979
/* * cx18 ADEC VBI functions * * Derived from cx25840-vbi.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include "cx18-driver.h" /* * For sliced VBI output, we set up to use VIP-1.1, 8-bit mode, * NN counts 1 byte Dwords, an IDID with the VBI line # in it. * Thus, according to the VIP-2 Spec, our VBI ancillary data lines * (should!) 
look like: * 4 byte EAV code: 0xff 0x00 0x00 0xRP * unknown number of possible idle bytes * 3 byte Anc data preamble: 0x00 0xff 0xff * 1 byte data identifier: ne010iii (parity bits, 010, DID bits) * 1 byte secondary data id: nessssss (parity bits, SDID bits) * 1 byte data word count: necccccc (parity bits, NN Dword count) * 2 byte Internal DID: VBI-line-# 0x80 * NN data bytes * 1 byte checksum * Fill bytes needed to fil out to 4*NN bytes of payload * * The RP codes for EAVs when in VIP-1.1 mode, not in raw mode, & * in the vertical blanking interval are: * 0xb0 (Task 0 VerticalBlank HorizontalBlank 0 0 0 0) * 0xf0 (Task EvenField VerticalBlank HorizontalBlank 0 0 0 0) * * Since the V bit is only allowed to toggle in the EAV RP code, just * before the first active region line and for active lines, they are: * 0x90 (Task 0 0 HorizontalBlank 0 0 0 0) * 0xd0 (Task EvenField 0 HorizontalBlank 0 0 0 0) * * The user application DID bytes we care about are: * 0x91 (1 0 010 0 !ActiveLine AncDataPresent) * 0x55 (0 1 010 2ndField !ActiveLine AncDataPresent) * */ static const u8 sliced_vbi_did[2] = { 0x91, 0x55 }; struct vbi_anc_data { /* u8 eav[4]; */ /* u8 idle[]; Variable number of idle bytes */ u8 preamble[3]; u8 did; u8 sdid; u8 data_count; u8 idid[2]; u8 payload[1]; /* data_count of payload */ /* u8 checksum; */ /* u8 fill[]; Variable number of fill bytes */ }; static int odd_parity(u8 c) { c ^= (c >> 4); c ^= (c >> 2); c ^= (c >> 1); return c & 1; } static int decode_vps(u8 *dst, u8 *p) { static const u8 biphase_tbl[] = { 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, 0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96, 0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2, 0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94, 0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0, 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, 0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5, 0xa1, 0x29, 0x21, 0xa1, 
0xe1, 0x69, 0x61, 0xe1, 0xc3, 0x4b, 0x43, 0xc3, 0x87, 0x0f, 0x07, 0x87, 0x83, 0x0b, 0x03, 0x83, 0xc3, 0x4b, 0x43, 0xc3, 0xc1, 0x49, 0x41, 0xc1, 0x85, 0x0d, 0x05, 0x85, 0x81, 0x09, 0x01, 0x81, 0xc1, 0x49, 0x41, 0xc1, 0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5, 0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1, 0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4, 0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0, 0xc2, 0x4a, 0x42, 0xc2, 0x86, 0x0e, 0x06, 0x86, 0x82, 0x0a, 0x02, 0x82, 0xc2, 0x4a, 0x42, 0xc2, 0xc0, 0x48, 0x40, 0xc0, 0x84, 0x0c, 0x04, 0x84, 0x80, 0x08, 0x00, 0x80, 0xc0, 0x48, 0x40, 0xc0, 0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4, 0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0, 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, 0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96, 0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2, 0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94, 0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0, 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, }; u8 c, err = 0; int i; for (i = 0; i < 2 * 13; i += 2) { err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]]; c = (biphase_tbl[p[i + 1]] & 0xf) | ((biphase_tbl[p[i]] & 0xf) << 4); dst[i / 2] = c; } return err & 0xf0; } int cx18_av_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi) { struct cx18 *cx = v4l2_get_subdevdata(sd); struct cx18_av_state *state = &cx->av_state; static const u16 lcr2vbi[] = { 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */ 0, V4L2_SLICED_WSS_625, 0, /* 4 */ V4L2_SLICED_CAPTION_525, /* 6 */ 0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */ 0, 0, 0, 0 }; int is_pal = !(state->std & V4L2_STD_525_60); int i; memset(svbi->service_lines, 0, sizeof(svbi->service_lines)); svbi->service_set = 0; /* we're done if raw VBI is active */ if ((cx18_av_read(cx, 0x404) & 0x10) == 0) return 0; if (is_pal) { for (i = 7; i <= 23; i++) { u8 v = cx18_av_read(cx, 0x424 + i - 7); svbi->service_lines[0][i] = 
lcr2vbi[v >> 4]; svbi->service_lines[1][i] = lcr2vbi[v & 0xf]; svbi->service_set |= svbi->service_lines[0][i] | svbi->service_lines[1][i]; } } else { for (i = 10; i <= 21; i++) { u8 v = cx18_av_read(cx, 0x424 + i - 10); svbi->service_lines[0][i] = lcr2vbi[v >> 4]; svbi->service_lines[1][i] = lcr2vbi[v & 0xf]; svbi->service_set |= svbi->service_lines[0][i] | svbi->service_lines[1][i]; } } return 0; } int cx18_av_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt) { struct cx18 *cx = v4l2_get_subdevdata(sd); struct cx18_av_state *state = &cx->av_state; /* Setup standard */ cx18_av_std_setup(cx); /* VBI Offset */ cx18_av_write(cx, 0x47f, state->slicer_line_delay); cx18_av_write(cx, 0x404, 0x2e); return 0; } int cx18_av_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi) { struct cx18 *cx = v4l2_get_subdevdata(sd); struct cx18_av_state *state = &cx->av_state; int is_pal = !(state->std & V4L2_STD_525_60); int i, x; u8 lcr[24]; for (x = 0; x <= 23; x++) lcr[x] = 0x00; /* Setup standard */ cx18_av_std_setup(cx); /* Sliced VBI */ cx18_av_write(cx, 0x404, 0x32); /* Ancillary data */ cx18_av_write(cx, 0x406, 0x13); cx18_av_write(cx, 0x47f, state->slicer_line_delay); /* Force impossible lines to 0 */ if (is_pal) { for (i = 0; i <= 6; i++) svbi->service_lines[0][i] = svbi->service_lines[1][i] = 0; } else { for (i = 0; i <= 9; i++) svbi->service_lines[0][i] = svbi->service_lines[1][i] = 0; for (i = 22; i <= 23; i++) svbi->service_lines[0][i] = svbi->service_lines[1][i] = 0; } /* Build register values for requested service lines */ for (i = 7; i <= 23; i++) { for (x = 0; x <= 1; x++) { switch (svbi->service_lines[1-x][i]) { case V4L2_SLICED_TELETEXT_B: lcr[i] |= 1 << (4 * x); break; case V4L2_SLICED_WSS_625: lcr[i] |= 4 << (4 * x); break; case V4L2_SLICED_CAPTION_525: lcr[i] |= 6 << (4 * x); break; case V4L2_SLICED_VPS: lcr[i] |= 9 << (4 * x); break; } } } if (is_pal) { for (x = 1, i = 0x424; i <= 0x434; i++, x++) cx18_av_write(cx, i, lcr[6 + 
x]); } else { for (x = 1, i = 0x424; i <= 0x430; i++, x++) cx18_av_write(cx, i, lcr[9 + x]); for (i = 0x431; i <= 0x434; i++) cx18_av_write(cx, i, 0); } cx18_av_write(cx, 0x43c, 0x16); /* Should match vblank set in cx18_av_std_setup() */ cx18_av_write(cx, 0x474, is_pal ? 38 : 26); return 0; } int cx18_av_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi) { struct cx18 *cx = v4l2_get_subdevdata(sd); struct cx18_av_state *state = &cx->av_state; struct vbi_anc_data *anc = (struct vbi_anc_data *)vbi->p; u8 *p; int did, sdid, l, err = 0; /* * Check for the ancillary data header for sliced VBI */ if (anc->preamble[0] || anc->preamble[1] != 0xff || anc->preamble[2] != 0xff || (anc->did != sliced_vbi_did[0] && anc->did != sliced_vbi_did[1])) { vbi->line = vbi->type = 0; return 0; } did = anc->did; sdid = anc->sdid & 0xf; l = anc->idid[0] & 0x3f; l += state->slicer_line_offset; p = anc->payload; /* Decode the SDID set by the slicer */ switch (sdid) { case 1: sdid = V4L2_SLICED_TELETEXT_B; break; case 4: sdid = V4L2_SLICED_WSS_625; break; case 6: sdid = V4L2_SLICED_CAPTION_525; err = !odd_parity(p[0]) || !odd_parity(p[1]); break; case 9: sdid = V4L2_SLICED_VPS; if (decode_vps(p, p) != 0) err = 1; break; default: sdid = 0; err = 1; break; } vbi->type = err ? 0 : sdid; vbi->line = err ? 0 : l; vbi->is_second_field = err ? 0 : (did == sliced_vbi_did[1]); vbi->p = p; return 0; }
gpl-2.0
lumenosys/lumenosys-adi-linux
drivers/media/usb/dvb-usb-v2/au6610.c
4616
5236
/* * DVB USB Linux driver for Alcor Micro AU6610 DVB-T USB2.0. * * Copyright (C) 2006 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "au6610.h" #include "zl10353.h" #include "qt1010.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int au6610_usb_msg(struct dvb_usb_device *d, u8 operation, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { int ret; u16 index; u8 *usb_buf; /* * allocate enough for all known requests, * read returns 5 and write 6 bytes */ usb_buf = kmalloc(6, GFP_KERNEL); if (!usb_buf) return -ENOMEM; switch (wlen) { case 1: index = wbuf[0] << 8; break; case 2: index = wbuf[0] << 8; index += wbuf[1]; break; default: dev_err(&d->udev->dev, "%s: wlen=%d, aborting\n", KBUILD_MODNAME, wlen); ret = -EINVAL; goto error; } ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), operation, USB_TYPE_VENDOR|USB_DIR_IN, addr << 1, index, usb_buf, 6, AU6610_USB_TIMEOUT); dvb_usb_dbg_usb_control_msg(d->udev, operation, (USB_TYPE_VENDOR|USB_DIR_IN), addr << 1, index, usb_buf, 6); if (ret < 0) goto error; switch (operation) { case AU6610_REQ_I2C_READ: case AU6610_REQ_USB_READ: /* requested value is always 5th byte in buffer */ rbuf[0] = usb_buf[4]; } error: kfree(usb_buf); return ret; } static int au6610_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { u8 
request; u8 wo = (rbuf == NULL || rlen == 0); /* write-only */ if (wo) { request = AU6610_REQ_I2C_WRITE; } else { /* rw */ request = AU6610_REQ_I2C_READ; } return au6610_usb_msg(d, request, addr, wbuf, wlen, rbuf, rlen); } /* I2C */ static int au6610_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i; if (num > 2) return -EINVAL; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { /* write/read request */ if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf, msg[i].len, msg[i+1].buf, msg[i+1].len) < 0) break; i++; } else if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf, msg[i].len, NULL, 0) < 0) break; } mutex_unlock(&d->i2c_mutex); return i; } static u32 au6610_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm au6610_i2c_algo = { .master_xfer = au6610_i2c_xfer, .functionality = au6610_i2c_func, }; /* Callbacks for DVB USB */ static struct zl10353_config au6610_zl10353_config = { .demod_address = 0x0f, .no_tuner = 1, .parallel_ts = 1, }; static int au6610_zl10353_frontend_attach(struct dvb_usb_adapter *adap) { adap->fe[0] = dvb_attach(zl10353_attach, &au6610_zl10353_config, &adap_to_d(adap)->i2c_adap); if (adap->fe[0] == NULL) return -ENODEV; return 0; } static struct qt1010_config au6610_qt1010_config = { .i2c_address = 0x62 }; static int au6610_qt1010_tuner_attach(struct dvb_usb_adapter *adap) { return dvb_attach(qt1010_attach, adap->fe[0], &adap_to_d(adap)->i2c_adap, &au6610_qt1010_config) == NULL ? 
-ENODEV : 0; } static int au6610_init(struct dvb_usb_device *d) { /* TODO: this functionality belongs likely to the streaming control */ /* bInterfaceNumber 0, bAlternateSetting 5 */ return usb_set_interface(d->udev, 0, 5); } static struct dvb_usb_device_properties au6610_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .adapter_nr = adapter_nr, .i2c_algo = &au6610_i2c_algo, .frontend_attach = au6610_zl10353_frontend_attach, .tuner_attach = au6610_qt1010_tuner_attach, .init = au6610_init, .num_adapters = 1, .adapter = { { .stream = DVB_USB_STREAM_ISOC(0x82, 5, 40, 942, 1), }, }, }; static const struct usb_device_id au6610_id_table[] = { { DVB_USB_DEVICE(USB_VID_ALCOR_MICRO, USB_PID_SIGMATEK_DVB_110, &au6610_props, "Sigmatek DVB-110", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, au6610_id_table); static struct usb_driver au6610_driver = { .name = KBUILD_MODNAME, .id_table = au6610_id_table, .probe = dvb_usbv2_probe, .disconnect = dvb_usbv2_disconnect, .suspend = dvb_usbv2_suspend, .resume = dvb_usbv2_resume, .reset_resume = dvb_usbv2_reset_resume, .no_dynamic_id = 1, .soft_unbind = 1, }; module_usb_driver(au6610_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Driver for Alcor Micro AU6610 DVB-T USB2.0"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL");
gpl-2.0
sub77/kernel_samsung_matissewifi
security/selinux/netlabel.c
5128
12487
/* * SELinux NetLabel Support * * This file provides the necessary glue to tie NetLabel into the SELinux * subsystem. * * Author: Paul Moore <paul@paul-moore.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2007, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/gfp.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/sock.h> #include <net/netlabel.h> #include <net/ip.h> #include <net/ipv6.h> #include "objsec.h" #include "security.h" #include "netlabel.h" /** * selinux_netlbl_sidlookup_cached - Cache a SID lookup * @skb: the packet * @secattr: the NetLabel security attributes * @sid: the SID * * Description: * Query the SELinux security server to lookup the correct SID for the given * security attributes. If the query is successful, cache the result to speed * up future lookups. Returns zero on success, negative values on failure. 
* */ static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb, struct netlbl_lsm_secattr *secattr, u32 *sid) { int rc; rc = security_netlbl_secattr_to_sid(secattr, sid); if (rc == 0 && (secattr->flags & NETLBL_SECATTR_CACHEABLE) && (secattr->flags & NETLBL_SECATTR_CACHE)) netlbl_cache_add(skb, secattr); return rc; } /** * selinux_netlbl_sock_genattr - Generate the NetLabel socket secattr * @sk: the socket * * Description: * Generate the NetLabel security attributes for a socket, making full use of * the socket's attribute cache. Returns a pointer to the security attributes * on success, NULL on failure. * */ static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk) { int rc; struct sk_security_struct *sksec = sk->sk_security; struct netlbl_lsm_secattr *secattr; if (sksec->nlbl_secattr != NULL) return sksec->nlbl_secattr; secattr = netlbl_secattr_alloc(GFP_ATOMIC); if (secattr == NULL) return NULL; rc = security_netlbl_sid_to_secattr(sksec->sid, secattr); if (rc != 0) { netlbl_secattr_free(secattr); return NULL; } sksec->nlbl_secattr = secattr; return secattr; } /** * selinux_netlbl_cache_invalidate - Invalidate the NetLabel cache * * Description: * Invalidate the NetLabel security attribute mapping cache. * */ void selinux_netlbl_cache_invalidate(void) { netlbl_cache_invalidate(); } /** * selinux_netlbl_err - Handle a NetLabel packet error * @skb: the packet * @error: the error code * @gateway: true if host is acting as a gateway, false otherwise * * Description: * When a packet is dropped due to a call to avc_has_perm() pass the error * code to the NetLabel subsystem so any protocol specific processing can be * done. This is safe to call even if you are unsure if NetLabel labeling is * present on the packet, NetLabel is smart enough to only act when it should. 
* */ void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway) { netlbl_skbuff_err(skb, error, gateway); } /** * selinux_netlbl_sk_security_free - Free the NetLabel fields * @sksec: the sk_security_struct * * Description: * Free all of the memory in the NetLabel fields of a sk_security_struct. * */ void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec) { if (sksec->nlbl_secattr != NULL) netlbl_secattr_free(sksec->nlbl_secattr); } /** * selinux_netlbl_sk_security_reset - Reset the NetLabel fields * @sksec: the sk_security_struct * @family: the socket family * * Description: * Called when the NetLabel state of a sk_security_struct needs to be reset. * The caller is responsible for all the NetLabel sk_security_struct locking. * */ void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec) { sksec->nlbl_state = NLBL_UNSET; } /** * selinux_netlbl_skbuff_getsid - Get the sid of a packet using NetLabel * @skb: the packet * @family: protocol family * @type: NetLabel labeling protocol type * @sid: the SID * * Description: * Call the NetLabel mechanism to get the security attributes of the given * packet and use those attributes to determine the correct context/SID to * assign to the packet. Returns zero on success, negative values on failure. 
* */ int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u16 family, u32 *type, u32 *sid) { int rc; struct netlbl_lsm_secattr secattr; if (!netlbl_enabled()) { *sid = SECSID_NULL; return 0; } netlbl_secattr_init(&secattr); rc = netlbl_skbuff_getattr(skb, family, &secattr); if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) rc = selinux_netlbl_sidlookup_cached(skb, &secattr, sid); else *sid = SECSID_NULL; *type = secattr.type; netlbl_secattr_destroy(&secattr); return rc; } /** * selinux_netlbl_skbuff_setsid - Set the NetLabel on a packet given a sid * @skb: the packet * @family: protocol family * @sid: the SID * * Description * Call the NetLabel mechanism to set the label of a packet using @sid. * Returns zero on success, negative values on failure. * */ int selinux_netlbl_skbuff_setsid(struct sk_buff *skb, u16 family, u32 sid) { int rc; struct netlbl_lsm_secattr secattr_storage; struct netlbl_lsm_secattr *secattr = NULL; struct sock *sk; /* if this is a locally generated packet check to see if it is already * being labeled by it's parent socket, if it is just exit */ sk = skb->sk; if (sk != NULL) { struct sk_security_struct *sksec = sk->sk_security; if (sksec->nlbl_state != NLBL_REQSKB) return 0; secattr = sksec->nlbl_secattr; } if (secattr == NULL) { secattr = &secattr_storage; netlbl_secattr_init(secattr); rc = security_netlbl_sid_to_secattr(sid, secattr); if (rc != 0) goto skbuff_setsid_return; } rc = netlbl_skbuff_setattr(skb, family, secattr); skbuff_setsid_return: if (secattr == &secattr_storage) netlbl_secattr_destroy(secattr); return rc; } /** * selinux_netlbl_inet_conn_request - Label an incoming stream connection * @req: incoming connection request socket * * Description: * A new incoming connection request is represented by @req, we need to label * the new request_sock here and the stack will ensure the on-the-wire label * will get preserved when a full sock is created once the connection handshake * is complete. 
Returns zero on success, negative values on failure. * */ int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family) { int rc; struct netlbl_lsm_secattr secattr; if (family != PF_INET) return 0; netlbl_secattr_init(&secattr); rc = security_netlbl_sid_to_secattr(req->secid, &secattr); if (rc != 0) goto inet_conn_request_return; rc = netlbl_req_setattr(req, &secattr); inet_conn_request_return: netlbl_secattr_destroy(&secattr); return rc; } /** * selinux_netlbl_inet_csk_clone - Initialize the newly created sock * @sk: the new sock * * Description: * A new connection has been established using @sk, we've already labeled the * socket via the request_sock struct in selinux_netlbl_inet_conn_request() but * we need to set the NetLabel state here since we now have a sock structure. * */ void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family) { struct sk_security_struct *sksec = sk->sk_security; if (family == PF_INET) sksec->nlbl_state = NLBL_LABELED; else sksec->nlbl_state = NLBL_UNSET; } /** * selinux_netlbl_socket_post_create - Label a socket using NetLabel * @sock: the socket to label * @family: protocol family * * Description: * Attempt to label a socket using the NetLabel mechanism using the given * SID. Returns zero values on success, negative values on failure. 
* */ int selinux_netlbl_socket_post_create(struct sock *sk, u16 family) { int rc; struct sk_security_struct *sksec = sk->sk_security; struct netlbl_lsm_secattr *secattr; if (family != PF_INET) return 0; secattr = selinux_netlbl_sock_genattr(sk); if (secattr == NULL) return -ENOMEM; rc = netlbl_sock_setattr(sk, family, secattr); switch (rc) { case 0: sksec->nlbl_state = NLBL_LABELED; break; case -EDESTADDRREQ: sksec->nlbl_state = NLBL_REQSKB; rc = 0; break; } return rc; } /** * selinux_netlbl_sock_rcv_skb - Do an inbound access check using NetLabel * @sksec: the sock's sk_security_struct * @skb: the packet * @family: protocol family * @ad: the audit data * * Description: * Fetch the NetLabel security attributes from @skb and perform an access check * against the receiving socket. Returns zero on success, negative values on * error. * */ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec, struct sk_buff *skb, u16 family, struct common_audit_data *ad) { int rc; u32 nlbl_sid; u32 perm; struct netlbl_lsm_secattr secattr; if (!netlbl_enabled()) return 0; netlbl_secattr_init(&secattr); rc = netlbl_skbuff_getattr(skb, family, &secattr); if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) rc = selinux_netlbl_sidlookup_cached(skb, &secattr, &nlbl_sid); else nlbl_sid = SECINITSID_UNLABELED; netlbl_secattr_destroy(&secattr); if (rc != 0) return rc; switch (sksec->sclass) { case SECCLASS_UDP_SOCKET: perm = UDP_SOCKET__RECVFROM; break; case SECCLASS_TCP_SOCKET: perm = TCP_SOCKET__RECVFROM; break; default: perm = RAWIP_SOCKET__RECVFROM; } rc = avc_has_perm(sksec->sid, nlbl_sid, sksec->sclass, perm, ad); if (rc == 0) return 0; if (nlbl_sid != SECINITSID_UNLABELED) netlbl_skbuff_err(skb, rc, 0); return rc; } /** * selinux_netlbl_socket_setsockopt - Do not allow users to remove a NetLabel * @sock: the socket * @level: the socket level or protocol * @optname: the socket option name * * Description: * Check the setsockopt() call and if the user is trying to replace the 
IP * options on a socket and a NetLabel is in place for the socket deny the * access; otherwise allow the access. Returns zero when the access is * allowed, -EACCES when denied, and other negative values on error. * */ int selinux_netlbl_socket_setsockopt(struct socket *sock, int level, int optname) { int rc = 0; struct sock *sk = sock->sk; struct sk_security_struct *sksec = sk->sk_security; struct netlbl_lsm_secattr secattr; if (level == IPPROTO_IP && optname == IP_OPTIONS && (sksec->nlbl_state == NLBL_LABELED || sksec->nlbl_state == NLBL_CONNLABELED)) { netlbl_secattr_init(&secattr); lock_sock(sk); rc = netlbl_sock_getattr(sk, &secattr); release_sock(sk); if (rc == 0) rc = -EACCES; else if (rc == -ENOMSG) rc = 0; netlbl_secattr_destroy(&secattr); } return rc; } /** * selinux_netlbl_socket_connect - Label a client-side socket on connect * @sk: the socket to label * @addr: the destination address * * Description: * Attempt to label a connected socket with NetLabel using the given address. * Returns zero values on success, negative values on failure. * */ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr) { int rc; struct sk_security_struct *sksec = sk->sk_security; struct netlbl_lsm_secattr *secattr; if (sksec->nlbl_state != NLBL_REQSKB && sksec->nlbl_state != NLBL_CONNLABELED) return 0; local_bh_disable(); bh_lock_sock_nested(sk); /* connected sockets are allowed to disconnect when the address family * is set to AF_UNSPEC, if that is what is happening we want to reset * the socket */ if (addr->sa_family == AF_UNSPEC) { netlbl_sock_delattr(sk); sksec->nlbl_state = NLBL_REQSKB; rc = 0; goto socket_connect_return; } secattr = selinux_netlbl_sock_genattr(sk); if (secattr == NULL) { rc = -ENOMEM; goto socket_connect_return; } rc = netlbl_conn_setattr(sk, addr, secattr); if (rc == 0) sksec->nlbl_state = NLBL_CONNLABELED; socket_connect_return: bh_unlock_sock(sk); local_bh_enable(); return rc; }
gpl-2.0
guilhem/LGE975_G_Kitkat_Android_V20a_Kernel
drivers/staging/sbe-2t3e3/exar7250.c
8200
7282
/* * SBE 2T3E3 synchronous serial card driver for Linux * * Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This code is based on a driver written by SBE Inc. */ #include "2t3e3.h" #include "ctrl.h" void exar7250_init(struct channel *sc) { exar7250_write(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE, SBE_2T3E3_FRAMER_VAL_T3_CBIT | SBE_2T3E3_FRAMER_VAL_INTERRUPT_ENABLE_RESET | SBE_2T3E3_FRAMER_VAL_TIMING_ASYNCH_TXINCLK); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_IO_CONTROL, SBE_2T3E3_FRAMER_VAL_DISABLE_TX_LOSS_OF_CLOCK | SBE_2T3E3_FRAMER_VAL_DISABLE_RX_LOSS_OF_CLOCK | SBE_2T3E3_FRAMER_VAL_AMI_LINE_CODE | SBE_2T3E3_FRAMER_VAL_RX_LINE_CLOCK_INVERT); exar7250_set_frame_type(sc, SBE_2T3E3_FRAME_TYPE_T3_CBIT); } void exar7250_set_frame_type(struct channel *sc, u32 type) { u32 val; switch (type) { case SBE_2T3E3_FRAME_TYPE_E3_G751: case SBE_2T3E3_FRAME_TYPE_E3_G832: case SBE_2T3E3_FRAME_TYPE_T3_CBIT: case SBE_2T3E3_FRAME_TYPE_T3_M13: break; default: return; } exar7250_stop_intr(sc, type); val = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE); val &= ~(SBE_2T3E3_FRAMER_VAL_LOCAL_LOOPBACK_MODE | SBE_2T3E3_FRAMER_VAL_T3_E3_SELECT | SBE_2T3E3_FRAMER_VAL_FRAME_FORMAT_SELECT); switch (type) { case SBE_2T3E3_FRAME_TYPE_E3_G751: val |= SBE_2T3E3_FRAMER_VAL_E3_G751; break; case SBE_2T3E3_FRAME_TYPE_E3_G832: val |= SBE_2T3E3_FRAMER_VAL_E3_G832; break; case SBE_2T3E3_FRAME_TYPE_T3_CBIT: val |= SBE_2T3E3_FRAMER_VAL_T3_CBIT; break; case SBE_2T3E3_FRAME_TYPE_T3_M13: val |= SBE_2T3E3_FRAMER_VAL_T3_M13; break; default: return; } exar7250_write(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE, val); exar7250_start_intr(sc, type); } void exar7250_start_intr(struct channel *sc, u32 type) { u32 val; switch (type) { case SBE_2T3E3_FRAME_TYPE_E3_G751: case SBE_2T3E3_FRAME_TYPE_E3_G832: val = 
exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_CONFIGURATION_STATUS_2); #if 0 sc->s.LOS = val & SBE_2T3E3_FRAMER_VAL_E3_RX_LOS ? 1 : 0; #else cpld_LOS_update(sc); #endif sc->s.OOF = val & SBE_2T3E3_FRAMER_VAL_E3_RX_OOF ? 1 : 0; exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_1); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_1, SBE_2T3E3_FRAMER_VAL_E3_RX_OOF_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_E3_RX_LOS_INTERRUPT_ENABLE); #if 0 /*SBE_2T3E3_FRAMER_VAL_E3_RX_COFA_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_E3_RX_OOF_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_E3_RX_LOF_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_E3_RX_LOS_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_E3_RX_AIS_INTERRUPT_ENABLE);*/ #endif exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_2); #if 0 exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_2, SBE_2T3E3_FRAMER_VAL_E3_RX_FEBE_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_E3_RX_FERF_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_E3_RX_FRAMING_BYTE_ERROR_INTERRUPT_ENABLE); #endif break; case SBE_2T3E3_FRAME_TYPE_T3_CBIT: case SBE_2T3E3_FRAME_TYPE_T3_M13: val = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_CONFIGURATION_STATUS); #if 0 sc->s.LOS = val & SBE_2T3E3_FRAMER_VAL_T3_RX_LOS ? 1 : 0; #else cpld_LOS_update(sc); #endif sc->s.OOF = val & SBE_2T3E3_FRAMER_VAL_T3_RX_OOF ? 
1 : 0; exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_STATUS); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_ENABLE, SBE_2T3E3_FRAMER_VAL_T3_RX_LOS_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_T3_RX_OOF_INTERRUPT_ENABLE); #if 0 /* SBE_2T3E3_FRAMER_VAL_T3_RX_CP_BIT_ERROR_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_T3_RX_LOS_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_T3_RX_AIS_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_T3_RX_IDLE_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_T3_RX_FERF_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_T3_RX_AIC_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_T3_RX_OOF_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_T3_RX_P_BIT_INTERRUPT_ENABLE);*/ #endif exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS); #if 0 exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS, SBE_2T3E3_FRAMER_VAL_T3_RX_FEAC_REMOVE_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_T3_RX_FEAC_VALID_INTERRUPT_ENABLE); #endif exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_LAPD_CONTROL, 0); break; default: return; } exar7250_read(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_STATUS); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_ENABLE, SBE_2T3E3_FRAMER_VAL_RX_INTERRUPT_ENABLE | SBE_2T3E3_FRAMER_VAL_TX_INTERRUPT_ENABLE); } void exar7250_stop_intr(struct channel *sc, u32 type) { exar7250_write(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_ENABLE, 0); exar7250_read(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_STATUS); switch (type) { case SBE_2T3E3_FRAME_TYPE_E3_G751: case SBE_2T3E3_FRAME_TYPE_E3_G832: exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_1, 0); exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_1); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_2, 0); exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_2); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_LAPD_CONTROL, 0); exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_LAPD_CONTROL); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_TX_LAPD_STATUS, 0); 
exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_TX_LAPD_STATUS); break; case SBE_2T3E3_FRAME_TYPE_T3_CBIT: case SBE_2T3E3_FRAME_TYPE_T3_M13: exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_ENABLE, 0); exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_STATUS); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS, 0); exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_LAPD_CONTROL, 0); exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_LAPD_CONTROL); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_TX_FEAC_CONFIGURATION_STATUS, 0); exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_TX_FEAC_CONFIGURATION_STATUS); exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_TX_LAPD_STATUS, 0); exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_TX_LAPD_STATUS); break; } } void exar7250_unipolar_onoff(struct channel *sc, u32 mode) { switch (mode) { case SBE_2T3E3_OFF: exar7300_clear_bit(sc, SBE_2T3E3_FRAMER_REG_IO_CONTROL, SBE_2T3E3_FRAMER_VAL_UNIPOLAR); break; case SBE_2T3E3_ON: exar7300_set_bit(sc, SBE_2T3E3_FRAMER_REG_IO_CONTROL, SBE_2T3E3_FRAMER_VAL_UNIPOLAR); break; } } void exar7250_set_loopback(struct channel *sc, u32 mode) { switch (mode) { case SBE_2T3E3_FRAMER_VAL_LOOPBACK_OFF: exar7300_clear_bit(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE, SBE_2T3E3_FRAMER_VAL_LOCAL_LOOPBACK_MODE); break; case SBE_2T3E3_FRAMER_VAL_LOOPBACK_ON: exar7300_set_bit(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE, SBE_2T3E3_FRAMER_VAL_LOCAL_LOOPBACK_MODE); break; } }
gpl-2.0
Alex-the-black/android_kernel_huawei_msm8212
drivers/xen/xen-pciback/conf_space_capability.c
10248
4676
/* * PCI Backend - Handles the virtual fields found on the capability lists * in the configuration space. * * Author: Ryan Wilson <hap9@epoch.ncsc.mil> */ #include <linux/kernel.h> #include <linux/pci.h> #include "pciback.h" #include "conf_space.h" static LIST_HEAD(capabilities); struct xen_pcibk_config_capability { struct list_head cap_list; int capability; /* If the device has the capability found above, add these fields */ const struct config_field *fields; }; static const struct config_field caplist_header[] = { { .offset = PCI_CAP_LIST_ID, .size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */ .u.w.read = xen_pcibk_read_config_word, .u.w.write = NULL, }, {} }; static inline void register_capability(struct xen_pcibk_config_capability *cap) { list_add_tail(&cap->cap_list, &capabilities); } int xen_pcibk_config_capability_add_fields(struct pci_dev *dev) { int err = 0; struct xen_pcibk_config_capability *cap; int cap_offset; list_for_each_entry(cap, &capabilities, cap_list) { cap_offset = pci_find_capability(dev, cap->capability); if (cap_offset) { dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n", cap->capability, cap_offset); err = xen_pcibk_config_add_fields_offset(dev, caplist_header, cap_offset); if (err) goto out; err = xen_pcibk_config_add_fields_offset(dev, cap->fields, cap_offset); if (err) goto out; } } out: return err; } static int vpd_address_write(struct pci_dev *dev, int offset, u16 value, void *data) { /* Disallow writes to the vital product data */ if (value & PCI_VPD_ADDR_F) return PCIBIOS_SET_FAILED; else return pci_write_config_word(dev, offset, value); } static const struct config_field caplist_vpd[] = { { .offset = PCI_VPD_ADDR, .size = 2, .u.w.read = xen_pcibk_read_config_word, .u.w.write = vpd_address_write, }, { .offset = PCI_VPD_DATA, .size = 4, .u.dw.read = xen_pcibk_read_config_dword, .u.dw.write = NULL, }, {} }; static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value, void *data) { int err; u16 real_value; err = 
pci_read_config_word(dev, offset, &real_value); if (err) goto out; *value = real_value & ~PCI_PM_CAP_PME_MASK; out: return err; } /* PM_OK_BITS specifies the bits that the driver domain is allowed to change. * Can't allow driver domain to enable PMEs - they're shared */ #define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK) static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value, void *data) { int err; u16 old_value; pci_power_t new_state, old_state; err = pci_read_config_word(dev, offset, &old_value); if (err) goto out; old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK); new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK); new_value &= PM_OK_BITS; if ((old_value & PM_OK_BITS) != new_value) { new_value = (old_value & ~PM_OK_BITS) | new_value; err = pci_write_config_word(dev, offset, new_value); if (err) goto out; } /* Let pci core handle the power management change */ dev_dbg(&dev->dev, "set power state to %x\n", new_state); err = pci_set_power_state(dev, new_state); if (err) { err = PCIBIOS_SET_FAILED; goto out; } out: return err; } /* Ensure PMEs are disabled */ static void *pm_ctrl_init(struct pci_dev *dev, int offset) { int err; u16 value; err = pci_read_config_word(dev, offset, &value); if (err) goto out; if (value & PCI_PM_CTRL_PME_ENABLE) { value &= ~PCI_PM_CTRL_PME_ENABLE; err = pci_write_config_word(dev, offset, value); } out: return ERR_PTR(err); } static const struct config_field caplist_pm[] = { { .offset = PCI_PM_PMC, .size = 2, .u.w.read = pm_caps_read, }, { .offset = PCI_PM_CTRL, .size = 2, .init = pm_ctrl_init, .u.w.read = xen_pcibk_read_config_word, .u.w.write = pm_ctrl_write, }, { .offset = PCI_PM_PPB_EXTENSIONS, .size = 1, .u.b.read = xen_pcibk_read_config_byte, }, { .offset = PCI_PM_DATA_REGISTER, .size = 1, .u.b.read = xen_pcibk_read_config_byte, }, {} }; static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = { .capability = PCI_CAP_ID_PM, .fields = caplist_pm, }; static 
struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = { .capability = PCI_CAP_ID_VPD, .fields = caplist_vpd, }; int xen_pcibk_config_capability_init(void) { register_capability(&xen_pcibk_config_capability_vpd); register_capability(&xen_pcibk_config_capability_pm); return 0; }
gpl-2.0
surdupetru/tf300t-4.1bl
tools/perf/scripts/python/Perf-Trace-Util/Context.c
10760
2630
/* * Context.c. Python interfaces for perf script. * * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <Python.h> #include "../../../perf.h" #include "../../../util/trace-event.h" PyMODINIT_FUNC initperf_trace_context(void); static PyObject *perf_trace_context_common_pc(PyObject *self, PyObject *args) { static struct scripting_context *scripting_context; PyObject *context; int retval; if (!PyArg_ParseTuple(args, "O", &context)) return NULL; scripting_context = PyCObject_AsVoidPtr(context); retval = common_pc(scripting_context); return Py_BuildValue("i", retval); } static PyObject *perf_trace_context_common_flags(PyObject *self, PyObject *args) { static struct scripting_context *scripting_context; PyObject *context; int retval; if (!PyArg_ParseTuple(args, "O", &context)) return NULL; scripting_context = PyCObject_AsVoidPtr(context); retval = common_flags(scripting_context); return Py_BuildValue("i", retval); } static PyObject *perf_trace_context_common_lock_depth(PyObject *self, PyObject *args) { static struct scripting_context *scripting_context; PyObject *context; int retval; if (!PyArg_ParseTuple(args, "O", &context)) return NULL; scripting_context = PyCObject_AsVoidPtr(context); retval = common_lock_depth(scripting_context); return Py_BuildValue("i", retval); 
} static PyMethodDef ContextMethods[] = { { "common_pc", perf_trace_context_common_pc, METH_VARARGS, "Get the common preempt count event field value."}, { "common_flags", perf_trace_context_common_flags, METH_VARARGS, "Get the common flags event field value."}, { "common_lock_depth", perf_trace_context_common_lock_depth, METH_VARARGS, "Get the common lock depth event field value."}, { NULL, NULL, 0, NULL} }; PyMODINIT_FUNC initperf_trace_context(void) { (void) Py_InitModule("perf_trace_context", ContextMethods); }
gpl-2.0
spiderworthy/linux
fs/squashfs/id.c
12808
3008
/* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@squashfs.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * id.c */ /* * This file implements code to handle uids and gids. * * For space efficiency regular files store uid and gid indexes, which are * converted to 32-bit uids/gids using an id look up table. This table is * stored compressed into metadata blocks. A second index table is used to * locate these. This second index table for speed of access (and because it * is small) is read at mount time and cached in memory. 
*/ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/slab.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs.h" /* * Map uid/gid index into real 32-bit uid/gid using the id look up table */ int squashfs_get_id(struct super_block *sb, unsigned int index, unsigned int *id) { struct squashfs_sb_info *msblk = sb->s_fs_info; int block = SQUASHFS_ID_BLOCK(index); int offset = SQUASHFS_ID_BLOCK_OFFSET(index); u64 start_block = le64_to_cpu(msblk->id_table[block]); __le32 disk_id; int err; err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset, sizeof(disk_id)); if (err < 0) return err; *id = le32_to_cpu(disk_id); return 0; } /* * Read uncompressed id lookup table indexes from disk into memory */ __le64 *squashfs_read_id_index_table(struct super_block *sb, u64 id_table_start, u64 next_table, unsigned short no_ids) { unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); __le64 *table; TRACE("In read_id_index_table, length %d\n", length); /* Sanity check values */ /* there should always be at least one id */ if (no_ids == 0) return ERR_PTR(-EINVAL); /* * length bytes should not extend into the next table - this check * also traps instances where id_table_start is incorrectly larger * than the next table start */ if (id_table_start + length > next_table) return ERR_PTR(-EINVAL); table = squashfs_read_table(sb, id_table_start, length); /* * table[0] points to the first id lookup table metadata block, this * should be less than id_table_start */ if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) { kfree(table); return ERR_PTR(-EINVAL); } return table; }
gpl-2.0
schqiushui/kernel_kk444_sense_a31
arch/arm/mvp/pvtcpkm/pvtcp.c
9
9248
/* * Linux 2.6.32 and later Kernel module for VMware MVP PVTCP Server * * Copyright (C) 2010-2013 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #line 5 #include "pvtcp.h" CommOperationFunc pvtcpOperations[] = { [PVTCP_OP_FLOW] = PvtcpFlowOp, [PVTCP_OP_IO] = PvtcpIoOp, [PVTCP_OP_CREATE] = PvtcpCreateOp, [PVTCP_OP_RELEASE] = PvtcpReleaseOp, [PVTCP_OP_BIND] = PvtcpBindOp, [PVTCP_OP_LISTEN] = PvtcpListenOp, [PVTCP_OP_ACCEPT] = PvtcpAcceptOp, [PVTCP_OP_CONNECT] = PvtcpConnectOp, [PVTCP_OP_SHUTDOWN] = PvtcpShutdownOp, [PVTCP_OP_SETSOCKOPT] = PvtcpSetSockOptOp, [PVTCP_OP_GETSOCKOPT] = PvtcpGetSockOptOp, [PVTCP_OP_IOCTL] = PvtcpIoctlOp, [PVTCP_OP_INVALID] = NULL }; CommImpl pvtcpImpl = { .owner = NULL, .checkArgs = PvtcpCheckArgs, .stateCtor = PvtcpStateAlloc, .stateDtor = PvtcpStateFree, .dataAlloc = PvtcpBufAlloc, .dataFree = PvtcpBufFree, .operations = pvtcpOperations, .closeNtf = PvtcpCloseNtf, .closeNtfData = &pvtcpImpl, .ntfCenterID = { { .d32[0] = 2U, .d32[1] = 10000 } } }; const char *pvtcpVersions[] = { [PVTCP_VERS_1_1] = PVTCP_COMM_IMPL_VERS_1_1, [PVTCP_VERS_1_0] = PVTCP_COMM_IMPL_VERS_1_0 }; const unsigned int pvtcpVersionsSize = (sizeof(pvtcpVersions) / sizeof(pvtcpVersions[0])); CommChannel pvtcpClientChannel; static PvtcpIfConf ifUnbound = { .family = PVTCP_PF_UNBOUND }; const PvtcpIfConf *pvtcpIfUnbound = &ifUnbound; static PvtcpIfConf ifDeathRow = { .family = PVTCP_PF_DEATH_ROW }; const PvtcpIfConf *pvtcpIfDeathRow = &ifDeathRow; static PvtcpIfConf ifLoopbackInet4 = { .family = PVTCP_PF_LOOPBACK_INET4 }; const PvtcpIfConf *pvtcpIfLoopbackInet4 = 
&ifLoopbackInet4; static int IfCheck(const PvtcpIfConf *conf) { if (!conf || ((conf->family != PF_INET) && (conf->family != PF_INET6) && (conf->family != PVTCP_PF_UNBOUND) && (conf->family != PVTCP_PF_DEATH_ROW) && (conf->family != PVTCP_PF_LOOPBACK_INET4))) return -1; return 0; } static int IfRestrictedCheck(const PvtcpIfConf *conf) { if (IfCheck(conf) || ((conf->family != PF_INET) && (conf->family != PF_INET6))) return -1; return 0; } PvtcpIf * PvtcpStateFindIf(PvtcpState *state, const PvtcpIfConf *conf) { PvtcpIf *netif; if (!state) return NULL; if (conf->family == PVTCP_PF_UNBOUND) return &state->ifUnbound; if (conf->family == PVTCP_PF_DEATH_ROW) return &state->ifDeathRow; if (conf->family == PVTCP_PF_LOOPBACK_INET4) return &state->ifLoopbackInet4; CommOS_ListForEach(&state->ifList, netif, stateLink) { if (netif->conf.family == conf->family) { if ((conf->family == PF_INET && !memcmp(&netif->conf.addr.in, &conf->addr.in, sizeof(conf->addr.in))) || (conf->family == PF_INET6 && !memcmp(&netif->conf.addr.in6, &conf->addr.in6, sizeof(conf->addr.in6)))) return netif; } } return NULL; } int PvtcpStateAddIf(CommChannel channel, const PvtcpIfConf *conf) { int rc = -1; PvtcpState *state; PvtcpIf *netif; if (!channel || IfRestrictedCheck(conf)) return rc; if (CommSvc_Lock(channel)) return rc; state = CommSvc_GetState(channel); if (!state) goto out; if (PvtcpStateFindIf(state, conf)) goto out; netif = CommOS_Kmalloc(sizeof(*netif)); if (!netif) goto out; INIT_LIST_HEAD(&netif->stateLink); INIT_LIST_HEAD(&netif->sockList); netif->state = state; netif->conf = *conf; CommOS_ListAddTail(&state->ifList, &netif->stateLink); rc = 0; out: CommSvc_Unlock(channel); return rc; } static void IfReleaseSockets(PvtcpIf *netif) { PvtcpSock *pvsk; PvtcpSock *tmp; if (netif) { CommOS_ListForEachSafe(&netif->sockList, pvsk, tmp, ifLink) { CommOS_ListDel(&pvsk->ifLink); PvtcpReleaseSocket(pvsk); } } } static void IfFree(PvtcpIf *netif) { if (netif) { CommOS_ListDel(&netif->stateLink); 
CommOS_Kfree(netif); } } void PvtcpStateRemoveIf(CommChannel channel, const PvtcpIfConf *conf) { PvtcpState *state; PvtcpIf *netif; if (!channel || IfRestrictedCheck(conf)) return; if (CommSvc_Lock(channel)) return; state = CommSvc_GetState(channel); if (state) { netif = PvtcpStateFindIf(state, conf); if (netif && netif->state == state) { IfReleaseSockets(netif); if ((netif->conf.family != PVTCP_PF_UNBOUND) && (netif->conf.family != PVTCP_PF_DEATH_ROW) && (netif->conf.family != PVTCP_PF_LOOPBACK_INET4)) IfFree(netif); } } CommSvc_Unlock(channel); } int PvtcpStateAddSocket(CommChannel channel, const PvtcpIfConf *conf, PvtcpSock *sock) { int rc = -1; PvtcpState *state; PvtcpIf *netif; if (!channel || !sock || (sock->channel != channel) || IfCheck(conf)) return rc; if (CommSvc_Lock(channel)) return rc; state = CommSvc_GetState(channel); if (!state) goto out; netif = PvtcpStateFindIf(state, conf); if (!netif) goto out; CommOS_ListDel(&sock->ifLink); sock->netif = netif; CommOS_ListAddTail(&netif->sockList, &sock->ifLink); rc = 0; out: CommSvc_Unlock(channel); return rc; } int PvtcpStateRemoveSocket(CommChannel channel, PvtcpSock *sock) { if (!channel || !sock || (sock->channel && (sock->channel != channel))) return -1; if (CommSvc_Lock(channel)) return -1; CommOS_ListDel(&sock->ifLink); CommSvc_Unlock(channel); return 0; } void * PvtcpStateAlloc(CommChannel channel) { PvtcpState *state; state = CommOS_Kmalloc(sizeof(*state)); if (state) { state->channel = channel; INIT_LIST_HEAD(&state->ifList); INIT_LIST_HEAD(&state->ifDeathRow.stateLink); INIT_LIST_HEAD(&state->ifDeathRow.sockList); state->ifDeathRow.state = state; state->ifDeathRow.conf.family = PVTCP_PF_DEATH_ROW; INIT_LIST_HEAD(&state->ifUnbound.stateLink); INIT_LIST_HEAD(&state->ifUnbound.sockList); state->ifUnbound.state = state; state->ifUnbound.conf.family = PVTCP_PF_UNBOUND; INIT_LIST_HEAD(&state->ifLoopbackInet4.stateLink); INIT_LIST_HEAD(&state->ifLoopbackInet4.sockList); state->ifLoopbackInet4.state = 
state; state->ifLoopbackInet4.conf.family = PVTCP_PF_LOOPBACK_INET4; state->namespace = NULL; state->mask = ((unsigned int)channel << 4) ^ (unsigned int)state; #if defined(__linux__) #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) #define prandom_u32 random32 #endif state->id = ((unsigned long long)prandom_u32() << 32) | (unsigned long long)prandom_u32(); #else state->id = (unsigned long long)state; #endif } return state; } void PvtcpStateFree(void *arg) { PvtcpState *state = arg; PvtcpIf *netif; PvtcpIf *tmp; if (state) { CommOS_ListForEachSafe(&state->ifList, netif, tmp, stateLink) { IfReleaseSockets(netif); IfFree(netif); } IfReleaseSockets(&state->ifLoopbackInet4); IfReleaseSockets(&state->ifUnbound); IfReleaseSockets(&state->ifDeathRow); CommOS_Kfree(state); } } int PvtcpCheckArgs(CommTranspInitArgs *transpArgs) { int rc = -1; const unsigned int minCapacity = (PVTCP_SOCK_BUF_SIZE + sizeof(CommPacket)) * 2; unsigned int versionIndex = pvtcpVersionsSize; if (transpArgs->capacity < minCapacity) return rc; while (versionIndex--) { if (transpArgs->type == CommTransp_GetType(pvtcpVersions[versionIndex])) { transpArgs->type = versionIndex; rc = 0; break; } } return rc; } void PvtcpCloseNtf(void *ntfData, const CommTranspInitArgs *transpArgs, int inBH) { CommImpl *impl = (CommImpl *)ntfData; pvtcpClientChannel = NULL; commos_info("%s: Channel was reset!\n", __func__); if (impl && !impl->owner && !inBH) { commos_info("%s: Attempting to re-initialize channel.\n", __func__); impl->openAtMillis = CommOS_GetCurrentMillis(); impl->openTimeoutAtMillis = CommOS_GetCurrentMillis() + PVTCP_CHANNEL_OPEN_TIMEOUT; if (CommSvc_Alloc(transpArgs, impl, inBH, &pvtcpClientChannel)) commos_info("%s: Failed to initialize channel!\n", __func__); } } int PvtcpSockInit(PvtcpSock *pvsk, CommChannel channel) { PvtcpState *state; int rc = -1; if (pvsk && channel) { state = CommSvc_GetState(channel); if (state) { CommOS_MutexInit(&pvsk->inLock); CommOS_MutexInit(&pvsk->outLock); 
CommOS_SpinlockInit(&pvsk->stateLock); CommOS_ListInit(&pvsk->ifLink); CommOS_InitWork(&pvsk->work, PvtcpProcessAIO); pvsk->netif = NULL; pvsk->state = state; pvsk->stateID = state->id; pvsk->channel = channel; pvsk->peerSock = PVTCP_PEER_SOCK_NULL; pvsk->peerSockSet = 0; CommOS_WriteAtomic(&pvsk->deltaAckSize, (1 << PVTCP_SOCK_SMALL_ACK_ORDER)); CommOS_WriteAtomic(&pvsk->rcvdSize, 0); CommOS_WriteAtomic(&pvsk->sentSize, 0); CommOS_WriteAtomic(&pvsk->queueSize, 0); CommOS_ListInit(&pvsk->queue); pvsk->rpcReply = NULL; pvsk->rpcStatus = 0; pvsk->err = 0; rc = 0; } } return rc; }
gpl-2.0
XePeleato/android_ALE-L21_kernel
drivers/hisi/modem_hi6xxx/taf/comm/src/acore/at/src/AtParseCmd.c
9
19415
/************************************************************************ Copyright : 2005-2007, Huawei Tech. Co., Ltd. File name : AtParseCmd.c Author : --- Version : V100R001 Date : 2005-04-19 Description : ¸ÃCÎļþ¸ø³öÁË---Ä£¿éµÄʵÏÖ Function List: At_FindNextSubState At_FindNextMainState At_atou32 At_RangeToU32 At_RangeCopy History : 1. Date:2005-04-19 Author: --- Modification:Create 2. Date:2005-04-19 Author: --- Modification:Add function ... ÎÊÌâµ¥ºÅ: 3.ÈÕ ÆÚ : 2007-03-27 ×÷ Õß : h59254 ÐÞ¸ÄÄÚÈÝ : ÎÊÌâµ¥ºÅ:A32D09820(PC-LintÐÞ¸Ä) 4.ÈÕ ÆÚ : 2007Äê10ÔÂ11ÈÕ ×÷ Õß : x68770 ÐÞ¸ÄÄÚÈÝ : ¸ù¾ÝÎÊÌâµ¥ºÅA32D13027 ************************************************************************/ /***************************************************************************** 1 Í·Îļþ°üº¬ *****************************************************************************/ #include "ATCmdProc.h" #include "AtCheckFunc.h" #include "AtParseCmd.h" #include "at_common.h" #ifdef __cplusplus #if __cplusplus extern "C"{ #endif #endif /***************************************************************************** ЭÒéÕ»´òÓ¡´òµã·½Ê½ÏµÄ.CÎļþºê¶¨Òå *****************************************************************************/ /*lint -e767 -e960 ÐÞ¸ÄÈË:ÂÞ½¨ 107747;¼ìÊÓÈË:ËïÉÙ»ª65952;Ô­Òò:Log´òÓ¡*/ #define THIS_FILE_ID PS_FILE_ID_AT_PARSECMD_C /*lint +e767 +e960 ÐÞ¸ÄÈË:ÂÞ½¨ 107747;¼ìÊÓÈË:sunshaohua*/ /***************************************************************************** 2 È«¾Ö±äÁ¿¶¨Òå *****************************************************************************/ /***************************************************************************** 3 º¯Êý¡¢±äÁ¿ÉùÃ÷ *****************************************************************************/ /***************************************************************************** 4 º¯ÊýʵÏÖ *****************************************************************************/ /****************************************************************************** º¯ÊýÃû³Æ: 
atFindNextSubState ¹¦ÄÜÃèÊö: ¸ù¾ÝÊäÈë×Ö·û,ÒÀ´ÎÆ¥ÅäÖ¸¶¨×Ó״̬±íµÄÿһÏîµÄÅжϺ¯Êý,Èç¹û³É¹¦, Ôò·µ»Ø¶ÔÓ¦µÄ×Ó״̬ ²ÎÊý˵Ã÷: pSubStateTab [in] ָʾ±»Æ¥ÅäµÄ×Ó״̬±í ucInputChar [in] ָʾ±»Æ¥ÅäµÄ×Ö·û ·µ »Ø Öµ: ·µ»ØÆ¥ÅäµÄ×Ó״̬ µ÷ÓÃÒªÇó: TODO: ... µ÷ÓþÙÀý: TODO: ... ×÷ Õß: ´Þ¾üÇ¿/00064416 [2009-08-11] ******************************************************************************/ AT_STATE_TYPE_ENUM atFindNextSubState( AT_SUB_STATE_STRU *pSubStateTab,VOS_UINT8 ucInputChar) { VOS_UINT16 usTabIndex = 0; /* ×Ó״̬±íË÷Òý */ /* ÒÀ´Î±È½Ï×Ó״̬µÄÿһÏîÖ±ÖÁ½áÊø */ while(AT_BUTT_STATE != pSubStateTab[usTabIndex].next_state) { if( AT_SUCCESS == pSubStateTab[usTabIndex].pFuncName(ucInputChar)) /* ÅжÏÊäÈë×Ö·ûÊÇ·ñÆ¥Åä */ { return pSubStateTab[usTabIndex].next_state; /* ·µ»ØÆ¥ÅäµÄ×Ó״̬ */ } usTabIndex++; /* ×Ó״̬±íË÷ÒýµÝÔö */ } return AT_BUTT_STATE; } /****************************************************************************** º¯ÊýÃû³Æ: atFindNextMainState ¹¦ÄÜÃèÊö: ¸ù¾ÝÊäÈë״̬,ÒÀ´ÎÆ¥ÅäÖ¸¶¨Ö÷״̬±íÿһÏîµÄ״̬,Èç¹û³É¹¦,Ôò·µ »Ø¶ÔÓ¦µÄ×Ó״̬±í,ÔÙ¸ù¾ÝÊäÈë×Ö·ûºÍ×Ó״̬±íµÃ³öÏÂÒ»¸öÖ÷״̬ ²ÎÊý˵Ã÷: pMainStateTab [in] ָʾ±»Æ¥ÅäµÄÖ÷״̬±í ucInputChar [in] ָʾ±»Æ¥ÅäµÄ×Ö·û InputState [in] ÊäÈë״̬ ·µ »Ø Öµ: Èç¹ûÆ¥Åä³É¹¦£¬·µ»ØatFindNextSubStateº¯Êý½á¹û Èç¹ûÆ¥Åäʧ°Ü£¬·µ»ØAT_BUTT_STATE µ÷ÓÃÒªÇó: TODO: ... µ÷ÓþÙÀý: TODO: ... 
×÷ Õß: ´Þ¾üÇ¿/00064416 [2009-08-11] ******************************************************************************/ AT_STATE_TYPE_ENUM atFindNextMainState(AT_MAIN_STATE_STRU *pMainStateTab, VOS_UINT8 ucInputChar, AT_STATE_TYPE_ENUM InputState) { VOS_UINT16 usTabIndex = 0; /* ×Ó״̬±íË÷Òý */ /* ÒÀ´Î±È½ÏÖ÷״̬µÄÿһÏîÖ±ÖÁ½áÊø */ while(AT_BUTT_STATE != pMainStateTab[usTabIndex].curr_state) { if( InputState == pMainStateTab[usTabIndex].curr_state) /* ÅжÏÊäÈë״̬ÊÇ·ñÆ¥Åä */ { /* Èç¹û״̬ƥÅä,Ôò¸ù¾ÝÊäÈë×Ö·ûѰÕÒÏÂÒ»¸ö×Ó״̬ */ return atFindNextSubState(pMainStateTab[usTabIndex].pSubStateTab,ucInputChar); } usTabIndex++; } return AT_BUTT_STATE; } /***************************************************************************** Prototype : At_Auc2ul Description : °Ñ×Ö·û´®×ª³ÉÎÞ·ûºÅÕûÐÍÖµ Input : nptr ---ָʾ±»×ª»»×Ö´®µÄ¿ªÊ¼µØÖ· Output : --- Return Value : AT_SUCCESS --- ³É¹¦ AT_FAILURE --- ʧ°Ü Calls : --- Called By : --- History : --- 1.Date : 2005-04-19 Author : --- Modification: Created function 2.ÈÕ ÆÚ : 2007-03-27 ×÷ Õß : h59254 ÐÞ¸ÄÄÚÈÝ : ÎÊÌâµ¥ºÅ:A32D09820(PC-LintÐÞ¸Ä) *****************************************************************************/ TAF_UINT32 At_Auc2ul(TAF_UINT8 *nptr,TAF_UINT16 usLen,TAF_UINT32 *pRtn) { TAF_UINT32 c = 0; /* current Char */ TAF_UINT32 total = 0; /* current total */ TAF_UINT8 Length = 0; /* current Length */ c = (TAF_UINT32)*nptr++; while(Length++ < usLen) { if((c >= '0') && (c <= '9')) /* ×Ö·û¼ì²é */ { /* 0xFFFFFFFF = 4294967295 */ if(((total == 429496729) && (c > '5')) || (total > 429496729)) { return AT_FAILURE; } total = (10 * total) + (c - '0'); /* accumulate digit */ c = (TAF_UINT32)(TAF_UINT8)*nptr++; /* get next Char */ } else { return AT_FAILURE; } } *pRtn = total; /* return result, negated if necessary */ return AT_SUCCESS; } /***************************************************************************** Prototype : At_String2Hex Description : Íê³É×Ö·û´®×ª16½øÖÆÊý¹¦ÄÜ Input : nptr --- ×Ö·û´® Output : Return Value : AT_SUCCESS --- ³É¹¦ AT_FAILURE 
--- ʧ°Ü Calls : --- Called By : --- History : --- 1.Date : 2005-04-19 Author : --- Modification: Created function 2.ÈÕ ÆÚ : 2007-03-27 ×÷ Õß : h59254 ÐÞ¸ÄÄÚÈÝ : ÎÊÌâµ¥ºÅ:A32D09820(PC-LintÐÞ¸Ä) *****************************************************************************/ TAF_UINT32 At_String2Hex(TAF_UINT8 *nptr,TAF_UINT16 usLen,TAF_UINT32 *pRtn) { TAF_UINT32 c = 0; /* current Char */ TAF_UINT32 total = 0; /* current total */ TAF_UINT8 Length = 0; /* current Length */ c = (TAF_UINT32)*nptr++; while(Length++ < usLen) { if( (c >= '0') && (c <= '9') ) { c = c - '0'; } else if( (c >= 'a') && (c <= 'f') ) { c = (c - 'a') + 10; } else if( (c >= 'A') && (c <= 'F') ) { c = (c - 'A') + 10; } else { return AT_FAILURE; } if(total > 0x0FFFFFFF) /* ·¢Éú·´×ª */ { return AT_FAILURE; } else { total = (total << 4) + c; /* accumulate digit */ c = (TAF_UINT32)(TAF_UINT8)*nptr++; /* get next Char */ } } *pRtn = total; /* return result, negated if necessary */ return AT_SUCCESS; } /***************************************************************************** Prototype : At_RangeToU32 Description : °Ñ×Ö·û´®ÖеÄijһ¶Îת³ÉÎÞ·ûºÅÕûÐÍÖµ,pBegainָʾ¿ªÊ¼µØÖ·,pucEnd ʾ½áÊøµØÖ· Input : pucBegain --- ָʾ±»×ª»»×Ö´®µÄ¿ªÊ¼µØÖ· pucEnd --- ָʾ±»×ª»»×Ö´®µÄ¿ªÊ¼µØÖ· Output : --- Return Value : AT_SUCCESS --- ³É¹¦ AT_FAILURE --- ʧ°Ü Calls : --- Called By : --- History : --- 1.Date : 2005-04-19 Author : --- Modification: Created function 2.ÈÕ ÆÚ : 2007-03-27 ×÷ Õß : h59254 ÐÞ¸ÄÄÚÈÝ : ÎÊÌâµ¥ºÅ:A32D09820(PC-LintÐÞ¸Ä) *****************************************************************************/ TAF_UINT32 At_RangeToU32(TAF_UINT8 * pucBegain, TAF_UINT8 * pucEnd) { TAF_UINT32 c; /* current Char */ TAF_UINT32 total = 0; /* current total */ /* ÊäÈë²ÎÊý¼ì²é */ if(pucBegain >= pucEnd) { return total; } /* ´ÓµÚÒ»¸ö×Ö·û¿ªÊ¼ */ c = (TAF_UINT32)*pucBegain; /* ÒÀ´ÎÀÛ¼Ó*10½á¹û,Ö±ÖÁ½áÊø */ while( (pucBegain != pucEnd) && ( (c >= '0') && (c <= '9') )) { total = (10 * total) + (c - '0'); /* accumulate digit */ pucBegain++; /* 
×¢Ò⣬±ØÐëÔÚ¸³ÖµÖ®Ç°ÒÆÎ»£¬·ñÔò£¬±»¸³ÖµÁ½±é */ c = (TAF_UINT32)(TAF_UINT8)*pucBegain; /* get next Char */ if(total >= 0x19999998) /* Èç¹û´óÓÚ0x19999998£¬Ö±½Ó·µ»Ø£¬·ñÔò·´×ª */ { return total; } } return total; } /***************************************************************************** Prototype : At_RangeCopy Description : °Ñ×Ö·û´®ÖеÄijһ¶Î¿½±´µ½Ö¸¶¨µØÖ·,pDstָʾĿµÄµØÖ·,pucBegain ָʾ¿ªÊ¼µØÖ·,pEndָʾ½áÊøµØÖ· Input : pucDst --- Ä¿µÄµØÖ· pucBegain --- ±»×ª»»×Ö´®µÄ¿ªÊ¼µØÖ· pucEnd --- ±»×ª»»×Ö´®µÄ½áÊøµØÖ· Output : --- Return Value : --- Calls : --- Called By : --- History : --- 1.Date : 2005-04-19 Author : --- Modification: Created function *****************************************************************************/ TAF_VOID At_RangeCopy(TAF_UINT8 *pucDst,TAF_UINT8 * pucBegain, TAF_UINT8 * pucEnd) { /* ÒÀ´Î¿½±´µ½Ä¿µÄµØÖ·,Ö±ÖÁ½áÊø */ while(pucBegain < pucEnd) { *pucDst++ = *pucBegain++; } } /***************************************************************************** Prototype : At_UpString Description : ×Ö·û´®´óдת»» Input : pData --- Ä¿µÄµØÖ· usLen --- ³¤¶È Output : --- Return Value : AT_SUCCESS --- ³É¹¦ AT_FAILURE --- ʧ°Ü Calls : --- Called By : --- History : --- 1.Date : 2005-04-19 Author : --- Modification: Created function 2.ÈÕ ÆÚ : 2007-03-27 ×÷ Õß : h59254 ÐÞ¸ÄÄÚÈÝ : ÎÊÌâµ¥ºÅ:A32D09820(PC-LintÐÞ¸Ä) *****************************************************************************/ TAF_UINT32 At_UpString(TAF_UINT8 *pData,TAF_UINT16 usLen) { TAF_UINT8 *pTmp = pData; /* current Char */ TAF_UINT16 ChkLen = 0; if(0 == usLen) { return AT_FAILURE; } while(ChkLen++ < usLen) { if ( (*pTmp >= 'a') && (*pTmp <= 'z')) { *pTmp = *pTmp - 0x20; } pTmp++; } return AT_SUCCESS; } /****************************************************************************** º¯ÊýÃû³Æ: atRangeToU32 ¹¦ÄÜÃèÊö: °Ñ×Ö·û´®ÖеÄijһ¶Îת³ÉÎÞ·ûºÅÕûÐÍÖµ ²ÎÊý˵Ã÷: pucBegain [in/out] ָʾ±»×ª»»×Ö´®µÄ¿ªÊ¼µØÖ· pucEnd [in/out] ָʾ±»×ª»»×Ö´®µÄ¿ªÊ¼µØÖ· ·µ »Ø Öµ: ·µ»Ø×ª»»ËùµÃÕûÐÍÖµ ×÷ Õß: ´Þ¾üÇ¿/00064416 [2009-08-11] 
******************************************************************************/ VOS_UINT32 atRangeToU32( VOS_UINT8 *pucBegain, VOS_UINT8 *pucEnd) { VOS_UINT32 total = 0; /* current total */ VOS_UINT32 ulRst; /* ÊäÈë²ÎÊý¼ì²é */ if(pucBegain >= pucEnd) { return total; } ulRst = atAuc2ul(pucBegain, (VOS_UINT16)(pucEnd - pucBegain), &total); if(AT_SUCCESS != ulRst) { total = 0; } return total; } /****************************************************************************** º¯ÊýÃû³Æ: atRangeCopy ¹¦ÄÜÃèÊö: °Ñ×Ö·û´®ÖеÄijһ¶Î¿½±´µ½Ö¸¶¨µØÖ· ²ÎÊý˵Ã÷: pucDst [in/out] Ä¿µÄµØÖ· pucBegain [in/out] ±»×ª»»×Ö´®µÄ¿ªÊ¼µØÖ· pucEnd [in/out] ±»×ª»»×Ö´®µÄ½áÊøµØÖ· ·µ »Ø Öµ: TODO: ... µ÷ÓÃÒªÇó: TODO: ... µ÷ÓþÙÀý: TODO: ... ×÷ Õß: ´Þ¾üÇ¿/00064416 [2009-08-11] ******************************************************************************/ VOS_VOID atRangeCopy( VOS_UINT8 *pucDst, VOS_UINT8 * pucBegain, VOS_UINT8 * pucEnd) { /* ÒÀ´Î¿½±´µ½Ä¿µÄµØÖ·,Ö±ÖÁ½áÊø */ while(pucBegain < pucEnd) { *pucDst++ = *pucBegain++; } } /****************************************************************************** ¹¦ÄÜÃèÊö: °ÑÊ®Áù½øÖÆ×Ö·û´®×ª³ÉÎÞ·ûºÅÕûÐÍÖµ ²ÎÊý˵Ã÷: nptr [in/out] ÊäÈëµÄ×Ö·û´®ÄÚÈÝÖ¸Õë usLen [in] ÊäÈëµÄ×Ö·û´®³¤¶È pRtn [in/out] ÓÉ×Ö·û´®×ª»»ËùµÃÕûÐÍÖµ ·µ »Ø Öµ: AT_FAILURE: ÊäÈë×Ö·û´®ÖÐÓзÇÊý×Ö×Ö·û£¬»òÊýÖµÒç³ö AT_SUCCESS: ³É¹¦ ******************************************************************************/ static VOS_UINT32 auc2ulHex( VOS_UINT8 *nptr, VOS_UINT16 usLen, VOS_UINT32 *pRtn) { VOS_UINT8 c = 0; /* current Char */ VOS_UINT32 total = 0; /* current total */ VOS_UINT16 usLength = 2; /* current Length */ VOS_UINT8 *pcTmp = nptr + 2; /* ´Ó0xºó¿ªÊ¼±È½Ï */ /* ²ÎÊýÖ¸ÕëÓɵ÷ÓÃÕß±£Ö¤²»ÎªNULL, ¸Ã´¦²»×öÅÐ¶Ï */ c = *pcTmp++; while(usLength++ < usLen) { /* 0xFFFFFFFF */ if(total > 0xFFFFFFF) { return AT_FAILURE; } /* ×Ö·û¼ì²é */ if(isdigit(c)) { total = AT_CHECK_BASE_HEX * total + (c - '0'); /* accumulate digit */ c = *pcTmp++; /* get next Char */ } else if('A' <= c && 'F' >= c) { total = AT_CHECK_BASE_HEX * 
total + (c - 'A' + 10); /* accumulate digit */ c = *pcTmp++; /* get next Char */ } else if('a' <= c && 'f' >= c) { total = AT_CHECK_BASE_HEX * total + (c - 'a' + 10); /* accumulate digit */ c = *pcTmp++; /* get next Char */ } else { return AT_FAILURE; } } *pRtn = total; /* return result, negated if necessary */ return AT_SUCCESS; } #if 0 /****************************************************************************** ¹¦ÄÜÃèÊö: °Ñ°Ë½øÖÆ×Ö·û´®×ª³ÉÎÞ·ûºÅÕûÐÍÖµ ²ÎÊý˵Ã÷: nptr [in/out] ÊäÈëµÄ×Ö·û´®ÄÚÈÝÖ¸Õë usLen [in] ÊäÈëµÄ×Ö·û´®³¤¶È pRtn [in/out] ÓÉ×Ö·û´®×ª»»ËùµÃÕûÐÍÖµ ·µ »Ø Öµ: AT_FAILURE: ÊäÈë×Ö·û´®ÖÐÓзÇÊý×Ö×Ö·û£¬»òÊýÖµÒç³ö AT_SUCCESS: ³É¹¦ ******************************************************************************/ static VOS_UINT32 auc2ulOct( VOS_UINT8 *nptr, VOS_UINT16 usLen, VOS_UINT32 *pRtn) { VOS_UINT8 c = 0; /* current Char */ VOS_UINT32 total = 0; /* current total */ VOS_UINT16 usLength = 1; /* current Length */ VOS_UINT8 *pcTmp = nptr + 1; /* ´Ó0xºó¿ªÊ¼±È½Ï */ /* ²ÎÊýÖ¸ÕëÓɵ÷ÓÃÕß±£Ö¤²»ÎªNULL, ¸Ã´¦²»×öÅÐ¶Ï */ c = *pcTmp++; while(usLength++ < usLen) { /* ×Ö·û¼ì²é */ if('0' <= c && '7' >= c) { /* 0xFFFFFFFF = 037777777777 */ /* 0x1FFFFFFF 03777777777 */ if( total > 0x1FFFFFFF ) { return AT_FAILURE; } total = AT_CHECK_BASE_OCT * total + (c - '0'); /* accumulate digit */ c = *pcTmp++; /* get next Char */ } else { return AT_FAILURE; } } *pRtn = total; /* return result, negated if necessary */ return AT_SUCCESS; } #endif /****************************************************************************** ¹¦ÄÜÃèÊö: °ÑÊ®½øÖÆ×Ö·û´®×ª³ÉÎÞ·ûºÅÕûÐÍÖµ ²ÎÊý˵Ã÷: nptr [in/out] ÊäÈëµÄ×Ö·û´®ÄÚÈÝÖ¸Õë usLen [in] ÊäÈëµÄ×Ö·û´®³¤¶È pRtn [in/out] ÓÉ×Ö·û´®×ª»»ËùµÃÕûÐÍÖµ ·µ »Ø Öµ: AT_FAILURE: ÊäÈë×Ö·û´®ÖÐÓзÇÊý×Ö×Ö·û£¬»òÊýÖµÒç³ö AT_SUCCESS: ³É¹¦ ******************************************************************************/ static VOS_UINT32 auc2ulDec( VOS_UINT8 *nptr, VOS_UINT16 usLen, VOS_UINT32 *pRtn) { VOS_UINT32 c = 0; /* current Char */ VOS_UINT32 total = 0; /* current total */ 
VOS_UINT16 usLength = 0; /* current Length */ VOS_UINT8 *pcTmp = nptr; /* ´Ó0xºó¿ªÊ¼±È½Ï */ /* ²ÎÊýÖ¸ÕëÓɵ÷ÓÃÕß±£Ö¤²»ÎªNULL, ¸Ã´¦²»×öÅÐ¶Ï */ c = (VOS_UINT32)*pcTmp++; while(usLength++ < usLen) { /* ×Ö·û¼ì²é */ if(isdigit(c)) { /* 0xFFFFFFFF = 4294967295 */ if(((total == 429496729) && (c > '5')) || (total > 429496729)) { return AT_FAILURE; } total = AT_CHECK_BASE_DEC * total + (c - '0'); /* accumulate digit */ c = (VOS_UINT32)(VOS_UINT8)*pcTmp++; /* get next Char */ } else { return AT_FAILURE; } } *pRtn = total; /* return result, negated if necessary */ return AT_SUCCESS; } /****************************************************************************** ¹¦ÄÜÃèÊö: °Ñ×Ö·û´®×ª³ÉÎÞ·ûºÅÕûÐÍÖµ ²ÎÊý˵Ã÷: nptr [in/out] ÊäÈëµÄ×Ö·û´®ÄÚÈÝÖ¸Õë usLen [in] ÊäÈëµÄ×Ö·û´®³¤¶È pRtn [in/out] ÓÉ×Ö·û´®×ª»»ËùµÃÕûÐÍÖµ ·µ »Ø Öµ: AT_FAILURE: ÊäÈë×Ö·û´®ÖÐÓзÇÊý×Ö×Ö·û£¬»òÊýÖµÒç³ö AT_SUCCESS: ³É¹¦ ******************************************************************************/ VOS_UINT32 atAuc2ul( VOS_UINT8 *nptr,VOS_UINT16 usLen, VOS_UINT32 *pRtn) { /* ½øÈë¸Ãº¯Êýǰ£¬ËùÓвÎÊýÒѽøÐмì²é£¬±£Ö¤²»ÎªNULL */ if(NULL == nptr || 0 == usLen || NULL == pRtn) { return AT_FAILURE; } if('0' == *nptr) { if(2 < usLen && (('x' == *(nptr + 1)) || ('X' == *(nptr + 1)))) { return auc2ulHex(nptr, usLen, pRtn); } else { } } return auc2ulDec(nptr, usLen, pRtn); } VOS_VOID* At_HeapAllocD(VOS_UINT32 ulSize) { VOS_VOID* ret = NULL; if((ulSize == 0) || (ulSize > (1024*1024))) { return NULL; } #if (VOS_VXWORKS == VOS_OS_VER) ret = (VOS_VOID *)malloc(ulSize); #elif (VOS_LINUX == VOS_OS_VER) ret = (VOS_VOID *)kmalloc(ulSize, GFP_KERNEL); #else ret = (VOS_VOID *)malloc(ulSize); #endif return ret; } VOS_VOID At_HeapFreeD(VOS_VOID *pAddr) { if(pAddr == NULL) { return ; } #if (VOS_VXWORKS == VOS_OS_VER) free(pAddr); #elif (VOS_LINUX == VOS_OS_VER) kfree(pAddr); #else free(pAddr); #endif return; } #ifdef __cplusplus #if __cplusplus } #endif #endif
gpl-2.0
gearslam/himawhlspcs
sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
9
245342
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> #include <linux/bitops.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/control.h> #include <sound/q6adm-v2.h> #include <sound/q6asm-v2.h> #include <sound/q6afe-v2.h> #include <sound/tlv.h> #include <sound/asound.h> #include <sound/pcm_params.h> #include <sound/q6core.h> #include <sound/audio_cal_utils.h> #include <sound/msm-dts-eagle.h> #include <sound/audio_effects.h> #include <sound/hwdep.h> #include "msm-pcm-routing-v2.h" #include "msm-pcm-routing-devdep.h" #include "msm-qti-pp-config.h" #include "msm-dts-srs-tm-config.h" #include "msm-dolby-dap-config.h" #include "msm-ds2-dap-config.h" #include "q6voice.h" #include "sound/q6lsm.h" #undef pr_info #undef pr_err #define pr_info(fmt, ...) pr_aud_info(fmt, ##__VA_ARGS__) #define pr_err(fmt, ...) 
pr_aud_err(fmt, ##__VA_ARGS__) static int get_cal_path(int path_type); #define EC_PORT_ID_PRIMARY_MI2S_TX 1 #define EC_PORT_ID_SECONDARY_MI2S_TX 2 #define EC_PORT_ID_TERTIARY_MI2S_TX 3 #define EC_PORT_ID_QUATERNARY_MI2S_TX 4 static struct mutex routing_lock; static struct cal_type_data *cal_data; static int fm_switch_enable; static int fm_pcmrx_switch_enable; static int lsm_mux_slim_port; static int slim0_rx_aanc_fb_port; static int msm_route_ec_ref_rx = 8; static uint32_t voc_session_id = ALL_SESSION_VSID; static int msm_route_ext_ec_ref = AFE_PORT_INVALID; static bool is_custom_stereo_on; static bool is_ds2_on; enum { MADNONE, MADAUDIO, MADBEACON, MADULTRASOUND, MADSWAUDIO, }; #define SLIMBUS_0_TX_TEXT "SLIMBUS_0_TX" #define SLIMBUS_1_TX_TEXT "SLIMBUS_1_TX" #define SLIMBUS_2_TX_TEXT "SLIMBUS_2_TX" #define SLIMBUS_3_TX_TEXT "SLIMBUS_3_TX" #define SLIMBUS_4_TX_TEXT "SLIMBUS_4_TX" #define SLIMBUS_5_TX_TEXT "SLIMBUS_5_TX" #define TERT_MI2S_TX_TEXT "TERT_MI2S_TX" #define LSM_FUNCTION_TEXT "LSM Function" static const char * const mad_audio_mux_text[] = { "None", SLIMBUS_0_TX_TEXT, SLIMBUS_1_TX_TEXT, SLIMBUS_2_TX_TEXT, SLIMBUS_3_TX_TEXT, SLIMBUS_4_TX_TEXT, SLIMBUS_5_TX_TEXT, TERT_MI2S_TX_TEXT }; #define INT_RX_VOL_MAX_STEPS 0x2000 #define INT_RX_VOL_GAIN 0x2000 static int msm_route_afe_tert_mi2s_vol_control= 0x2000; static int msm_route_afe_quat_mi2s_vol_control= 0x2000; static const DECLARE_TLV_DB_LINEAR(afe_mi2s_vol_gain, 0, INT_RX_VOL_MAX_STEPS); struct msm_pcm_route_bdai_pp_params { u16 port_id; unsigned long pp_params_config; bool mute_on; int latency; }; static struct msm_pcm_route_bdai_pp_params msm_bedais_pp_params[MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX] = { {HDMI_RX, 0, 0, 0} }; static int msm_routing_send_device_pp_params(int port_id, int copp_idx); static void msm_pcm_routing_cfg_pp(int port_id, int copp_idx, int topology, int channels) { int rc = 0; switch (topology) { case SRS_TRUMEDIA_TOPOLOGY_ID: pr_debug("%s: SRS_TRUMEDIA_TOPOLOGY_ID\n", __func__); 
msm_dts_srs_tm_init(port_id, copp_idx); break; case DS2_ADM_COPP_TOPOLOGY_ID: pr_debug("%s: DS2_ADM_COPP_TOPOLOGY %d\n", __func__, DS2_ADM_COPP_TOPOLOGY_ID); rc = msm_ds2_dap_init(port_id, copp_idx, channels, is_custom_stereo_on); if (rc < 0) pr_err("%s: DS2 topo_id 0x%x, port %d, CS %d rc %d\n", __func__, topology, port_id, is_custom_stereo_on, rc); break; case DOLBY_ADM_COPP_TOPOLOGY_ID: if (is_ds2_on) { pr_debug("%s: DS2_ADM_COPP_TOPOLOGY\n", __func__); rc = msm_ds2_dap_init(port_id, copp_idx, channels, is_custom_stereo_on); if (rc < 0) pr_err("%s:DS2 topo_id 0x%x, port %d, rc %d\n", __func__, topology, port_id, rc); } else { pr_debug("%s: DOLBY_ADM_COPP_TOPOLOGY_ID\n", __func__); rc = msm_dolby_dap_init(port_id, copp_idx, channels, is_custom_stereo_on); if (rc < 0) pr_err("%s: DS1 topo_id 0x%x, port %d, rc %d\n", __func__, topology, port_id, rc); } break; case ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX: pr_debug("%s: DTS_EAGLE_COPP_TOPOLOGY_ID\n", __func__); msm_dts_eagle_init_post(port_id, copp_idx); break; default: break; } } static void msm_pcm_routing_deinit_pp(int port_id, int topology) { switch (topology) { case SRS_TRUMEDIA_TOPOLOGY_ID: pr_debug("%s: SRS_TRUMEDIA_TOPOLOGY_ID\n", __func__); msm_dts_srs_tm_deinit(port_id); break; case DS2_ADM_COPP_TOPOLOGY_ID: pr_debug("%s: DS2_ADM_COPP_TOPOLOGY_ID %d\n", __func__, DS2_ADM_COPP_TOPOLOGY_ID); msm_ds2_dap_deinit(port_id); break; case DOLBY_ADM_COPP_TOPOLOGY_ID: if (is_ds2_on) { pr_debug("%s: DS2_ADM_COPP_TOPOLOGY_ID\n", __func__); msm_ds2_dap_deinit(port_id); } else { pr_debug("%s: DOLBY_ADM_COPP_TOPOLOGY_ID\n", __func__); msm_dolby_dap_deinit(port_id); } break; case ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX: pr_debug("%s: DTS_EAGLE_COPP_TOPOLOGY_ID\n", __func__); msm_dts_eagle_deinit_post(port_id, topology); break; default: break; } } static void msm_pcm_routng_cfg_matrix_map_pp(struct route_payload payload, int path_type, int perf_mode) { int itr = 0; if ((path_type == ADM_PATH_PLAYBACK) && (perf_mode == 
LEGACY_PCM_MODE) && is_custom_stereo_on) { for (itr = 0; itr < payload.num_copps; itr++) { if ((payload.port_id[itr] == SLIMBUS_0_RX) || (payload.port_id[itr] == RT_PROXY_PORT_001_RX)) { msm_qti_pp_send_stereo_to_custom_stereo_cmd( payload.port_id[itr], payload.copp_idx[itr], payload.session_id, Q14_GAIN_ZERO_POINT_FIVE, Q14_GAIN_ZERO_POINT_FIVE, Q14_GAIN_ZERO_POINT_FIVE, Q14_GAIN_ZERO_POINT_FIVE); } } } } #define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = { { PRIMARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX}, { PRIMARY_I2S_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX}, { SLIMBUS_0_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX}, { SLIMBUS_0_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX}, { HDMI_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_HDMI}, { INT_BT_SCO_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX}, { INT_BT_SCO_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX}, { INT_FM_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX}, { INT_FM_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX}, { RT_PROXY_PORT_001_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX}, { RT_PROXY_PORT_001_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX}, { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AUXPCM_RX}, { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AUXPCM_TX}, { VOICE_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_VOICE_PLAYBACK_TX}, { VOICE2_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_VOICE2_PLAYBACK_TX}, { VOICE_RECORD_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX}, { VOICE_RECORD_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX}, { MI2S_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX}, { MI2S_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX}, { SECONDARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX}, { SLIMBUS_1_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX}, { SLIMBUS_1_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX}, { SLIMBUS_4_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX}, { SLIMBUS_4_TX, 0, 0, 0, 0, 0, 0, 0, 
LPASS_BE_SLIMBUS_4_TX}, { SLIMBUS_3_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX}, { SLIMBUS_3_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX}, { SLIMBUS_5_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX}, { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX}, { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX}, { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX}, { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_TERT_MI2S_RX}, { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_TERT_MI2S_TX}, { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_MI2S_RX}, { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_MI2S_TX}, { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_MI2S_RX}, { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_MI2S_TX}, { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_TERT_MI2S_RX}, { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_TERT_MI2S_TX}, { AUDIO_PORT_ID_I2S_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AUDIO_I2S_RX}, { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_AUXPCM_RX}, { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_AUXPCM_TX}, { SLIMBUS_6_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX}, { SLIMBUS_6_TX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX}, { AFE_PORT_ID_SPDIF_RX, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX}, { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_MI2S_RX_SD1}, }; static struct msm_pcm_routing_fdai_data fe_dai_map[MSM_FRONTEND_DAI_MM_SIZE][2] = { {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, 
INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} }, {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } }, }; static unsigned long session_copp_map[MSM_FRONTEND_DAI_MM_SIZE][2] [MSM_BACKEND_DAI_MAX]; static struct msm_pcm_routing_app_type_data app_type_cfg[MAX_APP_TYPES]; static struct msm_pcm_stream_app_type_cfg fe_dai_app_type_cfg[MSM_FRONTEND_DAI_MM_SIZE]; static struct htc_adm_effect_s htc_adm_effect[HTC_ADM_EFFECT_MAX]; static int msm_routing_send_htc_adm_params(u16 port_id, int copp_idx, int perf_mode) { int i = 0; int ret = 0; if(perf_mode != LEGACY_PCM_MODE) return ret; for(i=0; i<HTC_ADM_EFFECT_MAX; i++) { if(htc_adm_effect[i].used) { if(htc_adm_effect[i].port_id == port_id) { pr_info("%s: apply copp_idx 
0x%x param_id 0x%x\n",__func__,copp_idx, htc_adm_effect[i].param_id); ret = q6adm_enable_effect(port_id, copp_idx, htc_adm_effect[i].copp_id, htc_adm_effect[i].param_id, htc_adm_effect[i].payload_size, htc_adm_effect[i].payload); if(ret < 0) { pr_info("%s: set copp_idx 0x%x param_id 0x%x fail\n",__func__,copp_idx, htc_adm_effect[i].param_id); return ret; } } } } return ret; } int htc_adm_effect_control(enum HTC_ADM_EFFECT_ID effect_id, u16 port_id, uint32_t copp_id, uint32_t param_id, uint32_t payload_size, void *payload ) { int be_idx = -1; int i = 0; int copp_idx = 0; int ret = 0; mutex_lock(&routing_lock); if(htc_adm_effect[effect_id].used) { kfree(htc_adm_effect[effect_id].payload); htc_adm_effect[effect_id].used = 0; } htc_adm_effect[effect_id].payload = kzalloc(payload_size, GFP_KERNEL); if(htc_adm_effect[effect_id].payload) { memcpy(htc_adm_effect[effect_id].payload, payload, payload_size); htc_adm_effect[effect_id].payload_size = payload_size; htc_adm_effect[effect_id].port_id = port_id; htc_adm_effect[effect_id].copp_id = copp_id; htc_adm_effect[effect_id].param_id = param_id; htc_adm_effect[effect_id].used = 1; } for(i=0; i<MSM_BACKEND_DAI_MAX; i++) { if(msm_bedais[i].port_id == port_id && msm_bedais[i].active) { be_idx = i; break; } } if(be_idx >= 0 && be_idx < MSM_BACKEND_DAI_MAX) { for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) { if(fe_dai_map[i][SESSION_TYPE_RX].perf_mode != LEGACY_PCM_MODE) continue; for (copp_idx = 0; copp_idx < MAX_COPPS_PER_PORT; copp_idx++) { unsigned long copp = session_copp_map[i] [SESSION_TYPE_RX][be_idx]; if (test_bit(copp_idx, &copp)) { pr_info("%s: apply fe %d copp_idx 0x%x param_id 0x%x\n",__func__,i,copp_idx, param_id); ret = q6adm_enable_effect(port_id, copp_idx, copp_id, param_id, payload_size, payload); if(ret < 0) { pr_info("%s: set fe %d copp_id 0x%x fail\n",__func__,i,copp_id); mutex_unlock(&routing_lock); return ret; } } } } } else { pr_info("%s:port id 0x%x is not active or 
found\n",__func__,port_id); } mutex_unlock(&routing_lock); return ret; } int msm_pcm_routing_get_port(struct snd_pcm_substream *substream, u16 *port_id) { int cnt = 0, i; struct snd_soc_pcm_runtime *rtd = substream->private_data; unsigned int fe_id = rtd->dai_link->be_id; if(!port_id || sizeof(port_id) < MSM_BACKEND_DAI_MAX) { pr_err("%s: size of port_id array is not enough\n",__func__); return -EINVAL; } if(fe_id >= MSM_FRONTEND_DAI_MM_SIZE) { pr_err("%s: fe id %d exceed max mm size %d\n",__func__,(int)fe_id,(int)MSM_FRONTEND_DAI_MM_SIZE); return -EINVAL; } for(i=0; i<MSM_BACKEND_DAI_MAX; i++) { if(test_bit(fe_id, &msm_bedais[i].fe_sessions)) { port_id[cnt] = msm_bedais[i].port_id; cnt++; } } return cnt; } void msm_pcm_routing_get_bedai_info(int be_idx, struct msm_pcm_routing_bdai_data *be_dai) { if (be_idx >= 0 && be_idx < MSM_BACKEND_DAI_MAX) memcpy(be_dai, &msm_bedais[be_idx], sizeof(struct msm_pcm_routing_bdai_data)); } void msm_pcm_routing_get_fedai_info(int fe_idx, int sess_type, struct msm_pcm_routing_fdai_data *fe_dai) { if ((sess_type == SESSION_TYPE_TX) || (sess_type == SESSION_TYPE_RX)) memcpy(fe_dai, &fe_dai_map[fe_idx][sess_type], sizeof(struct msm_pcm_routing_fdai_data)); } void msm_pcm_routing_acquire_lock(void) { mutex_lock(&routing_lock); } void msm_pcm_routing_release_lock(void) { mutex_unlock(&routing_lock); } static int msm_pcm_routing_get_app_type_idx(int app_type) { int idx; pr_debug("%s: app_type: %d\n", __func__, app_type); for (idx = 0; idx < MAX_APP_TYPES; idx++) { if (app_type_cfg[idx].app_type == app_type) return idx; } pr_info("%s: App type not available, fallback to default\n", __func__); return 0; } void msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int app_type, int acdb_dev_id, int sample_rate) { pr_debug("%s: fedai_id %d, app_type %d, sample_rate %d\n", __func__, fedai_id, app_type, sample_rate); if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) { pr_err("%s: bad MM ID %d\n", __func__, fedai_id); return; } 
fe_dai_app_type_cfg[fedai_id].app_type = app_type; fe_dai_app_type_cfg[fedai_id].acdb_dev_id = acdb_dev_id; fe_dai_app_type_cfg[fedai_id].sample_rate = sample_rate; } static struct cal_block_data *msm_routing_find_topology_by_path(int path) { struct list_head *ptr, *next; struct cal_block_data *cal_block = NULL; pr_debug("%s\n", __func__); list_for_each_safe(ptr, next, &cal_data->cal_blocks) { cal_block = list_entry(ptr, struct cal_block_data, list); if (((struct audio_cal_info_adm_top *)cal_block->cal_info) ->path == path) { return cal_block; } } pr_debug("%s: Can't find topology for path %d\n", __func__, path); return NULL; } static struct cal_block_data *msm_routing_find_topology(int path, int app_type, int acdb_id, int sample_rate) { struct list_head *ptr, *next; struct cal_block_data *cal_block = NULL; struct audio_cal_info_adm_top *cal_info; pr_debug("%s\n", __func__); list_for_each_safe(ptr, next, &cal_data->cal_blocks) { cal_block = list_entry(ptr, struct cal_block_data, list); cal_info = (struct audio_cal_info_adm_top *) cal_block->cal_info; if ((cal_info->path == path) && (cal_info->app_type == app_type) && (cal_info->acdb_id == acdb_id) && (cal_info->sample_rate == sample_rate)) { return cal_block; } } pr_debug("%s: Can't find topology for path %d, app %d, acdb_id %d sample_rate %d defaulting to search by path\n", __func__, path, app_type, acdb_id, sample_rate); return msm_routing_find_topology_by_path(path); } static int msm_routing_get_adm_topology(int path, int fedai_id) { int topology = NULL_COPP_TOPOLOGY; struct cal_block_data *cal_block = NULL; int app_type = 0, acdb_dev_id = 0, sample_rate = 0; pr_debug("%s\n", __func__); path = get_cal_path(path); if (cal_data == NULL) goto done; mutex_lock(&cal_data->lock); if (path == RX_DEVICE) { app_type = fe_dai_app_type_cfg[fedai_id].app_type; acdb_dev_id = fe_dai_app_type_cfg[fedai_id].acdb_dev_id; sample_rate = fe_dai_app_type_cfg[fedai_id].sample_rate; } cal_block = msm_routing_find_topology(path, 
app_type, acdb_dev_id, sample_rate); if (cal_block == NULL) goto unlock; topology = ((struct audio_cal_info_adm_top *) cal_block->cal_info)->topology; unlock: mutex_unlock(&cal_data->lock); done: pr_debug("%s: Using topology %d\n", __func__, topology); return topology; } static uint8_t is_be_dai_extproc(int be_dai) { if (be_dai == MSM_BACKEND_DAI_EXTPROC_RX || be_dai == MSM_BACKEND_DAI_EXTPROC_TX || be_dai == MSM_BACKEND_DAI_EXTPROC_EC_TX) return 1; else return 0; } static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type, int path_type, int perf_mode) { int i, port_type, j, num_copps = 0; struct route_payload payload; port_type = ((path_type == ADM_PATH_PLAYBACK || path_type == ADM_PATH_COMPRESSED_RX) ? MSM_AFE_PORT_TYPE_RX : MSM_AFE_PORT_TYPE_TX); for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) { if (!is_be_dai_extproc(i) && (afe_get_port_type(msm_bedais[i].port_id) == port_type) && (msm_bedais[i].active) && (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) { for (j = 0; j < MAX_COPPS_PER_PORT; j++) { unsigned long copp = session_copp_map[fedai_id][sess_type][i]; if (test_bit(j, &copp)) { payload.port_id[num_copps] = msm_bedais[i].port_id; payload.copp_idx[num_copps] = j; num_copps++; } } } } if (num_copps) { payload.num_copps = num_copps; payload.session_id = fe_dai_map[fedai_id][sess_type].strm_id; payload.app_type = fe_dai_app_type_cfg[fedai_id].app_type; payload.acdb_dev_id = fe_dai_app_type_cfg[fedai_id].acdb_dev_id; payload.sample_rate = fe_dai_app_type_cfg[fedai_id].sample_rate; adm_matrix_map(path_type, payload, perf_mode); msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode); for(i=0; i<num_copps; i++) { msm_routing_send_htc_adm_params(payload.port_id[i],payload.copp_idx[i], perf_mode); } } } void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id, int stream_type) { int i, session_type, path_type, port_type; u32 mode = 0; if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) { pr_err("%s: bad MM ID\n", __func__); return; } if (stream_type 
== SNDRV_PCM_STREAM_PLAYBACK) { session_type = SESSION_TYPE_RX; path_type = ADM_PATH_PLAYBACK; port_type = MSM_AFE_PORT_TYPE_RX; } else { session_type = SESSION_TYPE_TX; path_type = ADM_PATH_LIVE_REC; port_type = MSM_AFE_PORT_TYPE_TX; } mutex_lock(&routing_lock); fe_dai_map[fedai_id][session_type].strm_id = dspst_id; for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) { if (!is_be_dai_extproc(i) && (afe_get_port_type(msm_bedais[i].port_id) == port_type) && (msm_bedais[i].active) && (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) { mode = afe_get_port_type(msm_bedais[i].port_id); adm_connect_afe_port(mode, dspst_id, msm_bedais[i].port_id); break; } } mutex_unlock(&routing_lock); } int msm_pcm_routing_reg_phy_compr_stream(int fe_id, bool perf_mode, int dspst_id, int stream_type, uint32_t compr_passthr_mode) { int i, j, session_type, path_type, port_type, topology, num_copps = 0; struct route_payload payload; u32 channels, sample_rate; u16 bit_width = 16; pr_debug("%s:fe_id[%d] perf_mode[%d] id[%d] stream_type[%d] passt[%d]", __func__, fe_id, perf_mode, dspst_id, stream_type, compr_passthr_mode); if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID) { pr_err("%s: bad MM ID %d\n", __func__, fe_id); return -EINVAL; } if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) { session_type = SESSION_TYPE_RX; if (compr_passthr_mode != LEGACY_PCM) path_type = ADM_PATH_COMPRESSED_RX; else path_type = ADM_PATH_PLAYBACK; port_type = MSM_AFE_PORT_TYPE_RX; } else if (stream_type == SNDRV_PCM_STREAM_CAPTURE) { session_type = SESSION_TYPE_TX; path_type = ADM_PATH_LIVE_REC; port_type = MSM_AFE_PORT_TYPE_TX; } else { pr_err("%s: invalid stream type %d\n", __func__, stream_type); return -EINVAL; } mutex_lock(&routing_lock); payload.num_copps = 0; fe_dai_map[fe_id][session_type].strm_id = dspst_id; msm_qti_pp_send_eq_values(fe_id); for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) { if (test_bit(fe_id, &msm_bedais[i].fe_sessions)) msm_bedais[i].compr_passthr_mode = compr_passthr_mode; if (!is_be_dai_extproc(i) && 
(afe_get_port_type(msm_bedais[i].port_id) == port_type) && (msm_bedais[i].active) && (test_bit(fe_id, &msm_bedais[i].fe_sessions))) { int app_type, app_type_idx, copp_idx, acdb_dev_id; channels = msm_bedais[i].channel; if (msm_bedais[i].format == SNDRV_PCM_FORMAT_S16_LE) bit_width = 16; else if (msm_bedais[i].format == SNDRV_PCM_FORMAT_S24_LE) bit_width = 24; app_type = (stream_type == SNDRV_PCM_STREAM_PLAYBACK) ? fe_dai_app_type_cfg[fe_id].app_type : 0; if (app_type) { app_type_idx = msm_pcm_routing_get_app_type_idx( app_type); sample_rate = app_type_cfg[app_type_idx].sample_rate; bit_width = app_type_cfg[app_type_idx].bit_width; } else { sample_rate = msm_bedais[i].sample_rate; } acdb_dev_id = fe_dai_app_type_cfg[fe_id].acdb_dev_id; topology = msm_routing_get_adm_topology(path_type, fe_id); pr_err("%s: Before adm open topology %d\n", __func__, topology); copp_idx = adm_open(msm_bedais[i].port_id, path_type, sample_rate, channels, topology, perf_mode, bit_width, app_type, acdb_dev_id); if ((copp_idx < 0) && (copp_idx >= MAX_COPPS_PER_PORT)) { pr_err("%s:adm open failed coppid:%d\n", __func__, copp_idx); mutex_unlock(&routing_lock); return -EINVAL; } pr_debug("%s: set idx bit of fe:%d, type: %d, be:%d\n", __func__, fe_id, session_type, i); set_bit(copp_idx, &session_copp_map[fe_id][session_type][i]); for (j = 0; j < MAX_COPPS_PER_PORT; j++) { unsigned long copp = session_copp_map[fe_id][session_type][i]; if (test_bit(j, &copp)) { payload.port_id[num_copps] = msm_bedais[i].port_id; payload.copp_idx[num_copps] = j; num_copps++; } } msm_routing_send_device_pp_params(msm_bedais[i].port_id, copp_idx); } } if (num_copps) { payload.num_copps = num_copps; payload.session_id = fe_dai_map[fe_id][session_type].strm_id; payload.app_type = fe_dai_app_type_cfg[fe_id].app_type; payload.acdb_dev_id = fe_dai_app_type_cfg[fe_id].acdb_dev_id; adm_matrix_map(path_type, payload, perf_mode); msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode); for(i=0; i<num_copps; i++) { 
msm_routing_send_htc_adm_params(payload.port_id[i],payload.copp_idx[i],perf_mode); } } mutex_unlock(&routing_lock); return 0; } int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode, int dspst_id, int stream_type) { int i, j, session_type, path_type, port_type, topology, num_copps = 0; struct route_payload payload; u32 channels, sample_rate; uint16_t bits_per_sample = 16; if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) { pr_err("%s: bad MM ID %d\n", __func__, fedai_id); return -EINVAL; } if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) { session_type = SESSION_TYPE_RX; path_type = ADM_PATH_PLAYBACK; port_type = MSM_AFE_PORT_TYPE_RX; } else { session_type = SESSION_TYPE_TX; path_type = ADM_PATH_LIVE_REC; port_type = MSM_AFE_PORT_TYPE_TX; } mutex_lock(&routing_lock); payload.num_copps = 0; fe_dai_map[fedai_id][session_type].strm_id = dspst_id; fe_dai_map[fedai_id][session_type].perf_mode = perf_mode; msm_qti_pp_send_eq_values(fedai_id); for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) { if (!is_be_dai_extproc(i) && (afe_get_port_type(msm_bedais[i].port_id) == port_type) && (msm_bedais[i].active) && (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) { int app_type, app_type_idx, copp_idx, acdb_dev_id; channels = msm_bedais[i].channel; msm_bedais[i].compr_passthr_mode = LEGACY_PCM; if (msm_bedais[i].format == SNDRV_PCM_FORMAT_S16_LE) bits_per_sample = 16; else if (msm_bedais[i].format == SNDRV_PCM_FORMAT_S24_LE) bits_per_sample = 24; app_type = (stream_type == SNDRV_PCM_STREAM_PLAYBACK) ? 
fe_dai_app_type_cfg[fedai_id].app_type : 0; if (app_type) { app_type_idx = msm_pcm_routing_get_app_type_idx(app_type); sample_rate = fe_dai_app_type_cfg[fedai_id].sample_rate; bits_per_sample = app_type_cfg[app_type_idx].bit_width; } else sample_rate = msm_bedais[i].sample_rate; acdb_dev_id = fe_dai_app_type_cfg[fedai_id].acdb_dev_id; topology = msm_routing_get_adm_topology(path_type, fedai_id); copp_idx = adm_open(msm_bedais[i].port_id, path_type, sample_rate, channels, topology, perf_mode, bits_per_sample, app_type, acdb_dev_id); if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) { pr_err("%s: adm open failed copp_idx:%d\n", __func__, copp_idx); mutex_unlock(&routing_lock); return -EINVAL; } pr_debug("%s: setting idx bit of fe:%d, type: %d, be:%d\n", __func__, fedai_id, session_type, i); set_bit(copp_idx, &session_copp_map[fedai_id][session_type][i]); for (j = 0; j < MAX_COPPS_PER_PORT; j++) { unsigned long copp = session_copp_map[fedai_id][session_type][i]; if (test_bit(j, &copp)) { payload.port_id[num_copps] = msm_bedais[i].port_id; payload.copp_idx[num_copps] = j; num_copps++; } } if ((perf_mode == LEGACY_PCM_MODE) && (msm_bedais[i].compr_passthr_mode == LEGACY_PCM)) msm_pcm_routing_cfg_pp(msm_bedais[i].port_id, copp_idx, topology, channels); } } if (num_copps) { payload.num_copps = num_copps; payload.session_id = fe_dai_map[fedai_id][session_type].strm_id; payload.app_type = fe_dai_app_type_cfg[fedai_id].app_type; payload.acdb_dev_id = fe_dai_app_type_cfg[fedai_id].acdb_dev_id; payload.sample_rate = fe_dai_app_type_cfg[fedai_id].sample_rate; adm_matrix_map(path_type, payload, perf_mode); msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode); for(i=0; i<num_copps; i++) { msm_routing_send_htc_adm_params(payload.port_id[i],payload.copp_idx[i],perf_mode); } } mutex_unlock(&routing_lock); return 0; } int msm_pcm_routing_reg_phy_stream_v2(int fedai_id, bool perf_mode, int dspst_id, int stream_type, struct msm_pcm_routing_evt event_info) { if 
(msm_pcm_routing_reg_phy_stream(fedai_id, perf_mode, dspst_id, stream_type)) { pr_err("%s: failed to reg phy stream\n", __func__); return -EINVAL; } if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) fe_dai_map[fedai_id][SESSION_TYPE_RX].event_info = event_info; else fe_dai_map[fedai_id][SESSION_TYPE_TX].event_info = event_info; return 0; } void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type) { int i, port_type, session_type, path_type, topology; struct msm_pcm_routing_fdai_data *fdai; if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) { pr_err("%s: bad MM ID\n", __func__); return; } if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) { port_type = MSM_AFE_PORT_TYPE_RX; session_type = SESSION_TYPE_RX; path_type = ADM_PATH_PLAYBACK; } else { port_type = MSM_AFE_PORT_TYPE_TX; session_type = SESSION_TYPE_TX; path_type = ADM_PATH_LIVE_REC; } mutex_lock(&routing_lock); for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) { if (!is_be_dai_extproc(i) && (afe_get_port_type(msm_bedais[i].port_id) == port_type) && (msm_bedais[i].active) && (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) { int idx; unsigned long copp = session_copp_map[fedai_id][session_type][i]; fdai = &fe_dai_map[fedai_id][session_type]; for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) if (test_bit(idx, &copp)) break; if (idx >= MAX_COPPS_PER_PORT || idx < 0) { pr_debug("%s: copp idx is invalid, exiting\n", __func__); continue; } topology = adm_get_topology_for_port_copp_idx( msm_bedais[i].port_id, idx); adm_close(msm_bedais[i].port_id, fdai->perf_mode, idx); pr_debug("%s:copp:%ld,idx bit fe:%d,type:%d,be:%d\n", __func__, copp, fedai_id, session_type, i); clear_bit(idx, &session_copp_map[fedai_id][session_type][i]); if ((DOLBY_ADM_COPP_TOPOLOGY_ID == topology || DS2_ADM_COPP_TOPOLOGY_ID == topology) && (fdai->perf_mode == LEGACY_PCM_MODE) && (msm_bedais[i].compr_passthr_mode == LEGACY_PCM)) msm_pcm_routing_deinit_pp(msm_bedais[i].port_id, topology); } } fe_dai_map[fedai_id][session_type].strm_id = INVALID_SESSION; 
fe_dai_map[fedai_id][session_type].be_srate = 0; mutex_unlock(&routing_lock); } static bool msm_pcm_routing_route_is_set(u16 be_id, u16 fe_id) { bool rc = false; if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID) { pr_err("%s: bad MM ID\n", __func__); return rc; } if (test_bit(fe_id, &msm_bedais[be_id].fe_sessions)) rc = true; return rc; } static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set) { int session_type, path_type, topology; u32 channels, sample_rate; uint16_t bits_per_sample = 16; struct msm_pcm_routing_fdai_data *fdai; pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set); if (val > MSM_FRONTEND_DAI_MM_MAX_ID) { pr_err("%s: bad MM ID\n", __func__); return; } if (afe_get_port_type(msm_bedais[reg].port_id) == MSM_AFE_PORT_TYPE_RX) { session_type = SESSION_TYPE_RX; if (msm_bedais[reg].compr_passthr_mode != LEGACY_PCM) path_type = ADM_PATH_COMPRESSED_RX; else path_type = ADM_PATH_PLAYBACK; } else { session_type = SESSION_TYPE_TX; path_type = ADM_PATH_LIVE_REC; } mutex_lock(&routing_lock); if (set) { if (!test_bit(val, &msm_bedais[reg].fe_sessions) && ((msm_bedais[reg].port_id == VOICE_PLAYBACK_TX) || (msm_bedais[reg].port_id == VOICE2_PLAYBACK_TX))) voc_start_playback(set, msm_bedais[reg].port_id); set_bit(val, &msm_bedais[reg].fe_sessions); fdai = &fe_dai_map[val][session_type]; if (msm_bedais[reg].active && fdai->strm_id != INVALID_SESSION) { int app_type, app_type_idx, copp_idx, acdb_dev_id; channels = msm_bedais[reg].channel; if (session_type == SESSION_TYPE_TX && fdai->be_srate && (fdai->be_srate != msm_bedais[reg].sample_rate)) { pr_debug("%s: flush strm %d diff BE rates\n", __func__, fdai->strm_id); if (fdai->event_info.event_func) fdai->event_info.event_func( MSM_PCM_RT_EVT_BUF_RECFG, fdai->event_info.priv_data); fdai->be_srate = 0; } if (msm_bedais[reg].format == SNDRV_PCM_FORMAT_S24_LE) bits_per_sample = 24; app_type = (session_type == SESSION_TYPE_RX) ? 
fe_dai_app_type_cfg[val].app_type : 0; if (app_type) { app_type_idx = msm_pcm_routing_get_app_type_idx(app_type); sample_rate = fe_dai_app_type_cfg[val].sample_rate; bits_per_sample = app_type_cfg[app_type_idx].bit_width; } else sample_rate = msm_bedais[reg].sample_rate; topology = msm_routing_get_adm_topology(path_type, val); acdb_dev_id = fe_dai_app_type_cfg[val].acdb_dev_id; copp_idx = adm_open(msm_bedais[reg].port_id, path_type, sample_rate, channels, topology, fdai->perf_mode, bits_per_sample, app_type, acdb_dev_id); if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) { pr_err("%s: adm open failed\n", __func__); mutex_unlock(&routing_lock); return; } pr_debug("%s: setting idx bit of fe:%d, type: %d, be:%d\n", __func__, val, session_type, reg); set_bit(copp_idx, &session_copp_map[val][session_type][reg]); if (session_type == SESSION_TYPE_RX && fdai->event_info.event_func) fdai->event_info.event_func( MSM_PCM_RT_EVT_DEVSWITCH, fdai->event_info.priv_data); msm_pcm_routing_build_matrix(val, session_type, path_type, fdai->perf_mode); if ((fdai->perf_mode == LEGACY_PCM_MODE) && (msm_bedais[reg].compr_passthr_mode == LEGACY_PCM)) msm_pcm_routing_cfg_pp(msm_bedais[reg].port_id, copp_idx, topology, channels); } } else { if (test_bit(val, &msm_bedais[reg].fe_sessions) && ((msm_bedais[reg].port_id == VOICE_PLAYBACK_TX) || (msm_bedais[reg].port_id == VOICE2_PLAYBACK_TX))) voc_start_playback(set, msm_bedais[reg].port_id); clear_bit(val, &msm_bedais[reg].fe_sessions); fdai = &fe_dai_map[val][session_type]; if (msm_bedais[reg].active && fdai->strm_id != INVALID_SESSION) { int idx; int port_id; unsigned long copp = session_copp_map[val][session_type][reg]; for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) if (test_bit(idx, &copp)) break; port_id = msm_bedais[reg].port_id; topology = adm_get_topology_for_port_copp_idx(port_id, idx); adm_close(msm_bedais[reg].port_id, fdai->perf_mode, idx); pr_debug("%s: copp: %ld, reset idx bit fe:%d, type: %d, be:%d topology=0x%x\n", 
__func__, copp, val, session_type, reg, topology); clear_bit(idx, &session_copp_map[val][session_type][reg]); if ((DOLBY_ADM_COPP_TOPOLOGY_ID == topology || DS2_ADM_COPP_TOPOLOGY_ID == topology) && (fdai->perf_mode == LEGACY_PCM_MODE) && (msm_bedais[reg].compr_passthr_mode == LEGACY_PCM)) msm_pcm_routing_deinit_pp( msm_bedais[reg].port_id, topology); msm_pcm_routing_build_matrix(val, session_type, path_type, fdai->perf_mode); } } if ((msm_bedais[reg].port_id == VOICE_RECORD_RX) || (msm_bedais[reg].port_id == VOICE_RECORD_TX)) voc_start_record(msm_bedais[reg].port_id, set, voc_session_id); mutex_unlock(&routing_lock); } static int msm_routing_get_audio_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions)) ucontrol->value.integer.value[0] = 1; else ucontrol->value.integer.value[0] = 0; pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift, ucontrol->value.integer.value[0]); return 0; } static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol); struct snd_soc_dapm_widget *widget = wlist->widgets[0]; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; if (ucontrol->value.integer.value[0] && msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false) { msm_pcm_routing_process_audio(mc->reg, mc->shift, 1); snd_soc_dapm_mixer_update_power(widget, kcontrol, 1); } else if (!ucontrol->value.integer.value[0] && msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true) { msm_pcm_routing_process_audio(mc->reg, mc->shift, 0); snd_soc_dapm_mixer_update_power(widget, kcontrol, 0); } return 1; } static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set) { u32 session_id = 0; pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set); if 
(val == MSM_FRONTEND_DAI_CS_VOICE) session_id = voc_get_session_id(VOICE_SESSION_NAME); else if (val == MSM_FRONTEND_DAI_VOLTE) session_id = voc_get_session_id(VOLTE_SESSION_NAME); else if (val == MSM_FRONTEND_DAI_VOWLAN) session_id = voc_get_session_id(VOWLAN_SESSION_NAME); else if (val == MSM_FRONTEND_DAI_VOICE2) session_id = voc_get_session_id(VOICE2_SESSION_NAME); else if (val == MSM_FRONTEND_DAI_QCHAT) session_id = voc_get_session_id(QCHAT_SESSION_NAME); else session_id = voc_get_session_id(VOIP_SESSION_NAME); pr_debug("%s: FE DAI 0x%x session_id 0x%x\n", __func__, val, session_id); mutex_lock(&routing_lock); if (set) set_bit(val, &msm_bedais[reg].fe_sessions); else clear_bit(val, &msm_bedais[reg].fe_sessions); if (val == MSM_FRONTEND_DAI_DTMF_RX && afe_get_port_type(msm_bedais[reg].port_id) == MSM_AFE_PORT_TYPE_RX) { pr_debug("%s(): set=%d port id=0x%x for dtmf generation\n", __func__, set, msm_bedais[reg].port_id); afe_set_dtmf_gen_rx_portid(msm_bedais[reg].port_id, set); } mutex_unlock(&routing_lock); if (afe_get_port_type(msm_bedais[reg].port_id) == MSM_AFE_PORT_TYPE_RX) { voc_set_route_flag(session_id, RX_PATH, set); if (set) { voc_set_rxtx_port(session_id, msm_bedais[reg].port_id, DEV_RX); if (voc_get_route_flag(session_id, RX_PATH) && voc_get_route_flag(session_id, TX_PATH)) voc_enable_device(session_id); } else { voc_disable_device(session_id); } } else { voc_set_route_flag(session_id, TX_PATH, set); if (set) { voc_set_rxtx_port(session_id, msm_bedais[reg].port_id, DEV_TX); if (voc_get_route_flag(session_id, RX_PATH) && voc_get_route_flag(session_id, TX_PATH)) voc_enable_device(session_id); } else { voc_disable_device(session_id); } } } static int msm_routing_get_voice_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; mutex_lock(&routing_lock); if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions)) ucontrol->value.integer.value[0] = 1; else 
	/* (tail of msm_routing_get_voice_mixer) bit clear -> report "not routed" */
	ucontrol->value.integer.value[0] = 0;
	mutex_unlock(&routing_lock);
	pr_debug("%s: reg %x shift %x val %ld\n", __func__,
		mc->reg, mc->shift, ucontrol->value.integer.value[0]);
	return 0;
}

/*
 * Put handler for a voice mixer control: routes (1) or un-routes (0) the
 * voice front-end DAI (mc->shift) to/from the back-end DAI (mc->reg) by
 * delegating to msm_pcm_routing_process_voice(), then updates DAPM power.
 */
static int msm_routing_put_voice_mixer(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;

	if (ucontrol->value.integer.value[0]) {
		msm_pcm_routing_process_voice(mc->reg, mc->shift, 1);
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
	} else {
		msm_pcm_routing_process_voice(mc->reg, mc->shift, 0);
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
	}

	/* Return 1 so ALSA core signals a control-value change to userspace. */
	return 1;
}

/*
 * Get handler for a voice-stub mixer control: reports whether front-end
 * mc->shift is currently marked routed to back-end mc->reg in the
 * fe_sessions bitmap (read under routing_lock).
 */
static int msm_routing_get_voice_stub_mixer(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;

	mutex_lock(&routing_lock);
	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
		ucontrol->value.integer.value[0] = 1;
	else
		ucontrol->value.integer.value[0] = 0;
	mutex_unlock(&routing_lock);
	pr_debug("%s: reg %x shift %x val %ld\n", __func__,
		mc->reg, mc->shift, ucontrol->value.integer.value[0]);
	return 0;
}

/*
 * Put handler for a voice-stub mixer control: only toggles the
 * fe_sessions bit and DAPM power state; unlike
 * msm_routing_put_voice_mixer() it does not call into the voice driver.
 */
static int msm_routing_put_voice_stub_mixer(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;

	if (ucontrol->value.integer.value[0]) {
		mutex_lock(&routing_lock);
		set_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions);
		mutex_unlock(&routing_lock);
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
	} else {
		mutex_lock(&routing_lock);
		clear_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions);
		mutex_unlock(&routing_lock);
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
	/* (tail of msm_routing_put_voice_stub_mixer) */
	}
	pr_debug("%s: reg %x shift %x val %ld\n", __func__,
		mc->reg, mc->shift, ucontrol->value.integer.value[0]);
	/* Return 1 so ALSA core notifies userspace of the change. */
	return 1;
}

/* HTC-specific FM routing switch state; 0 = off. Non-static — presumably
 * referenced from another file; TODO confirm before narrowing linkage. */
int htc_fm_switch_enable = 0;

/* Get handler: report the cached HTC FM switch state. */
static int msm_htc_routing_get_switch_mixer(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = htc_fm_switch_enable;
	return 0;
}

/*
 * Put handler for the HTC FM switch: update DAPM power for the switch
 * widget and cache the new state. No AFE/ADM call is made here; routing
 * follows from the DAPM graph.
 */
static int msm_htc_routing_put_switch_mixer(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];

	pr_debug("%s: FM Switch enable %ld\n", __func__,
		ucontrol->value.integer.value[0]);
	if (ucontrol->value.integer.value[0])
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
	else
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
	htc_fm_switch_enable = ucontrol->value.integer.value[0];
	return 1;
}

/* Get handler: report the cached FM switch state. */
static int msm_routing_get_switch_mixer(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = fm_switch_enable;
	pr_debug("%s: FM Switch enable %ld\n", __func__,
		ucontrol->value.integer.value[0]);
	return 0;
}

/* Put handler for the FM switch: same pattern as the HTC variant above,
 * but backed by fm_switch_enable. */
static int msm_routing_put_switch_mixer(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];

	pr_debug("%s: FM Switch enable %ld\n", __func__,
		ucontrol->value.integer.value[0]);
	if (ucontrol->value.integer.value[0])
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
	else
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
	fm_switch_enable = ucontrol->value.integer.value[0];
	return 1;
}

/* Get handler: report the cached FM-to-PCM-RX switch state. */
static int msm_routing_get_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = fm_pcmrx_switch_enable;
	pr_debug("%s: FM Switch enable %ld\n", __func__,
		ucontrol->value.integer.value[0]);
	return 0;
}

static int
/*
 * Put handler for the FM PCM-RX switch: update DAPM power for the switch
 * widget and cache the new state in fm_pcmrx_switch_enable.
 */
msm_routing_put_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];

	pr_debug("%s: FM Switch enable %ld\n", __func__,
		ucontrol->value.integer.value[0]);
	if (ucontrol->value.integer.value[0])
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
	else
		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
	fm_pcmrx_switch_enable = ucontrol->value.integer.value[0];
	/* Return 1 so ALSA core notifies userspace of the change. */
	return 1;
}

/* Get handler: report the currently selected LSM mux slot (0 = none). */
static int msm_routing_lsm_mux_get(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = lsm_mux_slim_port;
	return 0;
}

/*
 * Put handler for the LSM (Listen/voice-wakeup) input mux: map the enum
 * selection to an AFE TX port, tell the LSM layer which port to use via
 * set_lsm_port(), then update DAPM mux power.
 */
static int msm_routing_lsm_mux_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	int mux = ucontrol->value.enumerated.item[0];
	/* Default (and case 6) port: SLIMBUS multi-channel 5 TX. */
	int lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX;

	pr_debug("%s: LSM enable %ld\n", __func__,
		ucontrol->value.integer.value[0]);
	/* Selections 1..6 map to SLIMBUS multi-channel TX 0..5;
	 * 7 selects tertiary MI2S TX; anything else keeps the default. */
	switch (ucontrol->value.integer.value[0]) {
	case 1:
		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX;
		break;
	case 2:
		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX;
		break;
	case 3:
		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX;
		break;
	case 4:
		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX;
		break;
	case 5:
		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX;
		break;
	case 6:
		lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX;
		break;
	case 7:
		lsm_port = AFE_PORT_ID_TERTIARY_MI2S_TX;
		break;
	default:
		pr_err("Default lsm port");
		break;
	}
	set_lsm_port(lsm_port);

	if (ucontrol->value.integer.value[0]) {
		/* Enable: record selection before powering the mux path. */
		lsm_mux_slim_port = ucontrol->value.integer.value[0];
		snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
	} else {
		/* Disable: power down first, then clear the selection. */
		snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
		lsm_mux_slim_port =
ucontrol->value.integer.value[0]; } return 0; } static int msm_routing_lsm_func_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int i; u16 port_id; enum afe_mad_type mad_type; pr_debug("%s: enter\n", __func__); for (i = 0; i < ARRAY_SIZE(mad_audio_mux_text); i++) if (!strncmp(kcontrol->id.name, mad_audio_mux_text[i], strlen(mad_audio_mux_text[i]))) break; if (i-- == ARRAY_SIZE(mad_audio_mux_text)) { WARN(1, "Invalid id name %s\n", kcontrol->id.name); return -EINVAL; } if (!strcmp(kcontrol->id.name, mad_audio_mux_text[7])) { ucontrol->value.integer.value[0] = MADSWAUDIO; return 0; } port_id = i * 2 + 1 + SLIMBUS_0_RX; mad_type = afe_port_get_mad_type(port_id); pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id, mad_type); switch (mad_type) { case MAD_HW_NONE: ucontrol->value.integer.value[0] = MADNONE; break; case MAD_HW_AUDIO: ucontrol->value.integer.value[0] = MADAUDIO; break; case MAD_HW_BEACON: ucontrol->value.integer.value[0] = MADBEACON; break; case MAD_HW_ULTRASOUND: ucontrol->value.integer.value[0] = MADULTRASOUND; break; case MAD_SW_AUDIO: ucontrol->value.integer.value[0] = MADSWAUDIO; break; default: WARN(1, "Unknown\n"); return -EINVAL; } return 0; } static int msm_routing_lsm_func_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int i; u16 port_id; enum afe_mad_type mad_type; pr_debug("%s: enter\n", __func__); for (i = 0; i < ARRAY_SIZE(mad_audio_mux_text); i++) if (!strncmp(kcontrol->id.name, mad_audio_mux_text[i], strlen(mad_audio_mux_text[i]))) break; if (i-- == ARRAY_SIZE(mad_audio_mux_text)) { WARN(1, "Invalid id name %s\n", kcontrol->id.name); return -EINVAL; } port_id = i * 2 + 1 + SLIMBUS_0_RX; switch (ucontrol->value.integer.value[0]) { case MADNONE: mad_type = MAD_HW_NONE; break; case MADAUDIO: mad_type = MAD_HW_AUDIO; break; case MADBEACON: mad_type = MAD_HW_BEACON; break; case MADULTRASOUND: mad_type = MAD_HW_ULTRASOUND; break; case MADSWAUDIO: mad_type = MAD_SW_AUDIO; break; 
	/* (tail of msm_routing_lsm_func_put) reject unknown MAD selections */
	default:
		WARN(1, "Unknown\n");
		return -EINVAL;
	}

	/* mad_audio_mux_text[7] is the software-audio entry: it is pinned to
	 * tertiary MI2S TX regardless of the port derived from the name. */
	if (!strcmp(kcontrol->id.name, mad_audio_mux_text[7])) {
		port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
		mad_type = MAD_SW_AUDIO;
	}

	pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id, mad_type);
	return afe_port_set_mad_type(port_id, mad_type);
}

/* Get handler: report the cached SLIM0 RX AANC feedback-port selection. */
static int msm_routing_slim_0_rx_aanc_mux_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	mutex_lock(&routing_lock);
	ucontrol->value.integer.value[0] = slim0_rx_aanc_fb_port;
	mutex_unlock(&routing_lock);
	pr_debug("%s: AANC Mux Port %ld\n", __func__,
		ucontrol->value.integer.value[0]);
	return 0;
};

/*
 * Put handler for the SLIM0 RX AANC (adaptive ANC) feedback mux:
 * selection 0 disables AANC; any other value enables it with SLIMBUS_0_RX
 * as the RX port and a TX port derived from the selection index
 * (SLIMBUS_0_RX - 1 + 2*sel). The resulting config is pushed to the AFE.
 */
static int msm_routing_slim_0_rx_aanc_mux_put(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct aanc_data aanc_info;

	mutex_lock(&routing_lock);
	memset(&aanc_info, 0x00, sizeof(aanc_info));
	pr_debug("%s: AANC Mux Port %ld\n", __func__,
		ucontrol->value.integer.value[0]);
	slim0_rx_aanc_fb_port = ucontrol->value.integer.value[0];
	if (ucontrol->value.integer.value[0] == 0) {
		aanc_info.aanc_active = false;
		aanc_info.aanc_tx_port = 0;
		aanc_info.aanc_rx_port = 0;
	} else {
		aanc_info.aanc_active = true;
		aanc_info.aanc_rx_port = SLIMBUS_0_RX;
		/* TX port presumably the paired SLIMBUS TX for the selected
		 * feedback slot — TODO confirm mapping against AFE docs. */
		aanc_info.aanc_tx_port =
			(SLIMBUS_0_RX - 1 + (slim0_rx_aanc_fb_port * 2));
	}
	afe_set_aanc_info(&aanc_info);
	mutex_unlock(&routing_lock);
	return 0;
};

/*
 * Get handler for a BE-to-BE port mixer control: reports whether the
 * loopback bit (mc->shift) is set in the port_sessions bitmap of
 * back-end mc->reg. NOTE(review): read without routing_lock, unlike the
 * fe_sessions getters above — confirm this is intentional.
 */
static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;

	if (test_bit(mc->shift,
		(unsigned long *)&msm_bedais[mc->reg].port_sessions))
		ucontrol->value.integer.value[0] = 1;
	else
		ucontrol->value.integer.value[0] = 0;

	pr_debug("%s: reg %x shift %x val %ld\n", __func__,
		mc->reg, mc->shift, ucontrol->value.integer.value[0]);
	return 0;
}

/* Put handler for a BE-to-BE port mixer: enables/disables an AFE
 * loopback between two back-end ports (continues on next span). */
static int msm_routing_put_port_mixer(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
pr_debug("%s: reg 0x%x shift 0x%x val %ld\n", __func__, mc->reg, mc->shift, ucontrol->value.integer.value[0]); if (ucontrol->value.integer.value[0]) { afe_loopback(1, msm_bedais[mc->reg].port_id, msm_bedais[mc->shift].port_id); set_bit(mc->shift, (unsigned long *)&msm_bedais[mc->reg].port_sessions); } else { afe_loopback(0, msm_bedais[mc->reg].port_id, msm_bedais[mc->shift].port_id); clear_bit(mc->shift, (unsigned long *)&msm_bedais[mc->reg].port_sessions); } return 1; } static int msm_routing_get_afe_tert_mi2s_vol_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = msm_route_afe_tert_mi2s_vol_control; return 0; } static int msm_routing_set_afe_tert_mi2s_vol_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { afe_loopback_gain(AFE_PORT_ID_TERTIARY_MI2S_TX, ucontrol->value.integer.value[0]); msm_route_afe_tert_mi2s_vol_control = ucontrol->value.integer.value[0]; return 0; } static int msm_routing_get_afe_quat_mi2s_vol_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = msm_route_afe_quat_mi2s_vol_control; return 0; } static int msm_routing_set_afe_quat_mi2s_vol_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { afe_loopback_gain(AFE_PORT_ID_QUATERNARY_MI2S_TX, ucontrol->value.integer.value[0]); msm_route_afe_quat_mi2s_vol_control = ucontrol->value.integer.value[0]; return 0; } static int msm_routing_ec_ref_rx_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: ec_ref_rx = %d", __func__, msm_route_ec_ref_rx); mutex_lock(&routing_lock); ucontrol->value.integer.value[0] = msm_route_ec_ref_rx; mutex_unlock(&routing_lock); return 0; } static int msm_routing_ec_ref_rx_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ec_ref_port_id; struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol); struct snd_soc_dapm_widget *widget = 
		/* (continuation of msm_routing_ec_ref_rx_put locals) */
		wlist->widgets[0];
	int mux = ucontrol->value.enumerated.item[0];
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;

	mutex_lock(&routing_lock);
	/* Map the enum selection to an AFE port used as the echo-cancel
	 * reference, then program ADM with it.
	 * NOTE(review): the ec_ref_rx[] text table below has 9 entries
	 * ("PROXY_RX" is index 8), but this switch only handles 0-7, so
	 * selecting PROXY_RX falls into the error default — confirm whether
	 * a "case 8" mapping is missing. */
	switch (ucontrol->value.integer.value[0]) {
	case 0:
		msm_route_ec_ref_rx = 0;
		ec_ref_port_id = AFE_PORT_INVALID;
		break;
	case 1:
		msm_route_ec_ref_rx = 1;
		ec_ref_port_id = SLIMBUS_0_RX;
		break;
	case 2:
		msm_route_ec_ref_rx = 2;
		ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_RX;
		break;
	case 3:
		msm_route_ec_ref_rx = 3;
		ec_ref_port_id = AFE_PORT_ID_PRIMARY_MI2S_TX;
		break;
	case 4:
		msm_route_ec_ref_rx = 4;
		ec_ref_port_id = AFE_PORT_ID_SECONDARY_MI2S_TX;
		break;
	case 5:
		msm_route_ec_ref_rx = 5;
		ec_ref_port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
		break;
	case 6:
		msm_route_ec_ref_rx = 6;
		ec_ref_port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
		break;
	case 7:
		msm_route_ec_ref_rx = 7;
		ec_ref_port_id = AFE_PORT_ID_SECONDARY_MI2S_RX;
		break;
	default:
		msm_route_ec_ref_rx = 0; /* fall back to "None" */
		pr_err("%s EC ref rx %ld not valid\n",
			__func__, ucontrol->value.integer.value[0]);
		ec_ref_port_id = AFE_PORT_INVALID;
		break;
	}
	adm_ec_ref_rx_id(ec_ref_port_id);
	pr_debug("%s: msm_route_ec_ref_rx = %d\n",
	    __func__, msm_route_ec_ref_rx);
	mutex_unlock(&routing_lock);
	snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
	return 0;
}

/* Text entries for the EC reference source enum (see NOTE above about
 * "PROXY_RX" having no switch case in the put handler). */
static const char *const ec_ref_rx[] = { "None", "SLIM_RX", "I2S_RX",
	"PRI_MI2S_TX", "SEC_MI2S_TX", "TERT_MI2S_TX", "QUAT_MI2S_TX",
	"SEC_I2S_RX", "PROXY_RX"};

static const struct soc_enum msm_route_ec_ref_rx_enum[] = {
	SOC_ENUM_SINGLE_EXT(9, ec_ref_rx),
};

/* One EC-ref mux control per UL front-end; all share the same enum and
 * get/put handlers. */
static const struct snd_kcontrol_new ext_ec_ref_mux_ul1 =
	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL1 MUX Mux",
		msm_route_ec_ref_rx_enum[0],
		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);

static const struct snd_kcontrol_new ext_ec_ref_mux_ul2 =
	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL2 MUX Mux",
		msm_route_ec_ref_rx_enum[0],
		msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);

static const struct snd_kcontrol_new ext_ec_ref_mux_ul4 =
	SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL4 MUX Mux",
msm_route_ec_ref_rx_enum[0], msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put); static const struct snd_kcontrol_new ext_ec_ref_mux_ul5 = SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL5 MUX Mux", msm_route_ec_ref_rx_enum[0], msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put); static const struct snd_kcontrol_new ext_ec_ref_mux_ul6 = SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL6 MUX Mux", msm_route_ec_ref_rx_enum[0], msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put); static const struct snd_kcontrol_new ext_ec_ref_mux_ul8 = SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL8 MUX Mux", msm_route_ec_ref_rx_enum[0], msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put); static const struct snd_kcontrol_new ext_ec_ref_mux_ul9 = SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL9 MUX Mux", msm_route_ec_ref_rx_enum[0], msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put); static int msm_routing_ext_ec_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: ext_ec_ref_rx = %x\n", __func__, msm_route_ext_ec_ref); mutex_lock(&routing_lock); ucontrol->value.integer.value[0] = msm_route_ext_ec_ref; mutex_unlock(&routing_lock); return 0; } static int msm_routing_ext_ec_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol); struct snd_soc_dapm_widget *widget = wlist->widgets[0]; int mux = ucontrol->value.enumerated.item[0]; struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; int ret = 0; bool state = false; pr_debug("%s: msm_route_ec_ref_rx = %d value = %ld\n", __func__, msm_route_ext_ec_ref, ucontrol->value.integer.value[0]); mutex_lock(&routing_lock); switch (ucontrol->value.integer.value[0]) { case EC_PORT_ID_PRIMARY_MI2S_TX: msm_route_ext_ec_ref = AFE_PORT_ID_PRIMARY_MI2S_TX; state = true; break; case EC_PORT_ID_SECONDARY_MI2S_TX: msm_route_ext_ec_ref = AFE_PORT_ID_SECONDARY_MI2S_TX; state = true; break; case EC_PORT_ID_TERTIARY_MI2S_TX: msm_route_ext_ec_ref = 
AFE_PORT_ID_TERTIARY_MI2S_TX; state = true; break; case EC_PORT_ID_QUATERNARY_MI2S_TX: msm_route_ext_ec_ref = AFE_PORT_ID_QUATERNARY_MI2S_TX; state = true; break; default: msm_route_ext_ec_ref = AFE_PORT_INVALID; break; } if (!voc_set_ext_ec_ref(msm_route_ext_ec_ref, state)) { mutex_unlock(&routing_lock); snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e); } else { ret = -EINVAL; mutex_unlock(&routing_lock); } return ret; } static const char * const ext_ec_ref_rx[] = {"NONE", "PRI_MI2S_TX", "SEC_MI2S_TX", "TERT_MI2S_TX", "QUAT_MI2S_TX"}; static const struct soc_enum msm_route_ext_ec_ref_rx_enum[] = { SOC_ENUM_SINGLE_EXT(5, ext_ec_ref_rx), }; static const struct snd_kcontrol_new voc_ext_ec_mux = SOC_DAPM_ENUM_EXT("VOC_EXT_EC MUX Mux", msm_route_ext_ec_ref_rx_enum[0], msm_routing_ext_ec_get, msm_routing_ext_ec_put); static const struct snd_kcontrol_new pri_i2s_rx_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_I2S_RX , MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), 
	/*
	 * NOTE(review): this region is a run of ASoC mixer-control tables.
	 * Each SOC_SINGLE_EXT entry exposes one userspace switch that
	 * connects a multimedia front-end DAI (PCM session) to the back-end
	 * DAI the table is named after, with msm_routing_get_audio_mixer()
	 * and msm_routing_put_audio_mixer() as the get/put handlers.  The
	 * MI2S_HL table further below is the exception: it is a port-to-port
	 * mixer and uses msm_routing_get_port_mixer()/
	 * msm_routing_put_port_mixer() instead.
	 *
	 * Tail of the PRI_I2S_RX mixer table -- its opening declaration is
	 * above this chunk.  MultiMedia8..16 -> primary I2S RX.
	 */
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> secondary I2S RX back-end. */
static const struct snd_kcontrol_new sec_i2s_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> SPDIF RX back-end. */
static const struct snd_kcontrol_new spdif_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SPDIF_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> SLIMbus 0 RX back-end. */
static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> MI2S RX back-end. */
static const struct snd_kcontrol_new mi2s_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> quaternary MI2S RX back-end. */
static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/*
 * Tertiary MI2S RX mixer table.  NOTE(review): deliberately sparse --
 * only MultiMedia 1-4, 7 and 10-16 are routable here in the original
 * source; do not "complete" the list without confirming against the
 * upstream driver.
 */
static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* Single route: MultiMedia6 -> secondary MI2S RX, SD1 line. */
static const struct snd_kcontrol_new secondary_mi2s_rx2_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SECONDARY_MI2S_RX_SD1, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> secondary MI2S RX back-end. */
static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/*
 * Port-to-port mixer: TX back-ends (PRI_MI2S_TX, INTERNAL_FM_TX) into
 * the secondary MI2S RX port.  Uses the port-mixer get/put handlers,
 * not the audio-mixer ones.
 */
static const struct snd_kcontrol_new mi2s_hl_mixer_controls[] = {
	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer),
	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer),
};

/* MultiMedia1..16 -> primary MI2S RX back-end. */
static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> HDMI RX back-end. */
static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/*
 * In-call music delivery (voice session playback TX).  Only
 * MultiMedia 1, 2, 5 and 9 are routable here in the original source.
 */
static const struct snd_kcontrol_new incall_music_delivery_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* Same as above for the second voice session (Voice2 playback TX). */
static const struct snd_kcontrol_new incall_music2_delivery_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia 1, 2, 5, 9 -> SLIMbus 4 RX back-end. */
static const struct snd_kcontrol_new slimbus_4_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_4_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_4_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_4_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_4_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia 1, 2, 5, 9 -> SLIMbus 6 RX back-end. */
static const struct snd_kcontrol_new slimbus_6_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_6_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_6_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SLIMBUS_6_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SLIMBUS_6_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> internal BT SCO RX back-end. */
static const struct snd_kcontrol_new int_bt_sco_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> internal FM RX back-end. */
static const struct snd_kcontrol_new int_fm_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT_FM_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> AFE PCM RX back-end. */
static const struct snd_kcontrol_new afe_pcm_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> primary AUX PCM RX back-end. */
static const struct snd_kcontrol_new auxpcm_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/* MultiMedia1..16 -> secondary AUX PCM RX back-end. */
static const struct snd_kcontrol_new sec_auxpcm_rx_mixer_controls[] = {
	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
};

/*
 * Uplink (capture) mixer for the MultiMedia1 front-end: each entry
 * selects one TX back-end as a capture source.  NOTE(review): this
 * array continues past the end of this chunk -- the remaining entries
 * and the closing brace are below.
 */
static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
	SOC_SINGLE_EXT("PRI_TX", MSM_BACKEND_DAI_PRI_I2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("SLIM_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new mmul2_mixer_controls[] = { SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new mmul4_mixer_controls[] = { SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), 
SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX, MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new mmul5_mixer_controls[] = { SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("AUX_PCM_TX", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX, 
MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new mmul6_mixer_controls[] = { SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new mmul8_mixer_controls[] = { SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("AFE_PCM_TX", 
MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX, MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_PRI_I2S_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new sec_i2s_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, 
msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new sec_mi2s_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SECONDARY_MI2S_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new slimbus_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SLIMBUS_0_RX, 
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SLIMBUS_0_RX , MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_RX , MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SLIMBUS_0_RX , MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SLIMBUS_0_RX , MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SLIMBUS_0_RX , MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new bt_sco_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_INT_BT_SCO_RX , MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice Stub", 
MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_INT_BT_SCO_RX , MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_INT_BT_SCO_RX , MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_INT_BT_SCO_RX , MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_INT_BT_SCO_RX , MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new mi2s_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_MI2S_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new pri_mi2s_rx_voice_mixer_controls[] 
= { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new quat_mi2s_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, 
msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new afe_pcm_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE", 
MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new aux_pcm_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", 
MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_AUXPCM_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new sec_aux_pcm_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new hdmi_rx_voice_mixer_controls[] = { SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice2", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, 
msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("QCHAT", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new stub_rx_mixer_controls[] = { SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_EXTPROC_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_EXTPROC_RX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_EXTPROC_RX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), }; static const struct snd_kcontrol_new slimbus_1_rx_mixer_controls[] = { SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, 
msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), }; static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = { SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("Voice2 Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("VoLTE Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), }; static const struct snd_kcontrol_new tx_voice_mixer_controls[] = { SOC_SINGLE_EXT("PRI_TX_Voice", MSM_BACKEND_DAI_PRI_I2S_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("MI2S_TX_Voice", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SLIM_0_TX_Voice", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_Voice", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AFE_PCM_TX_Voice", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AUX_PCM_TX_Voice", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_TX_Voice", MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX_Voice", MSM_BACKEND_DAI_PRI_MI2S_TX, 
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("TERT_MI2S_TX_Voice", MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new tx_voice2_mixer_controls[] = { SOC_SINGLE_EXT("PRI_TX_Voice2", MSM_BACKEND_DAI_PRI_I2S_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("MI2S_TX_Voice2", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SLIM_0_TX_Voice2", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_Voice2", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AFE_PCM_TX_Voice2", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AUX_PCM_TX_Voice2", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_TX_Voice2", MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX_Voice2", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("TERT_MI2S_TX_Voice2", MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_VOICE2, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new tx_volte_mixer_controls[] = { SOC_SINGLE_EXT("PRI_TX_VoLTE", MSM_BACKEND_DAI_PRI_I2S_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SLIM_0_TX_VoLTE", 
MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_VoLTE", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AFE_PCM_TX_VoLTE", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("MI2S_TX_VoLTE", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX_VoLTE", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("TERT_MI2S_TX_VoLTE", MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new tx_vowlan_mixer_controls[] = { SOC_SINGLE_EXT("PRI_TX_VoWLAN", MSM_BACKEND_DAI_PRI_I2S_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SLIM_0_TX_VoWLAN", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_VoWLAN", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AFE_PCM_TX_VoWLAN", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, 
msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("MI2S_TX_VoWLAN", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX_VoWLAN", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("TERT_MI2S_TX_VoWLAN", MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new tx_voip_mixer_controls[] = { SOC_SINGLE_EXT("PRI_TX_Voip", MSM_BACKEND_DAI_PRI_I2S_TX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("MI2S_TX_Voip", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SLIM_0_TX_Voip", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_Voip", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AFE_PCM_TX_Voip", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AUX_PCM_TX_Voip", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_TX_Voip", MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX_Voip", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), 
SOC_SINGLE_EXT("TERT_MI2S_TX_Voip", MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new tx_voice_stub_mixer_controls[] = { SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SLIM_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, 
msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), }; static const struct snd_kcontrol_new tx_voice2_stub_mixer_controls[] = { SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SLIM_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, MSM_FRONTEND_DAI_VOICE2_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), }; static const struct snd_kcontrol_new tx_volte_stub_mixer_controls[] = { SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, 
msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("SLIM_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, MSM_FRONTEND_DAI_VOLTE_STUB, 1, 0, msm_routing_get_voice_stub_mixer, msm_routing_put_voice_stub_mixer), }; static const struct snd_kcontrol_new tx_qchat_mixer_controls[] = { SOC_SINGLE_EXT("PRI_TX_QCHAT", MSM_BACKEND_DAI_PRI_I2S_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SLIM_0_TX_QCHAT", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_QCHAT", MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AFE_PCM_TX_QCHAT", MSM_BACKEND_DAI_AFE_PCM_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0, 
msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("AUX_PCM_TX_QCHAT", MSM_BACKEND_DAI_AUXPCM_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_TX_QCHAT", MSM_BACKEND_DAI_SEC_AUXPCM_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("MI2S_TX_QCHAT", MSM_BACKEND_DAI_MI2S_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX_QCHAT", MSM_BACKEND_DAI_PRI_MI2S_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), SOC_SINGLE_EXT("TERT_MI2S_TX_QCHAT", MSM_BACKEND_DAI_TERTIARY_MI2S_TX, MSM_FRONTEND_DAI_QCHAT, 1, 0, msm_routing_get_voice_mixer, msm_routing_put_voice_mixer), }; static const struct snd_kcontrol_new sbus_0_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("QUAT_MI2S_TX", 
MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new aux_pcm_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_RX, MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_AUXPCM_RX, MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_AUXPCM_RX, MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_RX, MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new sec_auxpcm_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_RX, MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new sbus_1_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX, MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX, MSM_BACKEND_DAI_AFE_PCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX, MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, 
msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX, MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new sbus_3_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("INTERNAL_BT_SCO_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX, MSM_BACKEND_DAI_INT_BT_SCO_RX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_3_RX, MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("AFE_PCM_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX, MSM_BACKEND_DAI_AFE_PCM_RX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("AUX_PCM_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX, MSM_BACKEND_DAI_AUXPCM_RX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_0_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX, MSM_BACKEND_DAI_SLIMBUS_0_RX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new bt_sco_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_INT_BT_SCO_RX, MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new afe_pcm_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_AFE_PCM_RX, MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new hdmi_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_HDMI_RX, MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, 
msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new sec_i2s_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SEC_I2S_RX, MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new mi2s_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_MI2S_RX, MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_RX, MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new primary_mi2s_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_RX, MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new quat_mi2s_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer, 
msm_routing_put_port_mixer), SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new tert_mi2s_rx_port_mixer_controls[] = { SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_BACKEND_DAI_PRI_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_BACKEND_DAI_TERTIARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX, MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), }; static const struct snd_kcontrol_new slim_fm_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_switch_mixer, msm_routing_put_switch_mixer); static const struct snd_kcontrol_new slim1_fm_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_switch_mixer, msm_routing_put_switch_mixer); static const struct snd_kcontrol_new slim3_fm_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_switch_mixer, msm_routing_put_switch_mixer); static const struct snd_kcontrol_new htc_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_htc_routing_get_switch_mixer, msm_htc_routing_put_switch_mixer); static const struct snd_kcontrol_new 
htc_switch_mixer_controls_fm = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_htc_routing_get_switch_mixer, msm_htc_routing_put_switch_mixer); static const struct snd_kcontrol_new htc_switch_mixer_controls_pritx = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_htc_routing_get_switch_mixer, msm_htc_routing_put_switch_mixer); static const struct snd_kcontrol_new htc_switch_mixer_controls_stub_slim = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_htc_routing_get_switch_mixer, msm_htc_routing_put_switch_mixer); static const struct snd_kcontrol_new htc_switch_mixer_controls_stub_smi2s = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_htc_routing_get_switch_mixer, msm_htc_routing_put_switch_mixer); static const struct snd_kcontrol_new htc_switch_mixer_controls_stub_tmi2s = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_htc_routing_get_switch_mixer, msm_htc_routing_put_switch_mixer); static const struct snd_kcontrol_new htc_switch_mixer_controls_stub_qmi2s = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_htc_routing_get_switch_mixer, msm_htc_routing_put_switch_mixer); static const struct snd_kcontrol_new slim4_fm_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_switch_mixer, msm_routing_put_switch_mixer); static const struct snd_kcontrol_new pcm_rx_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_fm_pcmrx_switch_mixer, msm_routing_put_fm_pcmrx_switch_mixer); static const struct snd_kcontrol_new pri_mi2s_rx_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_switch_mixer, msm_routing_put_switch_mixer); static const struct snd_kcontrol_new quat_mi2s_rx_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_switch_mixer, msm_routing_put_switch_mixer); static const struct snd_kcontrol_new hfp_aux_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_switch_mixer, 
msm_routing_put_switch_mixer); static const struct snd_kcontrol_new hfp_int_switch_mixer_controls = SOC_SINGLE_EXT("Switch", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_switch_mixer, msm_routing_put_switch_mixer); static const struct soc_enum lsm_mux_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mad_audio_mux_text), mad_audio_mux_text); static const struct snd_kcontrol_new lsm1_mux = SOC_DAPM_ENUM_EXT("LSM1 MUX", lsm_mux_enum, msm_routing_lsm_mux_get, msm_routing_lsm_mux_put); static const struct snd_kcontrol_new lsm2_mux = SOC_DAPM_ENUM_EXT("LSM2 MUX", lsm_mux_enum, msm_routing_lsm_mux_get, msm_routing_lsm_mux_put); static const struct snd_kcontrol_new lsm3_mux = SOC_DAPM_ENUM_EXT("LSM3 MUX", lsm_mux_enum, msm_routing_lsm_mux_get, msm_routing_lsm_mux_put); static const struct snd_kcontrol_new lsm4_mux = SOC_DAPM_ENUM_EXT("LSM4 MUX", lsm_mux_enum, msm_routing_lsm_mux_get, msm_routing_lsm_mux_put); static const struct snd_kcontrol_new lsm5_mux = SOC_DAPM_ENUM_EXT("LSM5 MUX", lsm_mux_enum, msm_routing_lsm_mux_get, msm_routing_lsm_mux_put); static const struct snd_kcontrol_new lsm6_mux = SOC_DAPM_ENUM_EXT("LSM6 MUX", lsm_mux_enum, msm_routing_lsm_mux_get, msm_routing_lsm_mux_put); static const struct snd_kcontrol_new lsm7_mux = SOC_DAPM_ENUM_EXT("LSM7 MUX", lsm_mux_enum, msm_routing_lsm_mux_get, msm_routing_lsm_mux_put); static const struct snd_kcontrol_new lsm8_mux = SOC_DAPM_ENUM_EXT("LSM8 MUX", lsm_mux_enum, msm_routing_lsm_mux_get, msm_routing_lsm_mux_put); static const char * const lsm_func_text[] = { "None", "AUDIO", "BEACON", "ULTRASOUND", "SWAUDIO", }; static const struct soc_enum lsm_func_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lsm_func_text), lsm_func_text); static const struct snd_kcontrol_new lsm_function[] = { SOC_ENUM_EXT(SLIMBUS_0_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum, msm_routing_lsm_func_get, msm_routing_lsm_func_put), SOC_ENUM_EXT(SLIMBUS_1_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum, msm_routing_lsm_func_get, msm_routing_lsm_func_put), 
SOC_ENUM_EXT(SLIMBUS_2_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum, msm_routing_lsm_func_get, msm_routing_lsm_func_put), SOC_ENUM_EXT(SLIMBUS_3_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum, msm_routing_lsm_func_get, msm_routing_lsm_func_put), SOC_ENUM_EXT(SLIMBUS_4_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum, msm_routing_lsm_func_get, msm_routing_lsm_func_put), SOC_ENUM_EXT(SLIMBUS_5_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum, msm_routing_lsm_func_get, msm_routing_lsm_func_put), SOC_ENUM_EXT(TERT_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum, msm_routing_lsm_func_get, msm_routing_lsm_func_put), }; static const char * const aanc_slim_0_rx_text[] = { "ZERO", "SLIMBUS_0_TX", "SLIMBUS_1_TX", "SLIMBUS_2_TX", "SLIMBUS_3_TX", "SLIMBUS_4_TX", "SLIMBUS_5_TX", "SLIMBUS_6_TX" }; static const struct soc_enum aanc_slim_0_rx_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(aanc_slim_0_rx_text), aanc_slim_0_rx_text); static const struct snd_kcontrol_new aanc_slim_0_rx_mux[] = { SOC_DAPM_ENUM_EXT("AANC_SLIM_0_RX MUX", aanc_slim_0_rx_enum, msm_routing_slim_0_rx_aanc_mux_get, msm_routing_slim_0_rx_aanc_mux_put) }; static const struct snd_kcontrol_new afe_tert_mi2s_vol_mixer_controls[] = { SOC_SINGLE_EXT_TLV("TERT_MI2S_RX Volume", SND_SOC_NOPM, 0, INT_RX_VOL_GAIN, 0, msm_routing_get_afe_tert_mi2s_vol_mixer, msm_routing_set_afe_tert_mi2s_vol_mixer, afe_mi2s_vol_gain), }; static const struct snd_kcontrol_new afe_quat_mi2s_vol_mixer_controls[] = { SOC_SINGLE_EXT_TLV("QUAT_MI2S_RX Volume", SND_SOC_NOPM, 0, INT_RX_VOL_GAIN, 0, msm_routing_get_afe_quat_mi2s_vol_mixer, msm_routing_set_afe_quat_mi2s_vol_mixer, afe_mi2s_vol_gain), }; static int msm_routing_get_stereo_to_custom_stereo_control( struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = is_custom_stereo_on; return 0; } static int msm_routing_put_stereo_to_custom_stereo_control( struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int flag = 0, i = 0, rc = 0, idx = 0; int be_index 
= 0, port_id, topo_id; unsigned int session_id = 0; uint16_t op_FL_ip_FL_weight; uint16_t op_FL_ip_FR_weight; uint16_t op_FR_ip_FL_weight; uint16_t op_FR_ip_FR_weight; flag = ucontrol->value.integer.value[0]; pr_debug("%s E flag %d\n", __func__, flag); if ((is_custom_stereo_on && flag) || (!is_custom_stereo_on && !flag)) { pr_err("%s: is_custom_stereo_on %d, flag %d\n", __func__, is_custom_stereo_on, flag); return 0; } is_custom_stereo_on = flag ? true : false; pr_debug("%s:is_custom_stereo_on %d\n", __func__, is_custom_stereo_on); for (be_index = 0; be_index < MSM_BACKEND_DAI_MAX; be_index++) { port_id = msm_bedais[be_index].port_id; if (!msm_bedais[be_index].active) continue; if ((port_id != SLIMBUS_0_RX) && (port_id != RT_PROXY_PORT_001_RX)) continue; for_each_set_bit(i, &msm_bedais[be_index].fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) { if (fe_dai_map[i][SESSION_TYPE_RX].perf_mode != LEGACY_PCM_MODE) goto skip_send_custom_stereo; session_id = fe_dai_map[i][SESSION_TYPE_RX].strm_id; if (is_custom_stereo_on) { op_FL_ip_FL_weight = Q14_GAIN_ZERO_POINT_FIVE; op_FL_ip_FR_weight = Q14_GAIN_ZERO_POINT_FIVE; op_FR_ip_FL_weight = Q14_GAIN_ZERO_POINT_FIVE; op_FR_ip_FR_weight = Q14_GAIN_ZERO_POINT_FIVE; } else { op_FL_ip_FL_weight = Q14_GAIN_UNITY; op_FL_ip_FR_weight = 0; op_FR_ip_FL_weight = 0; op_FR_ip_FR_weight = Q14_GAIN_UNITY; } for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) { unsigned long copp = session_copp_map[i] [SESSION_TYPE_RX][be_index]; if (!test_bit(idx, &copp)) goto skip_send_custom_stereo; topo_id = adm_get_topology_for_port_copp_idx( msm_bedais[be_index].port_id, idx); if (topo_id < 0) pr_debug("%s:Err:custom stereo topo %d", __func__, topo_id); pr_debug("idx %d\n", idx); if (topo_id == DS2_ADM_COPP_TOPOLOGY_ID) rc = msm_ds2_dap_set_custom_stereo_onoff (msm_bedais[be_index].port_id, idx, is_custom_stereo_on); else if (topo_id == DOLBY_ADM_COPP_TOPOLOGY_ID) rc = dolby_dap_set_custom_stereo_onoff( msm_bedais[be_index].port_id, idx, is_custom_stereo_on); else rc 
= msm_qti_pp_send_stereo_to_custom_stereo_cmd (msm_bedais[be_index].port_id, idx, session_id, op_FL_ip_FL_weight, op_FL_ip_FR_weight, op_FR_ip_FL_weight, op_FR_ip_FR_weight); if (rc < 0) skip_send_custom_stereo: pr_err("%s: err setting custom stereo\n", __func__); } } } return 0; } static const struct snd_kcontrol_new stereo_to_custom_stereo_controls[] = { SOC_SINGLE_EXT("Set Custom Stereo OnOff", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_stereo_to_custom_stereo_control, msm_routing_put_stereo_to_custom_stereo_control), }; static int msm_routing_get_app_type_cfg_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { return 0; } static int msm_routing_put_app_type_cfg_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int i = 0, j; int num_app_types = ucontrol->value.integer.value[i++]; pr_debug("%s\n", __func__); memset(app_type_cfg, 0, MAX_APP_TYPES* sizeof(struct msm_pcm_routing_app_type_data)); if (num_app_types > MAX_APP_TYPES) { pr_err("%s: number of app types exceed the max supported\n", __func__); return -EINVAL; } for (j = 0; j < num_app_types; j++) { app_type_cfg[j].app_type = ucontrol->value.integer.value[i++]; app_type_cfg[j].sample_rate = ucontrol->value.integer.value[i++]; app_type_cfg[j].bit_width = ucontrol->value.integer.value[i++]; } return 0; } static const struct snd_kcontrol_new app_type_cfg_controls[] = { SOC_SINGLE_MULTI_EXT("App Type Config", SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 128, msm_routing_get_app_type_cfg_control, msm_routing_put_app_type_cfg_control), }; static int msm_routing_get_use_ds1_or_ds2_control( struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = is_ds2_on; return 0; } static int msm_routing_put_use_ds1_or_ds2_control( struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { is_ds2_on = ucontrol->value.integer.value[0]; return 0; } static const struct snd_kcontrol_new use_ds1_or_ds2_controls[] = { SOC_SINGLE_EXT("DS2 
OnOff", SND_SOC_NOPM, 0, 1, 0, msm_routing_get_use_ds1_or_ds2_control, msm_routing_put_use_ds1_or_ds2_control), }; int msm_routing_get_rms_value_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int rc = 0; int be_idx = 0; char *param_value; int *update_param_value; uint32_t param_length = sizeof(uint32_t); uint32_t param_payload_len = RMS_PAYLOAD_LEN * sizeof(uint32_t); param_value = kzalloc(param_length, GFP_KERNEL); if (!param_value) { pr_err("%s, param memory alloc failed\n", __func__); return -ENOMEM; } for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) if (msm_bedais[be_idx].port_id == SLIMBUS_0_TX) break; if ((be_idx < MSM_BACKEND_DAI_MAX) && msm_bedais[be_idx].active) { rc = adm_get_params(SLIMBUS_0_TX, 0, RMS_MODULEID_APPI_PASSTHRU, RMS_PARAM_FIRST_SAMPLE, param_length + param_payload_len, param_value); if (rc) { pr_err("%s: get parameters failed:%d\n", __func__, rc); kfree(param_value); return -EINVAL; } update_param_value = (int *)param_value; ucontrol->value.integer.value[0] = update_param_value[0]; pr_debug("%s: FROM DSP value[0] 0x%x\n", __func__, update_param_value[0]); } kfree(param_value); return 0; } static int msm_voc_session_id_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { voc_session_id = ucontrol->value.integer.value[0]; pr_debug("%s: voc_session_id=%u\n", __func__, voc_session_id); return 0; } static int msm_voc_session_id_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = voc_session_id; return 0; } static struct snd_kcontrol_new msm_voc_session_controls[] = { SOC_SINGLE_MULTI_EXT("Voc VSID", SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 1, msm_voc_session_id_get, msm_voc_session_id_put), }; static int spkr_prot_put_vi_lch_port(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ret = 0; int item; struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; pr_debug("%s item is %d\n", __func__, 
ucontrol->value.enumerated.item[0]);
	mutex_lock(&routing_lock);
	item = ucontrol->value.enumerated.item[0];
	if (item < e->max) {
		pr_debug("%s RX DAI ID %d TX DAI id %d\n",
			__func__, e->shift_l , e->values[item]);
		/*
		 * Both the RX port (encoded in shift_l) and the selected TX
		 * feedback port must be valid backend DAI indices before the
		 * left-channel feedback path is configured.
		 */
		if (e->shift_l < MSM_BACKEND_DAI_MAX &&
			e->values[item] < MSM_BACKEND_DAI_MAX)
			ret = afe_spk_prot_feed_back_cfg(
				msm_bedais[e->values[item]].port_id,
				msm_bedais[e->shift_l].port_id, 1, 0, 1);
		else {
			pr_debug("%s values are out of range item %d\n",
				__func__, e->values[item]);
			/* "ZERO" selection (sentinel index) disables feedback */
			if (e->values[item] == MSM_BACKEND_DAI_MAX)
				ret = afe_spk_prot_feed_back_cfg(0, 0, 0, 0, 0);
			else
				ret = -EINVAL;
		}
	} else {
		pr_err("%s item value is out of range item\n", __func__);
		ret = -EINVAL;
	}
	mutex_unlock(&routing_lock);
	return ret;
}

/*
 * Select the TX backend used as the speaker-protection V/I feedback
 * source for the right channel.  Identical to the LCH handler above
 * except for the channel flag passed to afe_spk_prot_feed_back_cfg().
 */
static int spkr_prot_put_vi_rch_port(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	int ret = 0;
	int item;
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;

	pr_debug("%s item is %d\n", __func__,
		ucontrol->value.enumerated.item[0]);
	mutex_lock(&routing_lock);
	item = ucontrol->value.enumerated.item[0];
	if (item < e->max) {
		pr_debug("%s RX DAI ID %d TX DAI id %d\n",
			__func__, e->shift_l , e->values[item]);
		if (e->shift_l < MSM_BACKEND_DAI_MAX &&
			e->values[item] < MSM_BACKEND_DAI_MAX)
			ret = afe_spk_prot_feed_back_cfg(
				msm_bedais[e->values[item]].port_id,
				msm_bedais[e->shift_l].port_id, 1, 1, 1);
		else {
			pr_debug("%s values are out of range item %d\n",
				__func__, e->values[item]);
			/* "ZERO" selection (sentinel index) disables feedback */
			if (e->values[item] == MSM_BACKEND_DAI_MAX)
				ret = afe_spk_prot_feed_back_cfg(0, 0, 0, 0, 0);
			else
				ret = -EINVAL;
		}
	} else {
		pr_err("%s item value is out of range item\n", __func__);
		ret = -EINVAL;
	}
	mutex_unlock(&routing_lock);
	return ret;
}

/* Read handler for the LCH feedback mux; the selection is not read back. */
static int spkr_prot_get_vi_lch_port(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	pr_debug("%s\n", __func__);
	return 0;
}

/* Read handler for the RCH feedback mux; always reports item 0 ("ZERO"). */
static int spkr_prot_get_vi_rch_port(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	pr_debug("%s\n", __func__);
ucontrol->value.enumerated.item[0] = 0; return 0; } static const char * const slim0_rx_vi_fb_tx_lch_mux_text[] = { "ZERO", "SLIM4_TX" }; static const char * const slim0_rx_vi_fb_tx_rch_mux_text[] = { "ZERO", "SLIM4_TX" }; static const int const slim0_rx_vi_fb_tx_lch_value[] = { MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_SLIMBUS_4_TX }; static const int const slim0_rx_vi_fb_tx_rch_value[] = { MSM_BACKEND_DAI_MAX, MSM_BACKEND_DAI_SLIMBUS_4_TX }; static const struct soc_enum slim0_rx_vi_fb_lch_mux_enum = SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_SLIMBUS_0_RX, 0, 0, ARRAY_SIZE(slim0_rx_vi_fb_tx_lch_mux_text), slim0_rx_vi_fb_tx_lch_mux_text, slim0_rx_vi_fb_tx_lch_value); static const struct soc_enum slim0_rx_vi_fb_rch_mux_enum = SOC_VALUE_ENUM_DOUBLE(0, MSM_BACKEND_DAI_SLIMBUS_0_RX, 0, 0, ARRAY_SIZE(slim0_rx_vi_fb_tx_rch_mux_text), slim0_rx_vi_fb_tx_rch_mux_text, slim0_rx_vi_fb_tx_rch_value); static const struct snd_kcontrol_new slim0_rx_vi_fb_lch_mux = SOC_DAPM_ENUM_EXT("SLIM0_RX_VI_FB_LCH_MUX", slim0_rx_vi_fb_lch_mux_enum, spkr_prot_get_vi_lch_port, spkr_prot_put_vi_lch_port); static const struct snd_kcontrol_new slim0_rx_vi_fb_rch_mux = SOC_DAPM_ENUM_EXT("SLIM0_RX_VI_FB_RCH_MUX", slim0_rx_vi_fb_rch_mux_enum, spkr_prot_get_vi_rch_port, spkr_prot_put_vi_rch_port); static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = { SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL2", "MultiMedia2 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL3", "MultiMedia3 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL4", "MultiMedia4 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL5", "MultiMedia5 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL6", "MultiMedia6 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL7", "MultiMedia7 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL8", "MultiMedia8 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL9", "MultiMedia9 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL10", "MultiMedia10 Playback", 0, 
0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL11", "MultiMedia11 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL12", "MultiMedia12 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL13", "MultiMedia13 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL14", "MultiMedia14 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL15", "MultiMedia15 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL16", "MultiMedia16 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL9", "MultiMedia9 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("VOICE2_DL", "Voice2 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("VOICE2_UL", "Voice2 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("VoLTE_DL", "VoLTE Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("VoLTE_UL", "VoLTE Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("VoWLAN_DL", "VoWLAN Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("VoWLAN_UL", "VoWLAN Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("VOIP_UL", "VoIP Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SLIM0_DL_HL", "SLIMBUS0_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("SLIM0_UL_HL", "SLIMBUS0_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("CPE_LSM_UL_HL", "CPE LSM capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SLIM1_DL_HL", "SLIMBUS1_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("SLIM1_UL_HL", "SLIMBUS1_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SLIM3_DL_HL", "SLIMBUS3_HOSTLESS Playback", 0, 0, 0, 
0), SND_SOC_DAPM_AIF_OUT("SLIM3_UL_HL", "SLIMBUS3_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SLIM4_DL_HL", "SLIMBUS4_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("SLIM4_UL_HL", "SLIMBUS4_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("INTFM_DL_HL", "INT_FM_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("INTFM_UL_HL", "INT_FM_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("INTHFP_DL_HL", "INT_HFP_BT_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("INTHFP_UL_HL", "INT_HFP_BT_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("HDMI_DL_HL", "HDMI_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_I2S_DL_HL", "SEC_I2S_RX_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("PRI_MI2S_DL_HL", "Primary MI2S_RX Hostless Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_MI2S_DL_HL", "Secondary MI2S_RX Hostless Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_MI2S_DL_HL", "Quaternary MI2S_RX Hostless Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("AUXPCM_DL_HL", "AUXPCM_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("AUXPCM_UL_HL", "AUXPCM_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MI2S_UL_HL", "MI2S_TX_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("PRI_MI2S_UL_HL", "Primary MI2S_TX Hostless Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MI2S_DL_HL", "MI2S_RX_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("DTMF_DL_HL", "DTMF_RX_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_UL_HL", "QUAT_MI2S_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_MI2S_UL_HL", "TERT_MI2S_HOSTLESS Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_MI2S_DL_HL", "QUAT_MI2S_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_MI2S_DL_HL", "TERT_MI2S_HOSTLESS Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("LSM1_UL_HL", "Listen 1 Audio Service Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("LSM2_UL_HL", "Listen 2 Audio Service Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("LSM3_UL_HL", "Listen 3 Audio 
Service Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("LSM4_UL_HL", "Listen 4 Audio Service Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("LSM5_UL_HL", "Listen 5 Audio Service Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("LSM6_UL_HL", "Listen 6 Audio Service Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("LSM7_UL_HL", "Listen 7 Audio Service Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("LSM8_UL_HL", "Listen 8 Audio Service Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("QCHAT_DL", "QCHAT Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("QCHAT_UL", "QCHAT Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("PRI_I2S_RX", "Primary I2S Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_I2S_RX", "Secondary I2S Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_OUT("SPDIF_RX", "SPDIF Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("HDMI", "HDMI Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_OUT("MI2S_RX", "MI2S Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1", "Secondary MI2S Playback SD1", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("PRI_I2S_TX", "Primary I2S Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MI2S_TX", "MI2S Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("INT_BT_SCO_RX", "Internal BT-SCO Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_IN("INT_BT_SCO_TX", "Internal BT-SCO Capture", 0, 
0, 0, 0), SND_SOC_DAPM_AIF_OUT("INT_FM_RX", "Internal FM Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_IN("INT_FM_TX", "Internal FM Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("PCM_RX", "AFE Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_IN("PCM_TX", "AFE Capture", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_OUT("VOICE_PLAYBACK_TX", "Voice Farend Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_OUT("VOICE2_PLAYBACK_TX", "Voice2 Farend Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_IN("INCALL_RECORD_TX", "Voice Uplink Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("INCALL_RECORD_RX", "Voice Downlink Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("AUX_PCM_RX", "AUX PCM Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("AUX_PCM_TX", "AUX PCM Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_AUX_PCM_RX", "Sec AUX PCM Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_AUX_PCM_TX", "Sec AUX PCM Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("VOICE_STUB_DL", "VOICE_STUB Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("VOICE_STUB_UL", "VOICE_STUB Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("VOICE2_STUB_DL", "VOICE2_STUB Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("VOICE2_STUB_UL", "VOICE2_STUB Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("VOLTE_STUB_DL", "VOLTE_STUB Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("VOLTE_STUB_UL", "VOLTE_STUB Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("STUB_RX", "Stub Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("STUB_TX", "Stub Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("STUB_1_TX", "Stub1 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0), 
SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0 , 0), SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0), SND_SOC_DAPM_SWITCH("SLIMBUS_DL_HL", SND_SOC_NOPM, 0, 0, &slim_fm_switch_mixer_controls), SND_SOC_DAPM_SWITCH("SLIMBUS1_DL_HL", SND_SOC_NOPM, 0, 0, &slim1_fm_switch_mixer_controls), SND_SOC_DAPM_SWITCH("SLIMBUS3_DL_HL", SND_SOC_NOPM, 0, 0, &slim3_fm_switch_mixer_controls), SND_SOC_DAPM_SWITCH("SLIMBUS4_DL_HL", SND_SOC_NOPM, 0, 0, &slim4_fm_switch_mixer_controls), SND_SOC_DAPM_SWITCH("PCM_RX_DL_HL", SND_SOC_NOPM, 0, 0, &pcm_rx_switch_mixer_controls), SND_SOC_DAPM_SWITCH("PRI_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0, &pri_mi2s_rx_switch_mixer_controls), SND_SOC_DAPM_SWITCH("QUAT_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0, &quat_mi2s_rx_switch_mixer_controls), SND_SOC_DAPM_SWITCH("HFP_AUX_UL_HL", SND_SOC_NOPM, 0, 0, &hfp_aux_switch_mixer_controls), SND_SOC_DAPM_SWITCH("HFP_INT_UL_HL", SND_SOC_NOPM, 0, 0, &hfp_int_switch_mixer_controls), SND_SOC_DAPM_AIF_IN("MM_STUB_DL", "MM_STUB Playback", 0, 0, 0, 0), SND_SOC_DAPM_SWITCH("QUAT_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0, &htc_switch_mixer_controls), SND_SOC_DAPM_SWITCH("QUAT_MI2S_RX_DL_HL_FM", SND_SOC_NOPM, 0, 0, &htc_switch_mixer_controls_fm), SND_SOC_DAPM_SWITCH("PRI_MI2S_TX_HL", SND_SOC_NOPM, 0, 0, &htc_switch_mixer_controls_pritx), SND_SOC_DAPM_SWITCH("SEC_MI2S_DL_COMPR_STUB", SND_SOC_NOPM, 0, 0, &htc_switch_mixer_controls_stub_smi2s), SND_SOC_DAPM_SWITCH("TERT_MI2S_DL_COMPR_STUB", SND_SOC_NOPM, 0, 0, &htc_switch_mixer_controls_stub_tmi2s), SND_SOC_DAPM_SWITCH("QUAT_MI2S_DL_COMPR_STUB", SND_SOC_NOPM, 0, 0, &htc_switch_mixer_controls_stub_qmi2s), SND_SOC_DAPM_SWITCH("SLIMBUS_DL_COMPR_STUB", SND_SOC_NOPM, 0, 0, &htc_switch_mixer_controls_stub_slim), SND_SOC_DAPM_MUX("LSM1 MUX", SND_SOC_NOPM, 0, 0, &lsm1_mux), SND_SOC_DAPM_MUX("LSM2 MUX", SND_SOC_NOPM, 0, 0, &lsm2_mux), SND_SOC_DAPM_MUX("LSM3 MUX", SND_SOC_NOPM, 0, 0, &lsm3_mux), SND_SOC_DAPM_MUX("LSM4 MUX", SND_SOC_NOPM, 0, 0, &lsm4_mux), 
SND_SOC_DAPM_MUX("LSM5 MUX", SND_SOC_NOPM, 0, 0, &lsm5_mux), SND_SOC_DAPM_MUX("LSM6 MUX", SND_SOC_NOPM, 0, 0, &lsm6_mux), SND_SOC_DAPM_MUX("LSM7 MUX", SND_SOC_NOPM, 0, 0, &lsm7_mux), SND_SOC_DAPM_MUX("LSM8 MUX", SND_SOC_NOPM, 0, 0, &lsm8_mux), SND_SOC_DAPM_MUX("SLIM_0_RX AANC MUX", SND_SOC_NOPM, 0, 0, aanc_slim_0_rx_mux), SND_SOC_DAPM_MIXER("PRI_RX Audio Mixer", SND_SOC_NOPM, 0, 0, pri_i2s_rx_mixer_controls, ARRAY_SIZE(pri_i2s_rx_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_RX Audio Mixer", SND_SOC_NOPM, 0, 0, sec_i2s_rx_mixer_controls, ARRAY_SIZE(sec_i2s_rx_mixer_controls)), SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Audio Mixer", SND_SOC_NOPM, 0, 0, slimbus_rx_mixer_controls, ARRAY_SIZE(slimbus_rx_mixer_controls)), SND_SOC_DAPM_MIXER("HDMI Mixer", SND_SOC_NOPM, 0, 0, hdmi_mixer_controls, ARRAY_SIZE(hdmi_mixer_controls)), SND_SOC_DAPM_MIXER("SPDIF_RX Audio Mixer", SND_SOC_NOPM, 0, 0, spdif_rx_mixer_controls, ARRAY_SIZE(spdif_rx_mixer_controls)), SND_SOC_DAPM_MIXER("MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0, mi2s_rx_mixer_controls, ARRAY_SIZE(mi2s_rx_mixer_controls)), SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0, quaternary_mi2s_rx_mixer_controls, ARRAY_SIZE(quaternary_mi2s_rx_mixer_controls)), SND_SOC_DAPM_MIXER("TERT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0, tertiary_mi2s_rx_mixer_controls, ARRAY_SIZE(tertiary_mi2s_rx_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0, secondary_mi2s_rx_mixer_controls, ARRAY_SIZE(secondary_mi2s_rx_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_MI2S_RX_SD1 Audio Mixer", SND_SOC_NOPM, 0, 0, secondary_mi2s_rx2_mixer_controls, ARRAY_SIZE(secondary_mi2s_rx2_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0, mi2s_hl_mixer_controls, ARRAY_SIZE(mi2s_hl_mixer_controls)), SND_SOC_DAPM_MIXER("PRI_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0, primary_mi2s_rx_mixer_controls, ARRAY_SIZE(primary_mi2s_rx_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia1 Mixer", SND_SOC_NOPM, 0, 0, 
mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0, mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0, mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia5 Mixer", SND_SOC_NOPM, 0, 0, mmul5_mixer_controls, ARRAY_SIZE(mmul5_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia6 Mixer", SND_SOC_NOPM, 0, 0, mmul6_mixer_controls, ARRAY_SIZE(mmul6_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia8 Mixer", SND_SOC_NOPM, 0, 0, mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)), SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0, auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0, sec_auxpcm_rx_mixer_controls, ARRAY_SIZE(sec_auxpcm_rx_mixer_controls)), SND_SOC_DAPM_MIXER("Incall_Music Audio Mixer", SND_SOC_NOPM, 0, 0, incall_music_delivery_mixer_controls, ARRAY_SIZE(incall_music_delivery_mixer_controls)), SND_SOC_DAPM_MIXER("Incall_Music_2 Audio Mixer", SND_SOC_NOPM, 0, 0, incall_music2_delivery_mixer_controls, ARRAY_SIZE(incall_music2_delivery_mixer_controls)), SND_SOC_DAPM_MIXER("SLIMBUS_4_RX Audio Mixer", SND_SOC_NOPM, 0, 0, slimbus_4_rx_mixer_controls, ARRAY_SIZE(slimbus_4_rx_mixer_controls)), SND_SOC_DAPM_MIXER("SLIMBUS_6_RX Audio Mixer", SND_SOC_NOPM, 0, 0, slimbus_6_rx_mixer_controls, ARRAY_SIZE(slimbus_6_rx_mixer_controls)), SND_SOC_DAPM_MIXER("PRI_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, pri_rx_voice_mixer_controls, ARRAY_SIZE(pri_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, sec_i2s_rx_voice_mixer_controls, ARRAY_SIZE(sec_i2s_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_MI2S_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, sec_mi2s_rx_voice_mixer_controls, ARRAY_SIZE(sec_mi2s_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("SLIM_0_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, 
slimbus_rx_voice_mixer_controls, ARRAY_SIZE(slimbus_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, bt_sco_rx_voice_mixer_controls, ARRAY_SIZE(bt_sco_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("AFE_PCM_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, afe_pcm_rx_voice_mixer_controls, ARRAY_SIZE(afe_pcm_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("AUX_PCM_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, aux_pcm_rx_voice_mixer_controls, ARRAY_SIZE(aux_pcm_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, sec_aux_pcm_rx_voice_mixer_controls, ARRAY_SIZE(sec_aux_pcm_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("HDMI_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, hdmi_rx_voice_mixer_controls, ARRAY_SIZE(hdmi_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("MI2S_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, mi2s_rx_voice_mixer_controls, ARRAY_SIZE(mi2s_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("PRI_MI2S_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, pri_mi2s_rx_voice_mixer_controls, ARRAY_SIZE(pri_mi2s_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("QUAT_MI2S_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, quat_mi2s_rx_voice_mixer_controls, ARRAY_SIZE(quat_mi2s_rx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("Voice_Tx Mixer", SND_SOC_NOPM, 0, 0, tx_voice_mixer_controls, ARRAY_SIZE(tx_voice_mixer_controls)), SND_SOC_DAPM_MIXER("Voice2_Tx Mixer", SND_SOC_NOPM, 0, 0, tx_voice2_mixer_controls, ARRAY_SIZE(tx_voice2_mixer_controls)), SND_SOC_DAPM_MIXER("Voip_Tx Mixer", SND_SOC_NOPM, 0, 0, tx_voip_mixer_controls, ARRAY_SIZE(tx_voip_mixer_controls)), SND_SOC_DAPM_MIXER("VoLTE_Tx Mixer", SND_SOC_NOPM, 0, 0, tx_volte_mixer_controls, ARRAY_SIZE(tx_volte_mixer_controls)), SND_SOC_DAPM_MIXER("VoWLAN_Tx Mixer", SND_SOC_NOPM, 0, 0, tx_vowlan_mixer_controls, ARRAY_SIZE(tx_vowlan_mixer_controls)), SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Audio Mixer", SND_SOC_NOPM, 0, 0, int_bt_sco_rx_mixer_controls, ARRAY_SIZE(int_bt_sco_rx_mixer_controls)), 
SND_SOC_DAPM_MIXER("INTERNAL_FM_RX Audio Mixer", SND_SOC_NOPM, 0, 0, int_fm_rx_mixer_controls, ARRAY_SIZE(int_fm_rx_mixer_controls)), SND_SOC_DAPM_MIXER("AFE_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0, afe_pcm_rx_mixer_controls, ARRAY_SIZE(afe_pcm_rx_mixer_controls)), SND_SOC_DAPM_MIXER("Voice Stub Tx Mixer", SND_SOC_NOPM, 0, 0, tx_voice_stub_mixer_controls, ARRAY_SIZE(tx_voice_stub_mixer_controls)), SND_SOC_DAPM_MIXER("Voice2 Stub Tx Mixer", SND_SOC_NOPM, 0, 0, tx_voice2_stub_mixer_controls, ARRAY_SIZE(tx_voice2_stub_mixer_controls)), SND_SOC_DAPM_MIXER("VoLTE Stub Tx Mixer", SND_SOC_NOPM, 0, 0, tx_volte_stub_mixer_controls, ARRAY_SIZE(tx_volte_stub_mixer_controls)), SND_SOC_DAPM_MIXER("STUB_RX Mixer", SND_SOC_NOPM, 0, 0, stub_rx_mixer_controls, ARRAY_SIZE(stub_rx_mixer_controls)), SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Mixer", SND_SOC_NOPM, 0, 0, slimbus_1_rx_mixer_controls, ARRAY_SIZE(slimbus_1_rx_mixer_controls)), SND_SOC_DAPM_MIXER("SLIMBUS_3_RX_Voice Mixer", SND_SOC_NOPM, 0, 0, slimbus_3_rx_mixer_controls, ARRAY_SIZE(slimbus_3_rx_mixer_controls)), SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Port Mixer", SND_SOC_NOPM, 0, 0, sbus_0_rx_port_mixer_controls, ARRAY_SIZE(sbus_0_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("AUX_PCM_RX Port Mixer", SND_SOC_NOPM, 0, 0, aux_pcm_rx_port_mixer_controls, ARRAY_SIZE(aux_pcm_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_AUXPCM_RX Port Mixer", SND_SOC_NOPM, 0, 0, sec_auxpcm_rx_port_mixer_controls, ARRAY_SIZE(sec_auxpcm_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Port Mixer", SND_SOC_NOPM, 0, 0, sbus_1_rx_port_mixer_controls, ARRAY_SIZE(sbus_1_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Port Mixer", SND_SOC_NOPM, 0, 0, bt_sco_rx_port_mixer_controls, ARRAY_SIZE(bt_sco_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("AFE_PCM_RX Port Mixer", SND_SOC_NOPM, 0, 0, afe_pcm_rx_port_mixer_controls, ARRAY_SIZE(afe_pcm_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("HDMI_RX Port Mixer", SND_SOC_NOPM, 0, 0, 
hdmi_rx_port_mixer_controls, ARRAY_SIZE(hdmi_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("SEC_I2S_RX Port Mixer", SND_SOC_NOPM, 0, 0, sec_i2s_rx_port_mixer_controls, ARRAY_SIZE(sec_i2s_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("SLIMBUS_3_RX Port Mixer", SND_SOC_NOPM, 0, 0, sbus_3_rx_port_mixer_controls, ARRAY_SIZE(sbus_3_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0, mi2s_rx_port_mixer_controls, ARRAY_SIZE(mi2s_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("PRI_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0, primary_mi2s_rx_port_mixer_controls, ARRAY_SIZE(primary_mi2s_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0, quat_mi2s_rx_port_mixer_controls, ARRAY_SIZE(quat_mi2s_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("TERT_MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0, tert_mi2s_rx_port_mixer_controls, ARRAY_SIZE(tert_mi2s_rx_port_mixer_controls)), SND_SOC_DAPM_MIXER("QCHAT_Tx Mixer", SND_SOC_NOPM, 0, 0, tx_qchat_mixer_controls, ARRAY_SIZE(tx_qchat_mixer_controls)), SND_SOC_DAPM_OUTPUT("BE_OUT"), SND_SOC_DAPM_INPUT("BE_IN"), SND_SOC_DAPM_MUX("SLIM0_RX_VI_FB_LCH_MUX", SND_SOC_NOPM, 0, 0, &slim0_rx_vi_fb_lch_mux), SND_SOC_DAPM_MUX("SLIM0_RX_VI_FB_RCH_MUX", SND_SOC_NOPM, 0, 0, &slim0_rx_vi_fb_rch_mux), SND_SOC_DAPM_MUX("VOC_EXT_EC MUX", SND_SOC_NOPM, 0, 0, &voc_ext_ec_mux), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL1 MUX", SND_SOC_NOPM, 0, 0, &ext_ec_ref_mux_ul1), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL2 MUX", SND_SOC_NOPM, 0, 0, &ext_ec_ref_mux_ul2), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL4 MUX", SND_SOC_NOPM, 0, 0, &ext_ec_ref_mux_ul4), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL5 MUX", SND_SOC_NOPM, 0, 0, &ext_ec_ref_mux_ul5), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL6 MUX", SND_SOC_NOPM, 0, 0, &ext_ec_ref_mux_ul6), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL8 MUX", SND_SOC_NOPM, 0, 0, &ext_ec_ref_mux_ul8), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL9 MUX", SND_SOC_NOPM, 0, 0, &ext_ec_ref_mux_ul9), }; static const struct snd_soc_dapm_route intercon[] = { 
{"PRI_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"PRI_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"PRI_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"PRI_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"PRI_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"PRI_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"PRI_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"PRI_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"PRI_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"PRI_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"PRI_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"PRI_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"PRI_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"PRI_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"PRI_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"PRI_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"PRI_I2S_RX", NULL, "PRI_RX Audio Mixer"}, {"SEC_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"SEC_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"SEC_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"SEC_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"SEC_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"SEC_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"SEC_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"SEC_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"SEC_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"SEC_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"SEC_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"SEC_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"SEC_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"SEC_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"SEC_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SEC_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"SEC_I2S_RX", NULL, "SEC_RX Audio Mixer"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"SLIMBUS_0_RX Audio Mixer", 
"MultiMedia6", "MM_DL6"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Audio Mixer"}, {"SLIMBUS_DL_COMPR_STUB", "Switch", "MM_STUB_DL"}, {"SLIMBUS_0_RX", NULL, "SLIMBUS_DL_COMPR_STUB"}, {"HDMI Mixer", "MultiMedia1", "MM_DL1"}, {"HDMI Mixer", "MultiMedia2", "MM_DL2"}, {"HDMI Mixer", "MultiMedia3", "MM_DL3"}, {"HDMI Mixer", "MultiMedia4", "MM_DL4"}, {"HDMI Mixer", "MultiMedia5", "MM_DL5"}, {"HDMI Mixer", "MultiMedia6", "MM_DL6"}, {"HDMI Mixer", "MultiMedia7", "MM_DL7"}, {"HDMI Mixer", "MultiMedia8", "MM_DL8"}, {"HDMI Mixer", "MultiMedia9", "MM_DL9"}, {"HDMI Mixer", "MultiMedia10", "MM_DL10"}, {"HDMI Mixer", "MultiMedia11", "MM_DL11"}, {"HDMI Mixer", "MultiMedia12", "MM_DL12"}, {"HDMI Mixer", "MultiMedia13", "MM_DL13"}, {"HDMI Mixer", "MultiMedia14", "MM_DL14"}, {"HDMI Mixer", "MultiMedia15", "MM_DL15"}, {"HDMI Mixer", "MultiMedia16", "MM_DL16"}, {"HDMI", NULL, "HDMI Mixer"}, {"SPDIF_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"SPDIF_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"SPDIF_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"SPDIF_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"SPDIF_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"SPDIF_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"SPDIF_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"SPDIF_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"SPDIF_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"SPDIF_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"SPDIF_RX Audio Mixer", "MultiMedia11", 
"MM_DL11"}, {"SPDIF_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"SPDIF_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"SPDIF_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"SPDIF_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SPDIF_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"SPDIF_RX", NULL, "SPDIF_RX Audio Mixer"}, {"Incall_Music Audio Mixer", "MultiMedia1", "MM_DL1"}, {"Incall_Music Audio Mixer", "MultiMedia2", "MM_DL2"}, {"Incall_Music Audio Mixer", "MultiMedia5", "MM_DL5"}, {"Incall_Music Audio Mixer", "MultiMedia9", "MM_DL9"}, {"VOICE_PLAYBACK_TX", NULL, "Incall_Music Audio Mixer"}, {"Incall_Music_2 Audio Mixer", "MultiMedia1", "MM_DL1"}, {"Incall_Music_2 Audio Mixer", "MultiMedia2", "MM_DL2"}, {"Incall_Music_2 Audio Mixer", "MultiMedia5", "MM_DL5"}, {"Incall_Music_2 Audio Mixer", "MultiMedia9", "MM_DL9"}, {"VOICE2_PLAYBACK_TX", NULL, "Incall_Music_2 Audio Mixer"}, {"SLIMBUS_4_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"SLIMBUS_4_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"SLIMBUS_4_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"SLIMBUS_4_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"SLIMBUS_4_RX", NULL, "SLIMBUS_4_RX Audio Mixer"}, {"SLIMBUS_6_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"SLIMBUS_6_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"SLIMBUS_6_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"SLIMBUS_6_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"SLIMBUS_6_RX", NULL, "SLIMBUS_6_RX Audio Mixer"}, {"MultiMedia1 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"}, {"MultiMedia4 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"}, {"MultiMedia8 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"}, {"MultiMedia1 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"}, {"MultiMedia4 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"}, {"MultiMedia8 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"}, {"MultiMedia1 Mixer", "SLIM_4_TX", "SLIMBUS_4_TX"}, {"MultiMedia1 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"}, {"MultiMedia8 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"}, {"MultiMedia4 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"MultiMedia8 
Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"MultiMedia2 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"MultiMedia4 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"MultiMedia8 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"MI2S_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"MI2S_RX", NULL, "MI2S_RX Audio Mixer"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"QUAT_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX Audio Mixer"}, 
{"QUAT_MI2S_DL_COMPR_STUB", "Switch", "MM_STUB_DL"}, {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_DL_COMPR_STUB"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"TERT_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"TERT_MI2S_RX", NULL, "TERT_MI2S_RX Audio Mixer"}, {"TERT_MI2S_DL_COMPR_STUB", "Switch", "MM_STUB_DL"}, {"TERT_MI2S_RX", NULL, "TERT_MI2S_DL_COMPR_STUB"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SEC_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"SEC_MI2S_RX", NULL, "SEC_MI2S_RX Audio Mixer"}, {"SEC_MI2S_DL_COMPR_STUB", "Switch", "MM_STUB_DL"}, {"SEC_MI2S_RX", NULL, "SEC_MI2S_DL_COMPR_STUB"}, {"SEC_MI2S_RX_SD1 Audio Mixer", "MultiMedia6", "MM_DL6"}, {"SEC_MI2S_RX_SD1", NULL, "SEC_MI2S_RX_SD1 Audio Mixer"}, {"SEC_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"SEC_MI2S_RX Port 
Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"PRI_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"PRI_MI2S_RX", NULL, "PRI_MI2S_RX Audio Mixer"}, {"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"}, {"MultiMedia1 Mixer", "MI2S_TX", "MI2S_TX"}, {"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"}, {"MultiMedia5 Mixer", "MI2S_TX", "MI2S_TX"}, {"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"MultiMedia2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"MultiMedia1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"MultiMedia5 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"}, {"MultiMedia1 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, {"MultiMedia5 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"}, {"MultiMedia2 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"MultiMedia1 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"}, {"MultiMedia1 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"MultiMedia6 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"MultiMedia6 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"MultiMedia6 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"MultiMedia6 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"MultiMedia6 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, 
{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia6", "MM_UL6"}, {"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX Audio Mixer"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"INTERNAL_FM_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"INT_FM_RX", NULL, 
"INTERNAL_FM_RX Audio Mixer"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"AFE_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"PCM_RX", NULL, "AFE_PCM_RX Audio Mixer"}, {"MultiMedia1 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"MultiMedia4 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"MultiMedia5 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"MultiMedia8 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"MultiMedia1 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MultiMedia4 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MultiMedia5 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MultiMedia6 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MultiMedia8 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MultiMedia1 Mixer", "AFE_PCM_TX", "PCM_TX"}, {"MultiMedia4 Mixer", "AFE_PCM_TX", "PCM_TX"}, {"MultiMedia5 Mixer", "AFE_PCM_TX", "PCM_TX"}, {"MultiMedia8 Mixer", "AFE_PCM_TX", "PCM_TX"}, {"MM_UL1", NULL, "MultiMedia1 Mixer"}, {"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MM_UL2", NULL, "MultiMedia2 Mixer"}, {"MM_UL4", NULL, "MultiMedia4 Mixer"}, {"MM_UL5", NULL, "MultiMedia5 Mixer"}, {"MM_UL6", NULL, "MultiMedia6 Mixer"}, {"MM_UL8", NULL, "MultiMedia8 Mixer"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"AUX_PCM_RX 
Audio Mixer", "MultiMedia2", "MM_DL2"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"AUX_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"AUX_PCM_RX", NULL, "AUX_PCM_RX Audio Mixer"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia5", "MM_DL5"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_DL6"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia7", "MM_DL7"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia8", "MM_DL8"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia9", "MM_DL9"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia10", "MM_DL10"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia11", "MM_DL11"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia12", "MM_DL12"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia13", "MM_DL13"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_UL6"}, {"SEC_AUX_PCM_RX", NULL, "SEC_AUX_PCM_RX Audio Mixer"}, {"MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"MI2S_RX_Voice 
Mixer", "Voip", "VOIP_DL"}, {"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"}, {"PRI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"PRI_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"PRI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"PRI_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"PRI_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"PRI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"PRI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"PRI_I2S_RX", NULL, "PRI_RX_Voice Mixer"}, {"SEC_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"SEC_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"SEC_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"SEC_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"SEC_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"SEC_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"SEC_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"SEC_I2S_RX", NULL, "SEC_RX_Voice Mixer"}, {"SEC_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"SEC_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"SEC_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"SEC_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"SEC_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"SEC_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"SEC_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"SEC_MI2S_RX", NULL, "SEC_MI2S_RX_Voice Mixer"}, {"SLIM_0_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"SLIM_0_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"SLIM_0_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"SLIM_0_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"SLIM_0_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"SLIM_0_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"SLIM_0_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"SLIM_0_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"}, {"SLIM_0_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"}, {"SLIM_0_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"SLIMBUS_0_RX", NULL, "SLIM_0_RX_Voice Mixer"}, {"INTERNAL_BT_SCO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, 
{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"INTERNAL_BT_SCO_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"INTERNAL_BT_SCO_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"INTERNAL_BT_SCO_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"INTERNAL_BT_SCO_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"INTERNAL_BT_SCO_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX_Voice Mixer"}, {"AFE_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"AFE_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"AFE_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"AFE_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"AFE_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"AFE_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"AFE_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"PCM_RX", NULL, "AFE_PCM_RX_Voice Mixer"}, {"AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"AUX_PCM_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"}, {"AUX_PCM_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"}, {"AUX_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"AUX_PCM_RX", NULL, "AUX_PCM_RX_Voice Mixer"}, {"SEC_AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"SEC_AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"SEC_AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"SEC_AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"SEC_AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"SEC_AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"SEC_AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"SEC_AUX_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"SEC_AUX_PCM_RX", NULL, "SEC_AUX_PCM_RX_Voice Mixer"}, {"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"HDMI_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"HDMI_RX_Voice 
Mixer", "VoLTE", "VoLTE_DL"}, {"HDMI_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"HDMI_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"HDMI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"HDMI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"HDMI", NULL, "HDMI_RX_Voice Mixer"}, {"HDMI", NULL, "HDMI_DL_HL"}, {"MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"}, {"PRI_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"PRI_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"PRI_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"PRI_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"PRI_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"PRI_MI2S_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"}, {"PRI_MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"PRI_MI2S_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"}, {"PRI_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"PRI_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"}, {"PRI_MI2S_RX", NULL, "PRI_MI2S_RX_Voice Mixer"}, {"QUAT_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"}, {"QUAT_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"}, {"QUAT_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"}, {"QUAT_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"}, {"QUAT_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"}, {"QUAT_MI2S_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"}, {"QUAT_MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"QUAT_MI2S_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"}, {"QUAT_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"}, {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX_Voice Mixer"}, {"VOC_EXT_EC MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"}, {"VOC_EXT_EC MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"}, {"VOC_EXT_EC MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"}, {"VOC_EXT_EC MUX", "QUAT_MI2S_TX" , 
"QUAT_MI2S_TX"}, {"CS-VOICE_UL1", NULL, "VOC_EXT_EC MUX"}, {"VOIP_UL", NULL, "VOC_EXT_EC MUX"}, {"VoLTE_UL", NULL, "VOC_EXT_EC MUX"}, {"VOICE2_UL", NULL, "VOC_EXT_EC MUX"}, {"VoWLAN_UL", NULL, "VOC_EXT_EC MUX"}, {"AUDIO_REF_EC_UL1 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"}, {"AUDIO_REF_EC_UL1 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"}, {"AUDIO_REF_EC_UL1 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"}, {"AUDIO_REF_EC_UL1 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"}, {"AUDIO_REF_EC_UL2 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"}, {"AUDIO_REF_EC_UL2 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"}, {"AUDIO_REF_EC_UL2 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"}, {"AUDIO_REF_EC_UL2 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"}, {"AUDIO_REF_EC_UL4 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"}, {"AUDIO_REF_EC_UL4 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"}, {"AUDIO_REF_EC_UL4 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"}, {"AUDIO_REF_EC_UL4 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"}, {"AUDIO_REF_EC_UL5 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"}, {"AUDIO_REF_EC_UL5 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"}, {"AUDIO_REF_EC_UL5 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"}, {"AUDIO_REF_EC_UL5 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"}, {"AUDIO_REF_EC_UL6 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"}, {"AUDIO_REF_EC_UL6 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"}, {"AUDIO_REF_EC_UL6 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"}, {"AUDIO_REF_EC_UL6 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"}, {"AUDIO_REF_EC_UL8 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"}, {"AUDIO_REF_EC_UL8 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"}, {"AUDIO_REF_EC_UL8 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"}, {"AUDIO_REF_EC_UL8 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"}, {"AUDIO_REF_EC_UL9 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"}, {"AUDIO_REF_EC_UL9 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"}, {"AUDIO_REF_EC_UL9 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"}, {"AUDIO_REF_EC_UL9 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"}, {"MM_UL1", NULL, "AUDIO_REF_EC_UL1 MUX"}, {"MM_UL2", NULL, "AUDIO_REF_EC_UL2 MUX"}, {"MM_UL4", NULL, "AUDIO_REF_EC_UL4 MUX"}, {"MM_UL5", NULL, "AUDIO_REF_EC_UL5 
MUX"}, {"MM_UL6", NULL, "AUDIO_REF_EC_UL6 MUX"}, {"MM_UL8", NULL, "AUDIO_REF_EC_UL8 MUX"}, {"MM_UL9", NULL, "AUDIO_REF_EC_UL9 MUX"}, {"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"}, {"Voice_Tx Mixer", "PRI_MI2S_TX_Voice", "PRI_MI2S_TX"}, {"Voice_Tx Mixer", "MI2S_TX_Voice", "MI2S_TX"}, {"Voice_Tx Mixer", "TERT_MI2S_TX_Voice", "TERT_MI2S_TX"}, {"Voice_Tx Mixer", "SLIM_0_TX_Voice", "SLIMBUS_0_TX"}, {"Voice_Tx Mixer", "INTERNAL_BT_SCO_TX_Voice", "INT_BT_SCO_TX"}, {"Voice_Tx Mixer", "AFE_PCM_TX_Voice", "PCM_TX"}, {"Voice_Tx Mixer", "AUX_PCM_TX_Voice", "AUX_PCM_TX"}, {"Voice_Tx Mixer", "SEC_AUX_PCM_TX_Voice", "SEC_AUX_PCM_TX"}, {"CS-VOICE_UL1", NULL, "Voice_Tx Mixer"}, {"Voice2_Tx Mixer", "PRI_TX_Voice2", "PRI_I2S_TX"}, {"Voice2_Tx Mixer", "PRI_MI2S_TX_Voice2", "PRI_MI2S_TX"}, {"Voice2_Tx Mixer", "MI2S_TX_Voice2", "MI2S_TX"}, {"Voice2_Tx Mixer", "TERT_MI2S_TX_Voice2", "TERT_MI2S_TX"}, {"Voice2_Tx Mixer", "SLIM_0_TX_Voice2", "SLIMBUS_0_TX"}, {"Voice2_Tx Mixer", "INTERNAL_BT_SCO_TX_Voice2", "INT_BT_SCO_TX"}, {"Voice2_Tx Mixer", "AFE_PCM_TX_Voice2", "PCM_TX"}, {"Voice2_Tx Mixer", "AUX_PCM_TX_Voice2", "AUX_PCM_TX"}, {"Voice2_Tx Mixer", "SEC_AUX_PCM_TX_Voice2", "SEC_AUX_PCM_TX"}, {"VOICE2_UL", NULL, "Voice2_Tx Mixer"}, {"VoLTE_Tx Mixer", "PRI_TX_VoLTE", "PRI_I2S_TX"}, {"VoLTE_Tx Mixer", "SLIM_0_TX_VoLTE", "SLIMBUS_0_TX"}, {"VoLTE_Tx Mixer", "INTERNAL_BT_SCO_TX_VoLTE", "INT_BT_SCO_TX"}, {"VoLTE_Tx Mixer", "AFE_PCM_TX_VoLTE", "PCM_TX"}, {"VoLTE_Tx Mixer", "AUX_PCM_TX_VoLTE", "AUX_PCM_TX"}, {"VoLTE_Tx Mixer", "SEC_AUX_PCM_TX_VoLTE", "SEC_AUX_PCM_TX"}, {"VoLTE_Tx Mixer", "MI2S_TX_VoLTE", "MI2S_TX"}, {"VoLTE_Tx Mixer", "PRI_MI2S_TX_VoLTE", "PRI_MI2S_TX"}, {"VoLTE_Tx Mixer", "TERT_MI2S_TX_VoLTE", "TERT_MI2S_TX"}, {"VoLTE_UL", NULL, "VoLTE_Tx Mixer"}, {"VoWLAN_Tx Mixer", "PRI_TX_VoWLAN", "PRI_I2S_TX"}, {"VoWLAN_Tx Mixer", "SLIM_0_TX_VoWLAN", "SLIMBUS_0_TX"}, {"VoWLAN_Tx Mixer", "INTERNAL_BT_SCO_TX_VoWLAN", "INT_BT_SCO_TX"}, {"VoWLAN_Tx Mixer", "AFE_PCM_TX_VoWLAN", "PCM_TX"}, 
{"VoWLAN_Tx Mixer", "AUX_PCM_TX_VoWLAN", "AUX_PCM_TX"}, {"VoWLAN_Tx Mixer", "SEC_AUX_PCM_TX_VoWLAN", "SEC_AUX_PCM_TX"}, {"VoWLAN_Tx Mixer", "MI2S_TX_VoWLAN", "MI2S_TX"}, {"VoWLAN_Tx Mixer", "PRI_MI2S_TX_VoWLAN", "PRI_MI2S_TX"}, {"VoWLAN_Tx Mixer", "TERT_MI2S_TX_VoWLAN", "TERT_MI2S_TX"}, {"VoWLAN_UL", NULL, "VoWLAN_Tx Mixer"}, {"Voip_Tx Mixer", "PRI_TX_Voip", "PRI_I2S_TX"}, {"Voip_Tx Mixer", "MI2S_TX_Voip", "MI2S_TX"}, {"Voip_Tx Mixer", "TERT_MI2S_TX_Voip", "TERT_MI2S_TX"}, {"Voip_Tx Mixer", "SLIM_0_TX_Voip", "SLIMBUS_0_TX"}, {"Voip_Tx Mixer", "INTERNAL_BT_SCO_TX_Voip", "INT_BT_SCO_TX"}, {"Voip_Tx Mixer", "AFE_PCM_TX_Voip", "PCM_TX"}, {"Voip_Tx Mixer", "AUX_PCM_TX_Voip", "AUX_PCM_TX"}, {"Voip_Tx Mixer", "SEC_AUX_PCM_TX_Voip", "SEC_AUX_PCM_TX"}, {"Voip_Tx Mixer", "PRI_MI2S_TX_Voip", "PRI_MI2S_TX"}, {"VOIP_UL", NULL, "Voip_Tx Mixer"}, {"SLIMBUS_DL_HL", "Switch", "SLIM0_DL_HL"}, {"SLIMBUS_0_RX", NULL, "SLIMBUS_DL_HL"}, {"QUAT_MI2S_RX_DL_HL", "Switch", "QUAT_MI2S_DL_HL"}, {"QUAT_MI2S_RX_DL_HL_FM", "Switch", "SLIM0_DL_HL"}, {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX_DL_HL_FM"}, {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX_DL_HL"}, {"QUAT_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"SLIMBUS1_DL_HL", "Switch", "SLIM1_DL_HL"}, {"SLIMBUS_1_RX", NULL, "SLIMBUS1_DL_HL"}, {"SLIMBUS3_DL_HL", "Switch", "SLIM3_DL_HL"}, {"SLIMBUS_3_RX", NULL, "SLIMBUS3_DL_HL"}, {"SLIMBUS4_DL_HL", "Switch", "SLIM4_DL_HL"}, {"SLIMBUS_4_RX", NULL, "SLIMBUS4_DL_HL"}, {"SLIM0_UL_HL", NULL, "SLIMBUS_0_TX"}, {"SLIM1_UL_HL", NULL, "SLIMBUS_1_TX"}, {"SLIM3_UL_HL", NULL, "SLIMBUS_3_TX"}, {"SLIM4_UL_HL", NULL, "SLIMBUS_4_TX"}, {"LSM1 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"}, {"LSM1 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"}, {"LSM1 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"}, {"LSM1 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"}, {"LSM1 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"}, {"LSM1 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"LSM1_UL_HL", NULL, "LSM1 MUX"}, {"LSM2 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"}, {"LSM2 MUX", "SLIMBUS_1_TX", 
"SLIMBUS_1_TX"}, {"LSM2 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"}, {"LSM2 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"}, {"LSM2 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"}, {"LSM2 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"LSM2_UL_HL", NULL, "LSM2 MUX"}, {"LSM3 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"}, {"LSM3 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"}, {"LSM3 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"}, {"LSM3 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"}, {"LSM3 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"}, {"LSM3 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"LSM3_UL_HL", NULL, "LSM3 MUX"}, {"LSM4 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"}, {"LSM4 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"}, {"LSM4 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"}, {"LSM4 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"}, {"LSM4 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"}, {"LSM4 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"LSM4_UL_HL", NULL, "LSM4 MUX"}, {"LSM5 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"}, {"LSM5 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"}, {"LSM5 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"}, {"LSM5 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"}, {"LSM5 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"}, {"LSM5 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"LSM5_UL_HL", NULL, "LSM5 MUX"}, {"LSM6 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"}, {"LSM6 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"}, {"LSM6 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"}, {"LSM6 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"}, {"LSM6 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"}, {"LSM6_UL_HL", NULL, "LSM6 MUX"}, {"LSM7 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"}, {"LSM7 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"}, {"LSM7 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"}, {"LSM7 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"}, {"LSM7 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"}, {"LSM7_UL_HL", NULL, "LSM7 MUX"}, {"LSM8 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"}, {"LSM8 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"}, {"LSM8 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"}, {"LSM8 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"}, {"LSM8 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"}, {"LSM8_UL_HL", NULL, "LSM8 MUX"}, {"CPE_LSM_UL_HL", NULL, "BE_IN"}, 
{"QCHAT_Tx Mixer", "PRI_TX_QCHAT", "PRI_I2S_TX"}, {"QCHAT_Tx Mixer", "SLIM_0_TX_QCHAT", "SLIMBUS_0_TX"}, {"QCHAT_Tx Mixer", "INTERNAL_BT_SCO_TX_QCHAT", "INT_BT_SCO_TX"}, {"QCHAT_Tx Mixer", "AFE_PCM_TX_QCHAT", "PCM_TX"}, {"QCHAT_Tx Mixer", "AUX_PCM_TX_QCHAT", "AUX_PCM_TX"}, {"QCHAT_Tx Mixer", "SEC_AUX_PCM_TX_QCHAT", "SEC_AUX_PCM_TX"}, {"QCHAT_Tx Mixer", "MI2S_TX_QCHAT", "MI2S_TX"}, {"QCHAT_Tx Mixer", "PRI_MI2S_TX_QCHAT", "PRI_MI2S_TX"}, {"QCHAT_Tx Mixer", "TERT_MI2S_TX_QCHAT", "TERT_MI2S_TX"}, {"QCHAT_UL", NULL, "QCHAT_Tx Mixer"}, {"INT_FM_RX", NULL, "INTFM_DL_HL"}, {"INTFM_UL_HL", NULL, "INT_FM_TX"}, {"INTHFP_UL_HL", "NULL", "HFP_AUX_UL_HL"}, {"HFP_AUX_UL_HL", "Switch", "SEC_AUX_PCM_TX"}, {"INTHFP_UL_HL", "NULL", "HFP_INT_UL_HL"}, {"HFP_INT_UL_HL", "Switch", "INT_BT_SCO_TX"}, {"AUX_PCM_RX", NULL, "AUXPCM_DL_HL"}, {"AUXPCM_UL_HL", NULL, "AUX_PCM_TX"}, {"MI2S_RX", NULL, "MI2S_DL_HL"}, {"MI2S_UL_HL", NULL, "MI2S_TX"}, {"PCM_RX_DL_HL", "Switch", "SLIM0_DL_HL"}, {"PCM_RX", NULL, "PCM_RX_DL_HL"}, {"PRI_MI2S_RX_DL_HL", "Switch", "PRI_MI2S_DL_HL"}, {"PRI_MI2S_RX", NULL, "PRI_MI2S_RX_DL_HL"}, {"QUAT_MI2S_RX_DL_HL", "Switch", "QUAT_MI2S_DL_HL"}, {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX_DL_HL"}, {"MI2S_UL_HL", NULL, "TERT_MI2S_TX"}, {"TERT_MI2S_UL_HL", NULL, "TERT_MI2S_TX"}, {"TERT_MI2S_RX", NULL, "TERT_MI2S_DL_HL"}, {"SEC_I2S_RX", NULL, "SEC_I2S_DL_HL"}, {"PRI_MI2S_TX_HL", "Switch", "PRI_MI2S_TX"}, {"PRI_MI2S_UL_HL", NULL, "PRI_MI2S_TX_HL"}, {"SEC_MI2S_RX", NULL, "SEC_MI2S_DL_HL"}, {"PRI_MI2S_RX", NULL, "PRI_MI2S_DL_HL"}, {"QUAT_MI2S_UL_HL", NULL, "QUAT_MI2S_TX"}, {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_DL_HL"}, {"SLIMBUS_0_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"SLIMBUS_0_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"SLIMBUS_0_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"}, {"SLIMBUS_0_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"SLIMBUS_0_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, {"SLIMBUS_0_RX Port Mixer", "MI2S_TX", "MI2S_TX"}, {"SLIMBUS_0_RX Port 
Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"SLIMBUS_0_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"SLIMBUS_0_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Port Mixer"}, {"AFE_PCM_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"AFE_PCM_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"}, {"PCM_RX", NULL, "AFE_PCM_RX Port Mixer"}, {"AUX_PCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"AUX_PCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"AUX_PCM_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"}, {"AUX_PCM_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, {"AUX_PCM_RX", NULL, "AUX_PCM_RX Port Mixer"}, {"SEC_AUXPCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"SEC_AUXPCM_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, {"SEC_AUXPCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"SEC_AUX_PCM_RX", NULL, "SEC_AUXPCM_RX Port Mixer"}, {"Voice Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"}, {"Voice Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"}, {"Voice Stub Tx Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"Voice Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"}, {"Voice Stub Tx Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"Voice Stub Tx Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, {"Voice Stub Tx Mixer", "MI2S_TX", "MI2S_TX"}, {"Voice Stub Tx Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"Voice Stub Tx Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"Voice Stub Tx Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"Voice Stub Tx Mixer", "SLIM_3_TX", "SLIMBUS_3_TX"}, {"Voice Stub Tx Mixer", "AFE_PCM_TX", "PCM_TX"}, {"VOICE_STUB_UL", NULL, "Voice Stub Tx Mixer"}, {"VoLTE Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"}, {"VoLTE Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"}, {"VoLTE Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"}, {"VoLTE Stub Tx Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"VoLTE Stub Tx Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"VoLTE Stub Tx Mixer", "SLIM_3_TX", "SLIMBUS_3_TX"}, {"VoLTE Stub Tx Mixer", "AFE_PCM_TX", "PCM_TX"}, {"VoLTE Stub Tx 
Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"VoLTE Stub Tx Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"VOLTE_STUB_UL", NULL, "VoLTE Stub Tx Mixer"}, {"Voice2 Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"}, {"Voice2 Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"}, {"Voice2 Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"}, {"Voice2 Stub Tx Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"Voice2 Stub Tx Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"Voice2 Stub Tx Mixer", "SLIM_3_TX", "SLIMBUS_3_TX"}, {"Voice2 Stub Tx Mixer", "AFE_PCM_TX", "PCM_TX"}, {"Voice2 Stub Tx Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"Voice2 Stub Tx Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"VOICE2_STUB_UL", NULL, "Voice2 Stub Tx Mixer"}, {"STUB_RX Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"STUB_RX Mixer", "Voice2 Stub", "VOICE2_STUB_DL"}, {"STUB_RX Mixer", "VoLTE Stub", "VOLTE_STUB_DL"}, {"STUB_RX", NULL, "STUB_RX Mixer"}, {"SLIMBUS_1_RX Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"SLIMBUS_1_RX Mixer", "Voice2 Stub", "VOICE2_STUB_DL"}, {"SLIMBUS_1_RX Mixer", "VoLTE Stub", "VOLTE_STUB_DL"}, {"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Mixer"}, {"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"}, {"AFE_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"AFE_PCM_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"}, {"AFE_PCM_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"}, {"SLIMBUS_3_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"}, {"SLIMBUS_3_RX_Voice Mixer", "Voice2 Stub", "VOICE2_STUB_DL"}, {"SLIMBUS_3_RX_Voice Mixer", "VoLTE Stub", "VOLTE_STUB_DL"}, {"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX_Voice Mixer"}, {"SLIMBUS_1_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"SLIMBUS_1_RX Port Mixer", "AFE_PCM_TX", "PCM_TX"}, {"SLIMBUS_1_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Port Mixer"}, {"INTERNAL_BT_SCO_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"}, {"INTERNAL_BT_SCO_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, 
{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX Port Mixer"}, {"SLIMBUS_3_RX Port Mixer", "INTERNAL_BT_SCO_RX", "INT_BT_SCO_RX"}, {"SLIMBUS_3_RX Port Mixer", "MI2S_TX", "MI2S_TX"}, {"SLIMBUS_3_RX Port Mixer", "AFE_PCM_RX", "PCM_RX"}, {"SLIMBUS_3_RX Port Mixer", "AUX_PCM_RX", "AUX_PCM_RX"}, {"SLIMBUS_3_RX Port Mixer", "SLIM_0_RX", "SLIMBUS_0_RX"}, {"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX Port Mixer"}, {"HDMI_RX Port Mixer", "MI2S_TX", "MI2S_TX"}, {"HDMI", NULL, "HDMI_RX Port Mixer"}, {"SEC_I2S_RX Port Mixer", "MI2S_TX", "MI2S_TX"}, {"SEC_I2S_RX", NULL, "SEC_I2S_RX Port Mixer"}, {"MI2S_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"}, {"MI2S_RX Port Mixer", "MI2S_TX", "MI2S_TX"}, {"MI2S_RX", NULL, "MI2S_RX Port Mixer"}, {"PRI_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"PRI_MI2S_RX Port Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"}, {"PRI_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"PRI_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"PRI_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"PRI_MI2S_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"PRI_MI2S_RX", NULL, "PRI_MI2S_RX Port Mixer"}, {"QUAT_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"QUAT_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"QUAT_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"QUAT_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"QUAT_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX Port Mixer"}, {"TERT_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"TERT_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"}, {"TERT_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"TERT_MI2S_RX", NULL, "TERT_MI2S_RX Port Mixer"}, {"BE_OUT", NULL, "PRI_I2S_RX"}, {"BE_OUT", NULL, "SEC_I2S_RX"}, {"BE_OUT", NULL, "SLIMBUS_0_RX"}, {"BE_OUT", NULL, "SLIMBUS_1_RX"}, {"BE_OUT", NULL, "SLIMBUS_3_RX"}, {"BE_OUT", NULL, "SLIMBUS_4_RX"}, {"BE_OUT", NULL, "SLIMBUS_6_RX"}, {"BE_OUT", NULL, "HDMI"}, {"BE_OUT", NULL, "SPDIF_RX"}, 
/*
 * Tail of the DAPM route table started above this chunk.  Each entry
 * appears to be an ALSA DAPM route triple { sink, control, source };
 * a NULL control denotes an unconditional path — confirm against the
 * array declaration above.
 * NOTE(review): several BE_OUT routes (INT_BT_SCO_RX, INT_FM_RX,
 * PCM_RX, SLIMBUS_3_RX, AUX_PCM_RX, SEC_AUX_PCM_RX) are listed twice;
 * verify the duplicates are harmless to DAPM before removing them.
 */
{"BE_OUT", NULL, "MI2S_RX"},
{"BE_OUT", NULL, "QUAT_MI2S_RX"},
{"BE_OUT", NULL, "TERT_MI2S_RX"},
{"BE_OUT", NULL, "SEC_MI2S_RX"},
{"BE_OUT", NULL, "SEC_MI2S_RX_SD1"},
{"BE_OUT", NULL, "PRI_MI2S_RX"},
{"BE_OUT", NULL, "INT_BT_SCO_RX"},
{"BE_OUT", NULL, "INT_FM_RX"},
{"BE_OUT", NULL, "PCM_RX"},
{"BE_OUT", NULL, "SLIMBUS_3_RX"},
{"BE_OUT", NULL, "AUX_PCM_RX"},
{"BE_OUT", NULL, "SEC_AUX_PCM_RX"},
{"BE_OUT", NULL, "INT_BT_SCO_RX"},
{"BE_OUT", NULL, "INT_FM_RX"},
{"BE_OUT", NULL, "PCM_RX"},
{"BE_OUT", NULL, "SLIMBUS_3_RX"},
{"BE_OUT", NULL, "AUX_PCM_RX"},
{"BE_OUT", NULL, "SEC_AUX_PCM_RX"},
{"BE_OUT", NULL, "VOICE_PLAYBACK_TX"},
{"BE_OUT", NULL, "VOICE2_PLAYBACK_TX"},
{"PRI_I2S_TX", NULL, "BE_IN"},
{"MI2S_TX", NULL, "BE_IN"},
{"QUAT_MI2S_TX", NULL, "BE_IN"},
{"PRI_MI2S_TX", NULL, "BE_IN"},
{"TERT_MI2S_TX", NULL, "BE_IN"},
{"SEC_MI2S_TX", NULL, "BE_IN"},
{"SLIMBUS_0_TX", NULL, "BE_IN"},
{"SLIMBUS_1_TX", NULL, "BE_IN"},
{"SLIMBUS_3_TX", NULL, "BE_IN"},
{"SLIMBUS_4_TX", NULL, "BE_IN"},
{"SLIMBUS_5_TX", NULL, "BE_IN"},
{"SLIMBUS_6_TX", NULL, "BE_IN"},
{"INT_BT_SCO_TX", NULL, "BE_IN"},
{"INT_FM_TX", NULL, "BE_IN"},
{"PCM_TX", NULL, "BE_IN"},
{"BE_OUT", NULL, "SLIMBUS_3_RX"},
{"BE_OUT", NULL, "STUB_RX"},
{"STUB_TX", NULL, "BE_IN"},
{"STUB_1_TX", NULL, "BE_IN"},
{"BE_OUT", NULL, "AUX_PCM_RX"},
{"AUX_PCM_TX", NULL, "BE_IN"},
{"SEC_AUX_PCM_TX", NULL, "BE_IN"},
{"INCALL_RECORD_TX", NULL, "BE_IN"},
{"INCALL_RECORD_RX", NULL, "BE_IN"},
{"SLIM0_RX_VI_FB_LCH_MUX", "SLIM4_TX", "SLIMBUS_4_TX"},
{"SLIM0_RX_VI_FB_RCH_MUX", "SLIM4_TX", "SLIMBUS_4_TX"},
{"SLIMBUS_0_RX", NULL, "SLIM0_RX_VI_FB_LCH_MUX"},
{"SLIMBUS_0_RX", NULL, "SLIM0_RX_VI_FB_RCH_MUX"},
};

/*
 * Backend PCM hw_params handler: validates the backend DAI id and
 * caches the negotiated rate/channels/format in msm_bedais[] for later
 * use by prepare().  Body continues on the following line.
 */
static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	unsigned int be_id = rtd->dai_link->be_id;

	if (be_id >= MSM_BACKEND_DAI_MAX) {
		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
		return -EINVAL;
	}
/* --- tail of msm_pcm_routing_hw_params(): record negotiated params --- */
	mutex_lock(&routing_lock);
	msm_bedais[be_id].sample_rate = params_rate(params);
	msm_bedais[be_id].channel = params_channels(params);
	msm_bedais[be_id].format = params_format(params);
	mutex_unlock(&routing_lock);
	return 0;
}

/*
 * Backend PCM close handler: for every front-end session still mapped
 * onto this backend, close its ADM copp, drop the copp bit from
 * session_copp_map, and — for legacy-PCM sessions in legacy mode —
 * tear down the per-copp post-processing.  Finally resets the backend
 * bookkeeping (active/sample_rate/channel).
 */
static int msm_pcm_routing_close(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	unsigned int be_id = rtd->dai_link->be_id;
	int i, session_type, path_type, topology;
	struct msm_pcm_routing_bdai_data *bedai;
	struct msm_pcm_routing_fdai_data *fdai;

	if (be_id >= MSM_BACKEND_DAI_MAX) {
		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
		return -EINVAL;
	}

	bedai = &msm_bedais[be_id];
	/* NOTE(review): literal 0/1 used here, while
	 * msm_pcm_routing_prepare() uses SESSION_TYPE_RX/SESSION_TYPE_TX
	 * for the same index — consider unifying. */
	session_type = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 0 : 1);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		path_type = ADM_PATH_PLAYBACK;
	else
		path_type = ADM_PATH_LIVE_REC;

	mutex_lock(&routing_lock);
	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
		fdai = &fe_dai_map[i][session_type];
		if (fdai->strm_id != INVALID_SESSION) {
			int idx;
			int port_id;
			unsigned long copp =
				session_copp_map[i][session_type][be_id];

			/* Find the first copp allocated for this fe/be pair.
			 * NOTE(review): if no bit is set, idx falls through
			 * as MAX_COPPS_PER_PORT and is still passed to
			 * adm_get_topology_for_port_copp_idx()/adm_close()/
			 * clear_bit() below — confirm the map is guaranteed
			 * non-empty whenever strm_id is valid. */
			for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++)
				if (test_bit(idx, &copp))
					break;
			/* Remember the BE rate so prepare() can detect a
			 * rate change and ask the FE to reconfigure. */
			fdai->be_srate = bedai->sample_rate;
			port_id = bedai->port_id;
			topology = adm_get_topology_for_port_copp_idx(port_id,
								      idx);
			adm_close(bedai->port_id, fdai->perf_mode, idx);
			pr_debug("%s: copp:%ld,idx bit fe:%d, type:%d,be:%d topology=0x%x\n",
				 __func__, copp, i, session_type, be_id,
				 topology);
			clear_bit(idx,
				  &session_copp_map[i][session_type][be_id]);
			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
			    (bedai->compr_passthr_mode == LEGACY_PCM))
				msm_pcm_routing_deinit_pp(bedai->port_id,
							  topology);
		}
	}

	bedai->compr_passthr_mode = LEGACY_PCM;
	bedai->active = 0;
	bedai->sample_rate = 0;
	bedai->channel = 0;
	mutex_unlock(&routing_lock);
	return 0;
}

/*
 * Backend PCM prepare handler: (re)opens an ADM copp for every
 * front-end session routed to this backend.  Body continues on the
 * following lines (the be_id initializer is split across this chunk's
 * physical lines).
 */
static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	unsigned int be_id =
/* (continues "unsigned int be_id =" from the previous line) */
		rtd->dai_link->be_id;
	int i, path_type, session_type, topology;
	struct msm_pcm_routing_bdai_data *bedai;
	u32 channels, sample_rate;
	bool playback, capture;
	uint16_t bits_per_sample = 16;
	struct msm_pcm_routing_fdai_data *fdai;

	if (be_id >= MSM_BACKEND_DAI_MAX) {
		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
		return -EINVAL;
	}

	bedai = &msm_bedais[be_id];
	/* Playback on a pass-through backend uses the compressed RX path. */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (bedai->compr_passthr_mode != LEGACY_PCM)
			path_type = ADM_PATH_COMPRESSED_RX;
		else
			path_type = ADM_PATH_PLAYBACK;
		session_type = SESSION_TYPE_RX;
	} else {
		path_type = ADM_PATH_LIVE_REC;
		session_type = SESSION_TYPE_TX;
	}

	mutex_lock(&routing_lock);
	/* Nothing to do if this backend was already prepared. */
	if (bedai->active == 1)
		goto done;

	bedai->active = 1;
	playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	/* NOTE(review): 'capture' is assigned but not used in the visible
	 * portion of this function. */
	capture = substream->stream == SNDRV_PCM_STREAM_CAPTURE;

	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
		fdai = &fe_dai_map[i][session_type];
		if (fdai->strm_id != INVALID_SESSION) {
			int app_type, app_type_idx, copp_idx, acdb_dev_id;

			/* If the BE rate changed since close() recorded it,
			 * notify the FE (via its event callback) so it can
			 * reconfigure its buffers before we reopen. */
			if (session_type == SESSION_TYPE_TX &&
			    fdai->be_srate &&
			    (fdai->be_srate != bedai->sample_rate)) {
				pr_debug("%s: flush strm %d diff BE rates\n",
					 __func__, fdai->strm_id);
				if (fdai->event_info.event_func)
					fdai->event_info.event_func(
						MSM_PCM_RT_EVT_BUF_RECFG,
						fdai->event_info.priv_data);
				fdai->be_srate = 0;
			}
			if (bedai->format == SNDRV_PCM_FORMAT_S24_LE)
				bits_per_sample = 24;

			/* Per-FE app-type config; capture uses app_type 0,
			 * which falls back to the backend sample rate. */
			app_type = playback ?
				   fe_dai_app_type_cfg[i].app_type : 0;
			if (app_type) {
				app_type_idx =
					msm_pcm_routing_get_app_type_idx(
								app_type);
				sample_rate =
					fe_dai_app_type_cfg[i].sample_rate;
				bits_per_sample =
					app_type_cfg[app_type_idx].bit_width;
			} else
				sample_rate = bedai->sample_rate;

			channels = bedai->channel;
			acdb_dev_id = fe_dai_app_type_cfg[i].acdb_dev_id;
			topology = msm_routing_get_adm_topology(path_type, i);
			copp_idx = adm_open(bedai->port_id, path_type,
					    sample_rate, channels, topology,
					    fdai->perf_mode, bits_per_sample,
					    app_type, acdb_dev_id);
			if ((copp_idx < 0) ||
			    (copp_idx >= MAX_COPPS_PER_PORT)) {
				pr_err("%s: adm open failed\n", __func__);
				mutex_unlock(&routing_lock);
				return -EINVAL;
			}
			pr_debug("%s: setting idx bit of fe:%d, type: %d, be:%d\n",
				 __func__, i, session_type, be_id);
			set_bit(copp_idx,
				&session_copp_map[i][session_type][be_id]);
			msm_pcm_routing_build_matrix(i, session_type,
						     path_type,
						     fdai->perf_mode);
			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
			    (bedai->compr_passthr_mode == LEGACY_PCM))
				msm_pcm_routing_cfg_pp(bedai->port_id,
						       copp_idx, topology,
						       channels);
		}
	}

done:
	mutex_unlock(&routing_lock);
	return 0;
}

/*
 * Pushes the cached device post-processing parameters (mute, latency)
 * to one specific copp.  Only HDMI_RX ports carrying the compressed
 * pass-through topology are accepted.  Body continues on the following
 * line.
 */
static int msm_routing_send_device_pp_params(int port_id, int copp_idx)
{
	int index, topo_id, be_idx;
	unsigned long pp_config = 0;
	bool mute_on;
	int latency;

	pr_debug("%s: port_id %d, copp_idx %d\n", __func__, port_id,
		 copp_idx);

	if (port_id != HDMI_RX) {
		pr_err("%s: Device pp params on invalid port %d\n",
		       __func__, port_id);
		return -EINVAL;
	}

	/* Map the AFE port id back to its backend index. */
	for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) {
		if (port_id == msm_bedais[be_idx].port_id)
			break;
	}
	if (be_idx >= MSM_BACKEND_DAI_MAX) {
		pr_debug("%s: Invalid be id %d\n", __func__, be_idx);
		return -EINVAL;
	}

	/* Find the cached pp-params slot for this port. */
	for (index = 0; index < MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX; index++) {
		if (msm_bedais_pp_params[index].port_id == port_id)
			break;
	}
	if (index >= MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX) {
		pr_err("%s: Invalid backend pp params index %d\n",
		       __func__, index);
		return -EINVAL;
	}

	topo_id =
adm_get_topology_for_port_copp_idx(port_id, copp_idx); if (topo_id != COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY) { pr_err("%s: Invalid passthrough topology 0x%x\n", __func__, topo_id); return -EINVAL; } pp_config = msm_bedais_pp_params[index].pp_params_config; if (test_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config)) { pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__); clear_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config); mute_on = msm_bedais_pp_params[index].mute_on; if ((msm_bedais[be_idx].active) && (msm_bedais[be_idx].compr_passthr_mode != LEGACY_PCM)) adm_send_compressed_device_mute(port_id, copp_idx, mute_on); } if (test_bit(ADM_PP_PARAM_LATENCY_BIT, &pp_config)) { pr_debug("%s: ADM_PP_PARAM_LATENCY\n", __func__); clear_bit(ADM_PP_PARAM_LATENCY_BIT, &pp_config); latency = msm_bedais_pp_params[index].latency; if ((msm_bedais[be_idx].active) && (msm_bedais[be_idx].compr_passthr_mode != LEGACY_PCM)) adm_send_compressed_device_latency(port_id, copp_idx, latency); } return 0; } static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int pp_id = ucontrol->value.integer.value[0]; int port_id = 0; int index, be_idx, i, topo_id, idx; bool mute; int latency; pr_debug("%s: pp_id: 0x%x\n", __func__, pp_id); for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) { port_id = msm_bedais[be_idx].port_id; if (port_id == HDMI_RX) break; } if (be_idx >= MSM_BACKEND_DAI_MAX) { pr_debug("%s: Invalid be id %d\n", __func__, be_idx); return -EINVAL; } for (index = 0; index < MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX; index++) { if (msm_bedais_pp_params[index].port_id == port_id) break; } if (index >= MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX) { pr_err("%s: Invalid pp params backend index %d\n", __func__, index); return -EINVAL; } for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) { for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) { unsigned long copp = session_copp_map[i] [SESSION_TYPE_RX][be_idx]; if (!test_bit(idx, &copp)) 
continue; topo_id = adm_get_topology_for_port_copp_idx(port_id, idx); if (topo_id != COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY) continue; pr_debug("%s: port: 0x%x, copp %ld, be active: %d, passt: %d\n", __func__, port_id, copp, msm_bedais[be_idx].active, msm_bedais[be_idx].compr_passthr_mode); switch (pp_id) { case ADM_PP_PARAM_MUTE_ID: pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__); mute = ucontrol->value.integer.value[1] ? true : false; msm_bedais_pp_params[index].mute_on = mute; set_bit(ADM_PP_PARAM_MUTE_BIT, &msm_bedais_pp_params[index].pp_params_config); if ((msm_bedais[be_idx].active) && (msm_bedais[be_idx].compr_passthr_mode != LEGACY_PCM)) adm_send_compressed_device_mute(port_id, copp, mute); break; case ADM_PP_PARAM_LATENCY_ID: pr_debug("%s: ADM_PP_PARAM_LATENCY\n", __func__); msm_bedais_pp_params[index].latency = ucontrol->value.integer.value[1]; set_bit(ADM_PP_PARAM_LATENCY_BIT, &msm_bedais_pp_params[index].pp_params_config); latency = msm_bedais_pp_params[index].latency = ucontrol->value.integer.value[1]; if ((msm_bedais[be_idx].active) && (msm_bedais[be_idx].compr_passthr_mode != LEGACY_PCM)) adm_send_compressed_device_latency(port_id, copp, latency); break; default: pr_info("%s, device pp param %d not supported\n", __func__, pp_id); break; } } } return 0; } static int msm_routing_get_device_pp_params_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s:msm_routing_get_device_pp_params_mixer", __func__); return 0; } static const struct snd_kcontrol_new device_pp_params_mixer_controls[] = { SOC_SINGLE_MULTI_EXT("Device PP Params", SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 3, msm_routing_get_device_pp_params_mixer, msm_routing_put_device_pp_params_mixer), }; static struct snd_pcm_ops msm_routing_pcm_ops = { .hw_params = msm_pcm_routing_hw_params, .close = msm_pcm_routing_close, .prepare = msm_pcm_routing_prepare, }; static unsigned int msm_routing_read(struct snd_soc_platform *platform, unsigned int reg) { dev_dbg(platform->dev, 
"reg %x\n", reg); return 0; } static int msm_routing_write(struct snd_soc_platform *platform, unsigned int reg, unsigned int val) { dev_dbg(platform->dev, "reg %x val %x\n", reg, val); return 0; } static int msm_routing_probe(struct snd_soc_platform *platform) { snd_soc_dapm_new_controls(&platform->dapm, msm_qdsp6_widgets, ARRAY_SIZE(msm_qdsp6_widgets)); snd_soc_dapm_add_routes(&platform->dapm, intercon, ARRAY_SIZE(intercon)); snd_soc_dapm_new_widgets(&platform->dapm); snd_soc_add_platform_controls(platform, lsm_function, ARRAY_SIZE(lsm_function)); snd_soc_add_platform_controls(platform, afe_quat_mi2s_vol_mixer_controls, ARRAY_SIZE(afe_quat_mi2s_vol_mixer_controls)); snd_soc_add_platform_controls(platform, afe_tert_mi2s_vol_mixer_controls, ARRAY_SIZE(afe_tert_mi2s_vol_mixer_controls)); snd_soc_add_platform_controls(platform, aanc_slim_0_rx_mux, ARRAY_SIZE(aanc_slim_0_rx_mux)); snd_soc_add_platform_controls(platform, msm_voc_session_controls, ARRAY_SIZE(msm_voc_session_controls)); snd_soc_add_platform_controls(platform, app_type_cfg_controls, ARRAY_SIZE(app_type_cfg_controls)); snd_soc_add_platform_controls(platform, stereo_to_custom_stereo_controls, ARRAY_SIZE(stereo_to_custom_stereo_controls)); msm_qti_pp_add_controls(platform); msm_dts_srs_tm_add_controls(platform); msm_dolby_dap_add_controls(platform); snd_soc_add_platform_controls(platform, use_ds1_or_ds2_controls, ARRAY_SIZE(use_ds1_or_ds2_controls)); snd_soc_add_platform_controls(platform, device_pp_params_mixer_controls, ARRAY_SIZE(device_pp_params_mixer_controls)); msm_dts_eagle_add_controls(platform); return 0; } int msm_routing_pcm_new(struct snd_soc_pcm_runtime *runtime) { return msm_pcm_routing_hwdep_new(runtime, msm_bedais); } void msm_routing_pcm_free(struct snd_pcm *pcm) { msm_pcm_routing_hwdep_free(pcm); } static struct snd_soc_platform_driver msm_soc_routing_platform = { .ops = &msm_routing_pcm_ops, .probe = msm_routing_probe, .read = msm_routing_read, .write = msm_routing_write, .pcm_new = 
msm_routing_pcm_new, .pcm_free = msm_routing_pcm_free, }; static int msm_routing_pcm_probe(struct platform_device *pdev) { dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev)); return snd_soc_register_platform(&pdev->dev, &msm_soc_routing_platform); } static int msm_routing_pcm_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static const struct of_device_id msm_pcm_routing_dt_match[] = { {.compatible = "qcom,msm-pcm-routing"}, {} }; MODULE_DEVICE_TABLE(of, msm_pcm_routing_dt_match); static struct platform_driver msm_routing_pcm_driver = { .driver = { .name = "msm-pcm-routing", .owner = THIS_MODULE, .of_match_table = msm_pcm_routing_dt_match, }, .probe = msm_routing_pcm_probe, .remove = msm_routing_pcm_remove, }; int msm_routing_check_backend_enabled(int fedai_id) { int i; if (fedai_id >= MSM_FRONTEND_DAI_MM_MAX_ID) { pr_err("%s: bad MM ID\n", __func__); return 0; } for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) { if (test_bit(fedai_id, &msm_bedais[i].fe_sessions)) return msm_bedais[i].active; } return 0; } static int get_cal_path(int path_type) { if (path_type == ADM_PATH_PLAYBACK || path_type == ADM_PATH_COMPRESSED_RX) return RX_DEVICE; else return TX_DEVICE; } static int msm_routing_set_cal(int32_t cal_type, size_t data_size, void *data) { int ret = 0; pr_debug("%s\n", __func__); ret = cal_utils_set_cal(data_size, data, cal_data, 0, NULL); if (ret < 0) { pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n", __func__, ret, cal_type); ret = -EINVAL; goto done; } done: return ret; } static void msm_routing_delete_cal_data(void) { pr_debug("%s\n", __func__); cal_utils_destroy_cal_types(1, &cal_data); return; } static int msm_routing_init_cal_data(void) { int ret = 0; struct cal_type_info cal_type_info = { {ADM_TOPOLOGY_CAL_TYPE, {NULL, NULL, NULL, msm_routing_set_cal, NULL, NULL} }, {NULL, NULL, cal_utils_match_buf_num} }; pr_debug("%s\n", __func__); ret = cal_utils_create_cal_types(1, &cal_data, 
&cal_type_info); if (ret < 0) { pr_err("%s: could not create cal type!\n", __func__); ret = -EINVAL; goto err; } return ret; err: msm_routing_delete_cal_data(); return ret; } static int __init msm_soc_routing_platform_init(void) { mutex_init(&routing_lock); if (msm_routing_init_cal_data()) pr_err("%s: could not init cal data!\n", __func__); memset(htc_adm_effect,0, sizeof(struct htc_adm_effect_s)*HTC_ADM_EFFECT_MAX); return platform_driver_register(&msm_routing_pcm_driver); } module_init(msm_soc_routing_platform_init); static void __exit msm_soc_routing_platform_exit(void) { msm_routing_delete_cal_data(); platform_driver_unregister(&msm_routing_pcm_driver); } module_exit(msm_soc_routing_platform_exit); MODULE_DESCRIPTION("MSM routing platform driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
huther/personal_projects
my_m4/m4-1.4.18/lib/spawn_faction_init.c
9
1722
/* Copyright (C) 2000, 2009-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> /* Specification. */ #include <spawn.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include "spawn_int.h" /* Function used to increase the size of the allocated array. This function is called from the 'add'-functions. */ int __posix_spawn_file_actions_realloc (posix_spawn_file_actions_t *file_actions) { int newalloc = file_actions->_allocated + 8; void *newmem = realloc (file_actions->_actions, newalloc * sizeof (struct __spawn_action)); if (newmem == NULL) /* Not enough memory. */ return ENOMEM; file_actions->_actions = (struct __spawn_action *) newmem; file_actions->_allocated = newalloc; return 0; } /* Initialize data structure for file attribute for 'spawn' call. */ int posix_spawn_file_actions_init (posix_spawn_file_actions_t *file_actions) { /* Simply clear all the elements. */ memset (file_actions, '\0', sizeof (*file_actions)); return 0; }
gpl-2.0
tsoliman/scummvm
engines/neverhood/modules/module1500.cpp
9
3653
/* ScummVM - Graphic Adventure Engine * * ScummVM is the legal property of its developers, whose names * are too numerous to list here. Please refer to the COPYRIGHT * file distributed with this source distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #include "neverhood/modules/module1500.h" namespace Neverhood { Module1500::Module1500(NeverhoodEngine *vm, Module *parentModule, int which) : Module(vm, parentModule) { if (which < 0) createScene(_vm->gameState().sceneNum, -1); else createScene(3, -1); } void Module1500::createScene(int sceneNum, int which) { debug(1, "Module1500::createScene(%d, %d)", sceneNum, which); _sceneNum = sceneNum; switch (_sceneNum) { case 0: _vm->gameState().sceneNum = 0; _childObject = new Scene1501(_vm, this, 0x8420221D, 0xA61024C4, 150, 48); break; case 1: _vm->gameState().sceneNum = 1; _childObject = new Scene1501(_vm, this, 0x30050A0A, 0x58B45E58, 110, 48); break; case 2: _vm->gameState().sceneNum = 2; sendMessage(_parentModule, 0x0800, 0); createSmackerScene(0x001A0005, true, true, true); break; case 3: _vm->gameState().sceneNum = 3; _childObject = new Scene1501(_vm, this, 0x0CA04202, 0, 110, 48); break; default: break; } SetUpdateHandler(&Module1500::updateScene); _childObject->handleUpdate(); } void Module1500::updateScene() { if (!updateChild()) { switch (_sceneNum) { 
case 0: createScene(1, -1); break; case 1: createScene(2, -1); break; case 3: createScene(0, -1); break; default: leaveModule(0); break; } } } // Scene1501 Scene1501::Scene1501(NeverhoodEngine *vm, Module *parentModule, uint32 backgroundFileHash, uint32 soundFileHash, int countdown2, int countdown3) : Scene(vm, parentModule), _countdown3(countdown3), _countdown2(countdown2), _countdown1(0), _skip(false) { SetUpdateHandler(&Scene1501::update); SetMessageHandler(&Scene1501::handleMessage); setBackground(backgroundFileHash); setPalette(); addEntity(_palette); _palette->addBasePalette(backgroundFileHash, 0, 256, 0); _palette->startFadeToPalette(12); if (soundFileHash != 0) playSound(0, soundFileHash); } void Scene1501::update() { Scene::update(); if (_countdown1 != 0) { _countdown1--; if (_countdown1 == 0 || _skip) { _vm->_screen->clear(); leaveScene(0); } } else if ((_countdown2 != 0 && (--_countdown2 == 0)) || (_countdown2 == 0 && !isSoundPlaying(0)) || _skip) { _countdown1 = 12; _palette->startFadeToBlack(11); } if (_countdown3 != 0) _countdown3--; if (_countdown3 == 0 && _skip && _countdown1 == 0) { _countdown1 = 12; _palette->startFadeToBlack(11); } } uint32 Scene1501::handleMessage(int messageNum, const MessageParam &param, Entity *sender) { uint32 messageResult = Scene::handleMessage(messageNum, param, sender); switch (messageNum) { case NM_KEYPRESS_SPACE: _skip = true; break; default: break; } return messageResult; } } // End of namespace Neverhood
gpl-2.0
saeedhadi/rt-thread
bsp/lpc2478/drivers/serial.c
9
8910
/* * File : serial.c * This file is part of RT-Thread RTOS * COPYRIGHT (C) 2006, RT-Thread Development Team * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at * http://openlab.rt-thread.com/license/LICENSE * * Change Logs: * Date Author Notes * 2006-08-23 Bernard first version */ #include <rthw.h> #include <rtthread.h> #include "LPC24xx.h" #include "board.h" /* serial hardware register */ #define REG8(d) (*((volatile unsigned char *)(d))) #define REG32(d) (*((volatile unsigned long *)(d))) #define UART_RBR(base) REG8(base + 0x00) #define UART_THR(base) REG8(base + 0x00) #define UART_IER(base) REG32(base + 0x04) #define UART_IIR(base) REG32(base + 0x08) #define UART_FCR(base) REG8(base + 0x08) #define UART_LCR(base) REG8(base + 0x0C) #define UART_MCR(base) REG8(base + 0x10) #define UART_LSR(base) REG8(base + 0x14) #define UART_MSR(base) REG8(base + 0x18) #define UART_SCR(base) REG8(base + 0x1C) #define UART_DLL(base) REG8(base + 0x00) #define UART_DLM(base) REG8(base + 0x04) #define UART_ACR(base) REG32(base + 0x20) #define UART_FDR(base) REG32(base + 0x28) #define UART_TER(base) REG8(base + 0x30) /* LPC serial device */ struct rt_lpcserial { /* inherit from device */ struct rt_device parent; rt_uint32_t hw_base; rt_uint32_t irqno; rt_uint32_t baudrate; /* reception field */ rt_uint16_t save_index, read_index; rt_uint8_t rx_buffer[RT_UART_RX_BUFFER_SIZE]; }; #ifdef RT_USING_UART1 struct rt_lpcserial serial1; #endif #ifdef RT_USING_UART2 struct rt_lpcserial serial2; #endif void rt_hw_serial_init(void); #define U0PINS 0x00000005 void rt_hw_uart_isr(struct rt_lpcserial* lpc_serial) { UNUSED rt_uint32_t iir; RT_ASSERT(lpc_serial != RT_NULL) if (UART_LSR(lpc_serial->hw_base) & 0x01) { rt_base_t level; while (UART_LSR(lpc_serial->hw_base) & 0x01) { /* disable interrupt */ level = rt_hw_interrupt_disable(); /* read character */ lpc_serial->rx_buffer[lpc_serial->save_index] = UART_RBR(lpc_serial->hw_base); 
lpc_serial->save_index ++; if (lpc_serial->save_index >= RT_UART_RX_BUFFER_SIZE) lpc_serial->save_index = 0; /* if the next position is read index, discard this 'read char' */ if (lpc_serial->save_index == lpc_serial->read_index) { lpc_serial->read_index ++; if (lpc_serial->read_index >= RT_UART_RX_BUFFER_SIZE) lpc_serial->read_index = 0; } /* enable interrupt */ rt_hw_interrupt_enable(level); } /* invoke callback */ if(lpc_serial->parent.rx_indicate != RT_NULL) { lpc_serial->parent.rx_indicate(&lpc_serial->parent, 1); } } /* clear interrupt source */ iir = UART_IIR(lpc_serial->hw_base); /* acknowledge Interrupt */ VICVectAddr = 0; } #ifdef RT_USING_UART1 void rt_hw_uart_isr_1(int irqno) { /* get lpc serial device */ rt_hw_uart_isr(&serial1); } #endif #ifdef RT_USING_UART2 void rt_hw_uart_isr_2(int irqno) { /* get lpc serial device */ rt_hw_uart_isr(&serial2); } #endif /** * @addtogroup LPC214x */ /*@{*/ static rt_err_t rt_serial_init (rt_device_t dev) { return RT_EOK; } static rt_err_t rt_serial_open(rt_device_t dev, rt_uint16_t oflag) { struct rt_lpcserial* lpc_serial; lpc_serial = (struct rt_lpcserial*) dev; RT_ASSERT(lpc_serial != RT_NULL); if (dev->flag & RT_DEVICE_FLAG_INT_RX) { /* init UART rx interrupt */ UART_IER(lpc_serial->hw_base) = 0x01; /* install ISR */ if (lpc_serial->irqno == UART0_INT) { #ifdef RT_USING_UART1 rt_hw_interrupt_install(lpc_serial->irqno, rt_hw_uart_isr_1, RT_NULL); #endif } else { #ifdef RT_USING_UART2 rt_hw_interrupt_install(lpc_serial->irqno, rt_hw_uart_isr_2, RT_NULL); #endif } rt_hw_interrupt_umask(lpc_serial->irqno); } return RT_EOK; } static rt_err_t rt_serial_close(rt_device_t dev) { struct rt_lpcserial* lpc_serial; lpc_serial = (struct rt_lpcserial*) dev; RT_ASSERT(lpc_serial != RT_NULL); if (dev->flag & RT_DEVICE_FLAG_INT_RX) { /* disable UART rx interrupt */ UART_IER(lpc_serial->hw_base) = 0x00; } return RT_EOK; } static rt_err_t rt_serial_control(rt_device_t dev, rt_uint8_t cmd, void *args) { return RT_EOK; } static 
rt_size_t rt_serial_read(rt_device_t dev, rt_off_t pos, void* buffer, rt_size_t size) { rt_uint8_t* ptr; struct rt_lpcserial *lpc_serial = (struct rt_lpcserial*)dev; RT_ASSERT(lpc_serial != RT_NULL); /* point to buffer */ ptr = (rt_uint8_t*) buffer; if (dev->flag & RT_DEVICE_FLAG_INT_RX) { while (size) { /* interrupt receive */ rt_base_t level; /* disable interrupt */ level = rt_hw_interrupt_disable(); if (lpc_serial->read_index != lpc_serial->save_index) { *ptr = lpc_serial->rx_buffer[lpc_serial->read_index]; lpc_serial->read_index ++; if (lpc_serial->read_index >= RT_UART_RX_BUFFER_SIZE) lpc_serial->read_index = 0; } else { /* no data in rx buffer */ /* enable interrupt */ rt_hw_interrupt_enable(level); break; } /* enable interrupt */ rt_hw_interrupt_enable(level); ptr ++; size --; } return (rt_uint32_t)ptr - (rt_uint32_t)buffer; } else if (dev->flag & RT_DEVICE_FLAG_DMA_RX) { /* not support right now */ RT_ASSERT(0); } /* polling mode */ while (size && (UART_LSR(lpc_serial->hw_base) & 0x01)) { /* Read Character */ *ptr = UART_RBR(lpc_serial->hw_base); ptr ++; size --; } return (rt_size_t)ptr - (rt_size_t)buffer; } static rt_size_t rt_serial_write(rt_device_t dev, rt_off_t pos, const void* buffer, rt_size_t size) { struct rt_lpcserial* lpc_serial; char *ptr; lpc_serial = (struct rt_lpcserial*) dev; if (dev->flag & RT_DEVICE_FLAG_INT_TX) { /* not support */ RT_ASSERT(0); } else if (dev->flag & RT_DEVICE_FLAG_DMA_TX) { /* not support */ RT_ASSERT(0); } /* polling write */ ptr = (char *)buffer; if (dev->flag & RT_DEVICE_FLAG_STREAM) { /* stream mode */ while (size) { if (*ptr == '\n') { while (!(UART_LSR(lpc_serial->hw_base) & 0x20)); UART_THR(lpc_serial->hw_base) = '\r'; } while (!(UART_LSR(lpc_serial->hw_base) & 0x20)); UART_THR(lpc_serial->hw_base) = *ptr; ptr ++; size --; } } else { while (size) { while (!(UART_LSR(lpc_serial->hw_base) & 0x20)); UART_THR(lpc_serial->hw_base) = *ptr; ptr ++; size --; } } return (rt_size_t) ptr - (rt_size_t) buffer; } void 
rt_hw_serial_init(void) { struct rt_lpcserial* lpc_serial; #ifdef RT_USING_UART1 lpc_serial = &serial1; lpc_serial->parent.type = RT_Device_Class_Char; lpc_serial->hw_base = 0xE000C000; lpc_serial->baudrate = 115200; lpc_serial->irqno = UART0_INT; rt_memset(lpc_serial->rx_buffer, 0, sizeof(lpc_serial->rx_buffer)); lpc_serial->read_index = lpc_serial->save_index = 0; /* Enable UART0 RxD and TxD pins */ PINSEL0 |= 0x50; /* 8 bits, no Parity, 1 Stop bit */ UART_LCR(lpc_serial->hw_base) = 0x83; /* Setup Baudrate */ UART_DLL(lpc_serial->hw_base) = (PCLK/16/lpc_serial->baudrate) & 0xFF; UART_DLM(lpc_serial->hw_base) = ((PCLK/16/lpc_serial->baudrate) >> 8) & 0xFF; /* DLAB = 0 */ UART_LCR(lpc_serial->hw_base) = 0x03; lpc_serial->parent.type = RT_Device_Class_Char; lpc_serial->parent.init = rt_serial_init; lpc_serial->parent.open = rt_serial_open; lpc_serial->parent.close = rt_serial_close; lpc_serial->parent.read = rt_serial_read; lpc_serial->parent.write = rt_serial_write; lpc_serial->parent.control = rt_serial_control; lpc_serial->parent.user_data = RT_NULL; rt_device_register(&lpc_serial->parent, "uart1", RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX | RT_DEVICE_FLAG_STREAM); #endif #ifdef RT_USING_UART2 lpc_serial = &serial2; lpc_serial->parent.type = RT_Device_Class_Char; lpc_serial->hw_base = 0xE0010000; lpc_serial->baudrate = 115200; lpc_serial->irqno = UART1_INT; rt_memset(lpc_serial->rx_buffer, 0, sizeof(lpc_serial->rx_buffer)); lpc_serial->read_index = lpc_serial->save_index = 0; /* Enable UART1 RxD and TxD pins */ PINSEL0 |= 0x05 << 16; /* 8 bits, no Parity, 1 Stop bit */ UART_LCR(lpc_serial->hw_base) = 0x83; /* Setup Baudrate */ UART_DLL(lpc_serial->hw_base) = (PCLK/16/lpc_serial->baudrate) & 0xFF; UART_DLM(lpc_serial->hw_base) = ((PCLK/16/lpc_serial->baudrate) >> 8) & 0xFF; /* DLAB = 0 */ UART_LCR(lpc_serial->hw_base) = 0x03; lpc_serial->parent.type = RT_Device_Class_Char; lpc_serial->parent.init = rt_serial_init; lpc_serial->parent.open = rt_serial_open; 
lpc_serial->parent.close = rt_serial_close; lpc_serial->parent.read = rt_serial_read; lpc_serial->parent.write = rt_serial_write; lpc_serial->parent.control = rt_serial_control; lpc_serial->parent.user_data = RT_NULL; rt_device_register(&lpc_serial->parent, "uart2", RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX); #endif } /*@}*/
gpl-2.0
aircross/ray
package/ezpup/src/libtomcrypt-1.00/src/pk/dh/dh.c
9
14268
/* LibTomCrypt, modular cryptographic library -- Tom St Denis * * LibTomCrypt is a library that provides various cryptographic * algorithms in a highly modular and flexible manner. * * The library is free for all purposes without any express * guarantee it works. * * Tom St Denis, tomstdenis@iahu.ca, http://libtomcrypt.org */ #include "tomcrypt.h" /** @file dh.c DH crypto, Tom St Denis */ #ifdef MDH /* max export size we'll encounter (smaller than this but lets round up a bit) */ #define DH_BUF_SIZE 1200 /* This holds the key settings. ***MUST*** be organized by size from smallest to largest. */ static const struct { int size; char *name, *base, *prime; } sets[] = { #ifdef DH768 { 96, "DH-768", "4", "F///////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "//////m3wvV" }, #endif #ifdef DH1024 { 128, "DH-1024", "4", "F///////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////m3C47" }, #endif #ifdef DH1280 { 160, "DH-1280", "4", "F///////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "//////////////////////////////m4kSN" }, #endif #ifdef DH1536 { 192, "DH-1536", "4", "F///////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////m5uqd" }, #endif #ifdef DH1792 { 224, "DH-1792", "4", "F///////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" 
"//////////////////////////////////////////////////////mT/sd" }, #endif #ifdef DH2048 { 256, "DH-2048", "4", "3///////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "/////////////////////////////////////////m8MPh" }, #endif #ifdef DH2560 { 320, "DH-2560", "4", "3///////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "/////mKFpF" }, #endif #ifdef DH3072 { 384, "DH-3072", "4", "3///////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "/////////////////////////////m32nN" }, #endif #ifdef DH4096 { 512, "DH-4096", "4", "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" 
"////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "////////////////////////////////////////////////////////////" "/////////////////////m8pOF" }, #endif { 0, NULL, NULL, NULL } }; static int is_valid_idx(int n) { int x; for (x = 0; sets[x].size; x++); if ((n < 0) || (n >= x)) { return 0; } return 1; } /** Test the DH sub-system (can take a while) @return CRYPT_OK if successful */ int dh_test(void) { mp_int p, g, tmp; int x, err, primality; if ((err = mp_init_multi(&p, &g, &tmp, NULL)) != MP_OKAY) { goto error; } for (x = 0; sets[x].size != 0; x++) { #if 0 printf("dh_test():testing size %d-bits\n", sets[x].size * 8); #endif if ((err = mp_read_radix(&g,(char *)sets[x].base, 64)) != MP_OKAY) { goto error; } if ((err = mp_read_radix(&p,(char *)sets[x].prime, 64)) != MP_OKAY) { goto error; } /* ensure p is prime */ if ((err = is_prime(&p, &primality)) != CRYPT_OK) { goto done; } if (primality == 0) { err = CRYPT_FAIL_TESTVECTOR; goto done; } if ((err = mp_sub_d(&p, 1, &tmp)) != MP_OKAY) { goto error; } if ((err = mp_div_2(&tmp, &tmp)) != MP_OKAY) { goto error; } /* ensure (p-1)/2 is prime */ if ((err = is_prime(&tmp, &primality)) != CRYPT_OK) { goto done; } if (primality == 0) { err = CRYPT_FAIL_TESTVECTOR; goto done; } /* now see if g^((p-1)/2) mod p is in fact 1 */ if ((err = mp_exptmod(&g, &tmp, &p, &tmp)) != MP_OKAY) { goto error; } if (mp_cmp_d(&tmp, 1)) { err = CRYPT_FAIL_TESTVECTOR; goto done; } } err = CRYPT_OK; goto done; error: err = mpi_to_ltc_error(err); done: mp_clear_multi(&tmp, &g, &p, NULL); return err; } /** Get the min and max DH key sizes (octets) @param low [out] The smallest key size supported @param high [out] The largest key size supported */ void dh_sizes(int *low, int *high) { int x; LTC_ARGCHK(low != NULL); LTC_ARGCHK(high != NULL); *low = 
INT_MAX; *high = 0; for (x = 0; sets[x].size != 0; x++) { if (*low > sets[x].size) *low = sets[x].size; if (*high < sets[x].size) *high = sets[x].size; } } /** Returns the key size of a given DH key (octets) @param key The DH key to get the size of @return The size if valid or INT_MAX if not */ int dh_get_size(dh_key *key) { LTC_ARGCHK(key != NULL); if (is_valid_idx(key->idx) == 1) { return sets[key->idx].size; } else { return INT_MAX; /* large value that would cause dh_make_key() to fail */ } } /** Make a DH key [private key pair] @param prng An active PRNG state @param wprng The index for the PRNG you desire to use @param keysize The key size (octets) desired @param key [out] Where the newly created DH key will be stored @return CRYPT_OK if successful, note: on error all allocated memory will be freed automatically. */ int dh_make_key(prng_state *prng, int wprng, int keysize, dh_key *key) { unsigned char *buf; unsigned long x; mp_int p, g; int err; LTC_ARGCHK(key != NULL); /* good prng? 
*/ if ((err = prng_is_valid(wprng)) != CRYPT_OK) { return err; } /* find key size */ for (x = 0; (keysize > sets[x].size) && (sets[x].size != 0); x++); #ifdef FAST_PK keysize = MIN(sets[x].size, 32); #else keysize = sets[x].size; #endif if (sets[x].size == 0) { return CRYPT_INVALID_KEYSIZE; } key->idx = x; /* allocate buffer */ buf = XMALLOC(keysize); if (buf == NULL) { return CRYPT_MEM; } /* make up random string */ if (prng_descriptor[wprng].read(buf, keysize, prng) != (unsigned long)keysize) { err = CRYPT_ERROR_READPRNG; goto error2; } /* init parameters */ if ((err = mp_init_multi(&g, &p, &key->x, &key->y, NULL)) != MP_OKAY) { goto error; } if ((err = mp_read_radix(&g, sets[key->idx].base, 64)) != MP_OKAY) { goto error; } if ((err = mp_read_radix(&p, sets[key->idx].prime, 64)) != MP_OKAY) { goto error; } /* load the x value */ if ((err = mp_read_unsigned_bin(&key->x, buf, keysize)) != MP_OKAY) { goto error; } if ((err = mp_exptmod(&g, &key->x, &p, &key->y)) != MP_OKAY) { goto error; } key->type = PK_PRIVATE; if ((err = mp_shrink(&key->x)) != MP_OKAY) { goto error; } if ((err = mp_shrink(&key->y)) != MP_OKAY) { goto error; } /* free up ram */ err = CRYPT_OK; goto done; error: err = mpi_to_ltc_error(err); error2: mp_clear_multi(&key->x, &key->y, NULL); done: #ifdef LTC_CLEAN_STACK zeromem(buf, keysize); #endif mp_clear_multi(&p, &g, NULL); XFREE(buf); return err; } /** Free the allocated ram for a DH key @param key The key which you wish to free */ void dh_free(dh_key *key) { LTC_ARGCHK(key != NULL); mp_clear_multi(&key->x, &key->y, NULL); } /** Export a DH key to a binary packet @param out [out] The destination for the key @param outlen [in/out] The max size and resulting size of the DH key @param type Which type of key (PK_PRIVATE or PK_PUBLIC) @param key The key you wish to export @return CRYPT_OK if successful */ int dh_export(unsigned char *out, unsigned long *outlen, int type, dh_key *key) { unsigned long y, z; int err; LTC_ARGCHK(out != NULL); 
LTC_ARGCHK(outlen != NULL); LTC_ARGCHK(key != NULL); /* can we store the static header? */ if (*outlen < (PACKET_SIZE + 2)) { return CRYPT_BUFFER_OVERFLOW; } if (type == PK_PRIVATE && key->type != PK_PRIVATE) { return CRYPT_PK_NOT_PRIVATE; } /* header */ y = PACKET_SIZE; /* header */ out[y++] = type; out[y++] = (unsigned char)(sets[key->idx].size / 8); /* export y */ OUTPUT_BIGNUM(&key->y, out, y, z); if (type == PK_PRIVATE) { /* export x */ OUTPUT_BIGNUM(&key->x, out, y, z); } /* store header */ packet_store_header(out, PACKET_SECT_DH, PACKET_SUB_KEY); /* store len */ *outlen = y; return CRYPT_OK; } /** Import a DH key from a binary packet @param in The packet to read @param inlen The length of the input packet @param key [out] Where to import the key to @return CRYPT_OK if successful, on error all allocated memory is freed automatically */ int dh_import(const unsigned char *in, unsigned long inlen, dh_key *key) { unsigned long x, y, s; int err; LTC_ARGCHK(in != NULL); LTC_ARGCHK(key != NULL); /* make sure valid length */ if ((2+PACKET_SIZE) > inlen) { return CRYPT_INVALID_PACKET; } /* check type byte */ if ((err = packet_valid_header((unsigned char *)in, PACKET_SECT_DH, PACKET_SUB_KEY)) != CRYPT_OK) { return err; } /* init */ if ((err = mp_init_multi(&key->x, &key->y, NULL)) != MP_OKAY) { return mpi_to_ltc_error(err); } /* advance past packet header */ y = PACKET_SIZE; /* key type, e.g. private, public */ key->type = (int)in[y++]; /* key size in bytes */ s = (unsigned long)in[y++] * 8; for (x = 0; (s > (unsigned long)sets[x].size) && (sets[x].size != 0); x++); if (sets[x].size == 0) { err = CRYPT_INVALID_KEYSIZE; goto error; } key->idx = (int)x; /* type check both values */ if ((key->type != PK_PUBLIC) && (key->type != PK_PRIVATE)) { err = CRYPT_PK_TYPE_MISMATCH; goto error; } /* is the key idx valid? 
*/ if (is_valid_idx(key->idx) != 1) { err = CRYPT_PK_TYPE_MISMATCH; goto error; } /* load public value g^x mod p*/ INPUT_BIGNUM(&key->y, in, x, y, inlen); if (key->type == PK_PRIVATE) { INPUT_BIGNUM(&key->x, in, x, y, inlen); } /* eliminate private key if public */ if (key->type == PK_PUBLIC) { mp_clear(&key->x); } return CRYPT_OK; error: mp_clear_multi(&key->y, &key->x, NULL); return err; } /** Create a DH shared secret. @param private_key The private DH key in the pair @param public_key The public DH key in the pair @param out [out] The destination of the shared data @param outlen [in/out] The max size and resulting size of the shared data. @return CRYPT_OK if successful */ int dh_shared_secret(dh_key *private_key, dh_key *public_key, unsigned char *out, unsigned long *outlen) { mp_int tmp, p; unsigned long x; int err; LTC_ARGCHK(private_key != NULL); LTC_ARGCHK(public_key != NULL); LTC_ARGCHK(out != NULL); LTC_ARGCHK(outlen != NULL); /* types valid? */ if (private_key->type != PK_PRIVATE) { return CRYPT_PK_NOT_PRIVATE; } /* same idx? */ if (private_key->idx != public_key->idx) { return CRYPT_PK_TYPE_MISMATCH; } /* compute y^x mod p */ if ((err = mp_init_multi(&tmp, &p, NULL)) != MP_OKAY) { return mpi_to_ltc_error(err); } if ((err = mp_read_radix(&p, (char *)sets[private_key->idx].prime, 64)) != MP_OKAY) { goto error; } if ((err = mp_exptmod(&public_key->y, &private_key->x, &p, &tmp)) != MP_OKAY) { goto error; } /* enough space for output? */ x = (unsigned long)mp_unsigned_bin_size(&tmp); if (*outlen < x) { err = CRYPT_BUFFER_OVERFLOW; goto done; } if ((err = mp_to_unsigned_bin(&tmp, out)) != MP_OKAY) { goto error; } *outlen = x; err = CRYPT_OK; goto done; error: err = mpi_to_ltc_error(err); done: mp_clear_multi(&p, &tmp, NULL); return err; } #include "dh_sys.c" #endif
gpl-2.0
monstermosh/ZeroScripts
scripts/examples/example_misc.cpp
9
2010
/* Copyright (C) 2006 - 2013 ScriptDev2 <http://www.scriptdev2.com/> * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* ScriptData SDName: Example_Misc SD%Complete: 100 SDComment: Item, Areatrigger and other small code examples SDCategory: Script Examples EndScriptData */ #include "precompiled.h" enum { SAY_HI = -1999925 }; bool AreaTrigger_at_example(Player* pPlayer, AreaTriggerEntry const* /*pAt*/) { DoScriptText(SAY_HI, pPlayer); return true; } extern void LoadDatabase(); bool ItemUse_example_item(Player* /*pPlayer*/, Item* /*pItem*/, SpellCastTargets const& /*scTargets*/) { LoadDatabase(); return true; } bool GOUse_example_go_teleporter(Player* pPlayer, GameObject* /*pGo*/) { pPlayer->TeleportTo(0, 1807.07f, 336.105f, 70.3975f, 0.0f); return false; } void AddSC_example_misc() { Script* pNewScript; pNewScript = new Script; pNewScript->Name = "at_example"; pNewScript->pAreaTrigger = &AreaTrigger_at_example; pNewScript->RegisterSelf(false); pNewScript = new Script; pNewScript->Name = "example_item"; pNewScript->pItemUse = &ItemUse_example_item; pNewScript->RegisterSelf(false); pNewScript = new Script; pNewScript->Name = "example_go_teleporter"; pNewScript->pGOUse = &GOUse_example_go_teleporter; pNewScript->RegisterSelf(false); }
gpl-2.0
sayghteight/JadeEmu-5.4.8
src/server/scripts/EasternKingdoms/ZulAman/boss_hexlord.cpp
9
4338
/* * Copyright (C) 2011-2015 Project SkyFire <http://www.projectskyfire.org/> * Copyright (C) 2008-2015 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2015 MaNGOS <http://getmangos.com/> * Copyright (C) 2006-2014 ScriptDev2 <https://github.com/scriptdev2/scriptdev2/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 3 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "SpellScript.h" #include "SpellAuraEffects.h" #include "zulaman.h" enum Says { SAY_AGGRO = 0, SAY_PLAYER_KILL = 1, SAY_SPIRIT_BOLTS = 2, SAY_SIPHON_SOUL = 3, SAY_PET_DEATH = 4, SAY_DEATH = 5 }; enum Spells { SPELL_WL_UNSTABLE_AFFL = 43522, SPELL_WL_UNSTABLE_AFFL_DISPEL = 43523, }; enum Events { }; class boss_hexlord_malacrass : public CreatureScript { public: boss_hexlord_malacrass() : CreatureScript("boss_hexlord_malacrass") { } struct boss_hex_lord_malacrassAI : public BossAI { boss_hex_lord_malacrassAI(Creature* creature) : BossAI(creature, DATA_HEXLORD) { } void Reset() OVERRIDE { _Reset(); } void EnterCombat(Unit* /*who*/) OVERRIDE { Talk(SAY_AGGRO); _EnterCombat(); } void JustDied(Unit* /*killer*/) OVERRIDE { Talk(SAY_DEATH); _JustDied(); } void KilledUnit(Unit* victim) OVERRIDE { if (victim->GetTypeId() == TYPEID_PLAYER) Talk(SAY_PLAYER_KILL); } void UpdateAI(uint32 diff) OVERRIDE { if (!UpdateVictim()) return; events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; /* while 
(uint32 eventId = events.ExecuteEvent()) { switch (eventId) { default: break; } } */ DoMeleeAttackIfReady(); } }; CreatureAI* GetAI(Creature* creature) const OVERRIDE { return GetZulAmanAI<boss_hex_lord_malacrassAI>(creature); } }; class spell_hexlord_unstable_affliction : public SpellScriptLoader { public: spell_hexlord_unstable_affliction() : SpellScriptLoader("spell_hexlord_unstable_affliction") { } class spell_hexlord_unstable_affliction_AuraScript : public AuraScript { PrepareAuraScript(spell_hexlord_unstable_affliction_AuraScript); bool Validate(SpellInfo const* /*spell*/) OVERRIDE { if (!sSpellMgr->GetSpellInfo(SPELL_WL_UNSTABLE_AFFL_DISPEL)) return false; return true; } void HandleDispel(DispelInfo* dispelInfo) { if (Unit* caster = GetCaster()) caster->CastSpell(dispelInfo->GetDispeller(), SPELL_WL_UNSTABLE_AFFL_DISPEL, true, NULL, GetEffect(EFFECT_0)); } void Register() OVERRIDE { AfterDispel += AuraDispelFn(spell_hexlord_unstable_affliction_AuraScript::HandleDispel); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_hexlord_unstable_affliction_AuraScript(); } }; void AddSC_boss_hex_lord_malacrass() { new boss_hexlord_malacrass(); new spell_hexlord_unstable_affliction(); }
gpl-2.0
eiselekd/gcc
libgcc/config/arm/bpabi.c
9
1481
/* Miscellaneous BPABI functions. Copyright (C) 2003-2017 Free Software Foundation, Inc. Contributed by CodeSourcery, LLC. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ extern long long __divdi3 (long long, long long); extern unsigned long long __udivdi3 (unsigned long long, unsigned long long); extern long long __gnu_ldivmod_helper (long long, long long, long long *); long long __gnu_ldivmod_helper (long long a, long long b, long long *remainder) { long long quotient; quotient = __divdi3 (a, b); *remainder = a - b * quotient; return quotient; }
gpl-2.0
mynew1/fun
dep/acelite/ace/ETCL/ETCL_y.cpp
265
39740
/* A Bison parser, made from ETCL/ETCL.yy by GNU Bison version 1.28 */ #define YYBISON 1 /* Identify Bison output. */ #define ETCL_GT 257 #define ETCL_GE 258 #define ETCL_LT 259 #define ETCL_LE 260 #define ETCL_EQ 261 #define ETCL_NE 262 #define ETCL_EXIST 263 #define ETCL_DEFAULT 264 #define ETCL_AND 265 #define ETCL_OR 266 #define ETCL_NOT 267 #define ETCL_IN 268 #define ETCL_TWIDDLE 269 #define ETCL_BOOLEAN 270 #define ETCL_PLUS 271 #define ETCL_MINUS 272 #define ETCL_MULT 273 #define ETCL_DIV 274 #define ETCL_UMINUS 275 #define ETCL_INTEGER 276 #define ETCL_FLOAT 277 #define ETCL_STRING 278 #define ETCL_RPAREN 279 #define ETCL_LPAREN 280 #define ETCL_RBRA 281 #define ETCL_LBRA 282 #define ETCL_IDENT 283 #define ETCL_UNSIGNED 284 #define ETCL_SIGNED 285 #define ETCL_DOUBLE 286 #define ETCL_CONSTRAINT 287 #define ETCL_COMPONENT 288 #define ETCL_WITH 289 #define ETCL_MAX 290 #define ETCL_MIN 291 #define ETCL_FIRST 292 #define ETCL_RANDOM 293 #define ETCL_DOLLAR 294 #define ETCL_DOT 295 #define ETCL_DISCRIMINANT 296 #define ETCL_LENGTH 297 #define ETCL_TYPE_ID 298 #define ETCL_REPOS_ID 299 //============================================================================= /** * @file ETCL_y.cpp * * $Id: ETCL_y.cpp 93651 2011-03-28 08:49:11Z johnnyw $ * * @author Carlos O'Ryan <coryan@uci.edu> based on previous work by Seth Widoff <sbw1@cs.wustl.edu> */ //============================================================================= #include "ace/ETCL/ETCL_y.h" #include "ace/ETCL/ETCL_Constraint.h" #include "ace/ETCL/ETCL_Interpreter.h" ACE_BEGIN_VERSIONED_NAMESPACE_DECL extern int yylex (void); extern void yyflush_current_buffer (void); static void yyerror (const char *) { // @@ TODO // Ignore error messages } ACE_END_VERSIONED_NAMESPACE_DECL #include <stdio.h> ACE_BEGIN_VERSIONED_NAMESPACE_DECL #ifndef __cplusplus #ifndef __STDC__ #define const #endif #endif #define YYFINAL 114 #define YYFLAG -32768 #define YYNTBASE 46 #define YYTRANSLATE(x) ((unsigned)(x) <= 299 ? 
yytranslate[x] : 65) static const char yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45 }; #if YYDEBUG != 0 static const short yyprhs[] = { 0, 0, 2, 4, 7, 10, 13, 15, 17, 21, 23, 27, 29, 33, 37, 41, 45, 49, 53, 55, 59, 64, 66, 70, 72, 76, 80, 82, 86, 90, 92, 95, 97, 101, 103, 106, 109, 111, 114, 117, 119, 121, 124, 128, 132, 135, 137, 138, 141, 144, 146, 148, 149, 152, 154, 156, 159, 161, 163, 165, 167, 169, 171, 176, 181, 184, 189, 190, 192, 195, 198 }; static const short yyrhs[] = { 48, 0, 47, 0, 37, 48, 0, 36, 48, 0, 35, 48, 0, 38, 0, 39, 0, 48, 12, 49, 0, 49, 0, 49, 11, 50, 0, 50, 0, 51, 7, 51, 0, 51, 8, 51, 0, 51, 3, 51, 0, 51, 4, 51, 0, 51, 5, 51, 0, 51, 6, 51, 0, 51, 0, 52, 14, 57, 0, 52, 14, 40, 57, 0, 52, 0, 53, 15, 53, 0, 53, 0, 53, 17, 54, 0, 53, 18, 54, 0, 54, 0, 54, 19, 55, 0, 54, 20, 55, 0, 55, 0, 13, 56, 0, 56, 0, 26, 48, 25, 0, 22, 0, 17, 22, 0, 18, 22, 0, 23, 0, 17, 23, 0, 18, 23, 0, 24, 0, 16, 0, 9, 29, 0, 9, 40, 57, 0, 10, 40, 57, 0, 40, 57, 0, 29, 0, 0, 41, 59, 0, 29, 58, 0, 60, 0, 61, 0, 0, 41, 59, 0, 60, 0, 61, 0, 29, 58, 0, 43, 0, 42, 0, 44, 0, 45, 0, 62, 
0, 63, 0, 28, 22, 27, 58, 0, 26, 29, 25, 58, 0, 22, 58, 0, 26, 64, 25, 58, 0, 0, 22, 0, 17, 22, 0, 18, 22, 0, 24, 0 }; #endif #if YYDEBUG != 0 static const short yyrline[] = { 0, 92, 93, 96, 98, 100, 102, 104, 108, 110, 113, 115, 118, 120, 122, 124, 126, 128, 130, 133, 135, 137, 140, 142, 145, 147, 149, 152, 154, 156, 159, 161, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 194, 196, 199, 202, 203, 206, 208, 211, 212, 215, 217, 219, 221, 223, 225, 226, 229, 233, 237, 241, 245, 247, 249, 251, 253 }; #endif #if YYDEBUG != 0 || defined (YYERROR_VERBOSE) static const char * const yytname[] = { "$","error","$undefined.","ETCL_GT", "ETCL_GE","ETCL_LT","ETCL_LE","ETCL_EQ","ETCL_NE","ETCL_EXIST", "ETCL_DEFAULT","ETCL_AND","ETCL_OR","ETCL_NOT","ETCL_IN", "ETCL_TWIDDLE","ETCL_BOOLEAN","ETCL_PLUS","ETCL_MINUS","ETCL_MULT", "ETCL_DIV","ETCL_UMINUS","ETCL_INTEGER","ETCL_FLOAT","ETCL_STRING", "ETCL_RPAREN","ETCL_LPAREN","ETCL_RBRA","ETCL_LBRA","ETCL_IDENT", "ETCL_UNSIGNED","ETCL_SIGNED","ETCL_DOUBLE","ETCL_CONSTRAINT", "ETCL_COMPONENT","ETCL_WITH","ETCL_MAX","ETCL_MIN","ETCL_FIRST", "ETCL_RANDOM","ETCL_DOLLAR","ETCL_DOT","ETCL_DISCRIMINANT","ETCL_LENGTH", "ETCL_TYPE_ID","ETCL_REPOS_ID","constraint","preference","bool_or","bool_and", "bool_compare","expr_in","expr_twiddle","expr","term","factor_not","factor", "component","component_ext","component_dot","component_array","component_assoc", "component_pos","union_pos","union_val", 0 }; #endif static const short yyr1[] = { 0, 46, 46, 47, 47, 47, 47, 47, 48, 48, 49, 49, 50, 50, 50, 50, 50, 50, 50, 51, 51, 51, 52, 52, 53, 53, 53, 54, 54, 54, 55, 55, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 57, 57, 57, 57, 57, 58, 58, 58, 58, 59, 59, 59, 59, 59, 59, 59, 60, 61, 62, 63, 64, 64, 64, 64, 64 }; static const short yyr2[] = { 0, 1, 1, 2, 2, 2, 1, 1, 3, 1, 3, 1, 3, 3, 3, 3, 3, 3, 1, 3, 4, 1, 3, 1, 3, 3, 1, 3, 3, 1, 2, 1, 3, 1, 2, 2, 1, 2, 2, 1, 1, 2, 3, 3, 2, 1, 0, 2, 2, 1, 1, 0, 2, 1, 1, 2, 1, 1, 1, 1, 
1, 1, 4, 4, 2, 4, 0, 1, 2, 2, 1 }; static const short yydefact[] = { 0, 0, 0, 0, 40, 0, 0, 33, 36, 39, 0, 45, 0, 0, 0, 6, 7, 46, 2, 1, 9, 11, 18, 21, 23, 26, 29, 31, 41, 46, 46, 30, 34, 37, 35, 38, 0, 5, 4, 3, 0, 0, 51, 0, 44, 49, 50, 0, 0, 0, 0, 0, 0, 0, 0, 46, 0, 0, 0, 0, 0, 42, 43, 32, 0, 0, 0, 48, 53, 54, 51, 66, 51, 57, 56, 58, 59, 47, 60, 61, 8, 10, 14, 15, 16, 17, 12, 13, 46, 19, 22, 24, 25, 27, 28, 51, 51, 52, 64, 0, 0, 67, 70, 0, 55, 20, 63, 62, 68, 69, 51, 65, 0, 0, 0 }; static const short yydefgoto[] = { 112, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 44, 67, 77, 68, 69, 78, 79, 103 }; static const short yypact[] = { 41, -13, -39, 94,-32768, 38, 46,-32768,-32768,-32768, 73, -32768, 73, 73, 73,-32768,-32768, -9,-32768, -6, 7, -32768, 121, -5, 19, 55,-32768,-32768,-32768, -9, -9, -32768,-32768,-32768,-32768,-32768, 21, -6, -6, -6, 6, 25, -2, -1,-32768,-32768,-32768, 73, 73, 73, 73, 73, 73, 73, 73, -18, 73, 73, 73, 73, 73, -32768,-32768,-32768, 27, 29, -1,-32768,-32768,-32768, -2, 31, -2,-32768,-32768,-32768,-32768,-32768,-32768,-32768, 7, -32768,-32768,-32768,-32768,-32768,-32768,-32768, -9,-32768, 67, 55, 55,-32768,-32768, -2, -2,-32768,-32768, 44, 50, -32768,-32768, 69,-32768,-32768,-32768,-32768,-32768,-32768, -2, -32768, 98, 100,-32768 }; static const short yypgoto[] = {-32768, -32768, 95, 54, 58, 86,-32768, 59, 30, 33, 111, -26, -65, 53, -17, -15,-32768,-32768,-32768 }; #define YYLAST 140 static const short yytable[] = { 45, 30, 46, 61, 62, 98, 47, 104, 40, 55, 41, 42, 45, 45, 46, 46, 28, 40, 48, 41, 42, 70, 88, 43, 40, 71, 41, 29, 72, 89, 106, 107, 43, 47, 56, 64, 57, 58, 45, 66, 46, 73, 74, 75, 76, 111, 63, 65, 99, 100, 1, 2, 95, 101, 3, 102, 96, 4, 5, 6, 32, 33, 105, 7, 8, 9, 108, 10, 34, 35, 11, 45, 109, 46, 59, 60, 12, 13, 14, 15, 16, 17, 1, 2, 57, 58, 3, 91, 92, 4, 5, 6, 93, 94, 110, 7, 8, 9, 113, 10, 114, 80, 11, 1, 2, 36, 81, 37, 38, 39, 4, 5, 6, 17, 31, 90, 7, 8, 9, 97, 10, 0, 0, 11, 49, 50, 51, 52, 53, 54, 0, 0, 0, 0, 17, 82, 83, 84, 85, 86, 87 
}; static const short yycheck[] = { 17, 40, 17, 29, 30, 70, 12, 72, 26, 14, 28, 29, 29, 30, 29, 30, 29, 26, 11, 28, 29, 22, 40, 41, 26, 26, 28, 40, 29, 55, 95, 96, 41, 12, 15, 29, 17, 18, 55, 41, 55, 42, 43, 44, 45, 110, 25, 22, 17, 18, 9, 10, 25, 22, 13, 24, 27, 16, 17, 18, 22, 23, 88, 22, 23, 24, 22, 26, 22, 23, 29, 88, 22, 88, 19, 20, 35, 36, 37, 38, 39, 40, 9, 10, 17, 18, 13, 57, 58, 16, 17, 18, 59, 60, 25, 22, 23, 24, 0, 26, 0, 47, 29, 9, 10, 10, 48, 12, 13, 14, 16, 17, 18, 40, 3, 56, 22, 23, 24, 66, 26, -1, -1, 29, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, 40, 49, 50, 51, 52, 53, 54 }; /* -*-C-*- Note some compilers choke on comments on `//#line' lines. */ //#line 3 "/pkg/gnu/share/bison.simple" /* This file comes from bison-1.28. */ /* Skeleton output parser for bison, Copyright (C) 1984, 1989, 1990 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* As a special exception, when this file is copied by Bison into a Bison output file, you may use that output file without restriction. This special exception was added by the Free Software Foundation in version 1.24 of Bison. */ /* This is the parser code that is written into each bison parser when the %semantic_parser declaration is not specified in the grammar. It was written by Richard Stallman by simplifying the hairy parser used when %semantic_parser is specified. 
*/ ACE_END_VERSIONED_NAMESPACE_DECL #ifndef YYSTACK_USE_ALLOCA #ifdef alloca #define YYSTACK_USE_ALLOCA #else /* alloca not defined */ #ifdef __GNUC__ #define YYSTACK_USE_ALLOCA #define alloca __builtin_alloca #else /* not GNU C. */ #if (!defined (__STDC__) && defined (sparc)) || defined (__sparc__) || defined (__sparc) || (defined (__sun) && defined (__i386)) #define YYSTACK_USE_ALLOCA #include <alloca.h> #else /* not sparc */ /* We think this test detects Watcom and Microsoft C. */ /* This used to test MSDOS, but that is a bad idea since that symbol is in the user namespace. */ #if (defined (_MSDOS) || defined (_MSDOS_)) && !defined (__TURBOC__) #if 0 /* No need for malloc.h, which pollutes the namespace; instead, just don't use alloca. */ #include <malloc.h> #endif #else /* not MSDOS, or __TURBOC__ */ #if defined(_AIX) /* I don't know what this was needed for, but it pollutes the namespace. So I turned it off. rms, 2 May 1997. */ /* #include <malloc.h> */ #pragma alloca #define YYSTACK_USE_ALLOCA #else /* not MSDOS, or __TURBOC__, or _AIX */ #if 0 #ifdef __hpux /* haible@ilog.fr says this works for HPUX 9.05 and up, and on HPUX 10. Eventually we can turn this on. */ #define YYSTACK_USE_ALLOCA #define alloca __builtin_alloca #endif /* __hpux */ #endif #endif /* not _AIX */ #endif /* not MSDOS, or __TURBOC__ */ #endif /* not sparc */ #endif /* not GNU C */ #endif /* alloca not defined */ #endif /* YYSTACK_USE_ALLOCA not defined */ #ifdef YYSTACK_USE_ALLOCA #define YYSTACK_ALLOC alloca #else #define YYSTACK_ALLOC malloc #endif /* Note: there must be only one dollar sign in this file. It is replaced by the list of actions, each action as one case of the switch. */ #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY -2 #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrlab1 /* Like YYERROR except do call yyerror. 
This remains here temporarily to ease the transition to the new meaning of YYERROR, for GCC. Once GCC version 2 has supplanted version 1, this can go. */ #define YYFAIL goto yyerrlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(token, value) \ do \ if (yychar == YYEMPTY && yylen == 1) \ { yychar = (token), yylval = (value); \ yychar1 = YYTRANSLATE (yychar); \ YYPOPSTACK; \ goto yybackup; \ } \ else \ { yyerror ("syntax error: cannot back up"); YYERROR; } \ while (0) #define YYTERROR 1 #define YYERRCODE 256 #ifndef YYPURE #define YYLEX yylex() #endif #ifdef YYPURE #ifdef YYLSP_NEEDED #ifdef YYLEX_PARAM #define YYLEX yylex(&yylval, &yylloc, YYLEX_PARAM) #else #define YYLEX yylex(&yylval, &yylloc) #endif #else /* not YYLSP_NEEDED */ #ifdef YYLEX_PARAM #define YYLEX yylex(&yylval, YYLEX_PARAM) #else #define YYLEX yylex(&yylval) #endif #endif /* not YYLSP_NEEDED */ #endif ACE_BEGIN_VERSIONED_NAMESPACE_DECL /* If nonreentrant, generate the variables here */ #ifndef YYPURE int yychar; /* the lookahead symbol */ YYSTYPE yylval; /* the semantic value of the */ /* lookahead symbol */ YYSTYPE yyval; /* the variable used to return */ /* semantic values from the action */ /* routines */ #ifdef YYLSP_NEEDED YYLTYPE yylloc; /* location data for the lookahead */ /* symbol */ #endif int yynerrs; /* number of parse errors so far */ #endif /* not YYPURE */ #if YYDEBUG != 0 int yydebug; /* nonzero means print parse trace */ /* Since this is uninitialized, it does not stop multiple parsers from coexisting. */ #endif /* YYINITDEPTH indicates the initial size of the parser's stacks */ #ifndef YYINITDEPTH #define YYINITDEPTH 200 #endif /* YYMAXDEPTH is the maximum size the stacks can grow to (effective only if the built-in stack extension method is used). */ #if YYMAXDEPTH == 0 #undef YYMAXDEPTH #endif #ifndef YYMAXDEPTH #define YYMAXDEPTH 10000 #endif /* Define __yy_memcpy. 
Note that the size argument should be passed with type unsigned int, because that is what the non-GCC definitions require. With GCC, __builtin_memcpy takes an arg of type size_t, but it can handle unsigned int. */ #if __GNUC__ > 1 /* GNU C and GNU C++ define this. */ #define __yy_memcpy(TO,FROM,COUNT) __builtin_memcpy(TO,FROM,COUNT) #else /* not GNU C or C++ */ #ifndef __cplusplus /* This is the most reliable way to avoid incompatibilities in available built-in functions on various systems. */ static void __yy_memcpy (to, from, count) char *to; char *from; unsigned int count; { register char *f = from; register char *t = to; register int i = count; while (i-- > 0) *t++ = *f++; } #else /* __cplusplus */ /* This is the most reliable way to avoid incompatibilities in available built-in functions on various systems. */ static void __yy_memcpy (char *to, char *from, unsigned int count) { register char *t = to; register char *f = from; register int i = count; while (i-- > 0) *t++ = *f++; } #endif #endif //#line 217 "/pkg/gnu/share/bison.simple" /* The user can define YYPARSE_PARAM as the name of an argument to be passed into yyparse. The argument should have type void *. It should actually point to an object. Grammar actions can access the variable by casting it to the proper pointer type. */ #ifdef YYPARSE_PARAM #ifdef __cplusplus #define YYPARSE_PARAM_ARG void *YYPARSE_PARAM #define YYPARSE_PARAM_DECL #else /* not __cplusplus */ #define YYPARSE_PARAM_ARG YYPARSE_PARAM #define YYPARSE_PARAM_DECL void *YYPARSE_PARAM; #endif /* not __cplusplus */ #else /* not YYPARSE_PARAM */ #define YYPARSE_PARAM_ARG #define YYPARSE_PARAM_DECL #endif /* not YYPARSE_PARAM */ /* Prevent warning if -Wstrict-prototypes. 
*/ #ifdef __GNUC__ #ifdef YYPARSE_PARAM int yyparse (void *); #else int yyparse (void); #endif #endif int yyparse(YYPARSE_PARAM_ARG) YYPARSE_PARAM_DECL { register int yystate; register int yyn; register short *yyssp; register YYSTYPE *yyvsp; int yyerrstatus; /* number of tokens to shift before error messages enabled */ int yychar1 = 0; /* lookahead token as an internal (translated) token number */ short yyssa[YYINITDEPTH]; /* the state stack */ YYSTYPE yyvsa[YYINITDEPTH]; /* the semantic value stack */ short *yyss = yyssa; /* refer to the stacks thru separate pointers */ YYSTYPE *yyvs = yyvsa; /* to allow yyoverflow to reallocate them elsewhere */ #ifdef YYLSP_NEEDED YYLTYPE yylsa[YYINITDEPTH]; /* the location stack */ YYLTYPE *yyls = yylsa; YYLTYPE *yylsp; #define YYPOPSTACK (yyvsp--, yyssp--, yylsp--) #else #define YYPOPSTACK (yyvsp--, yyssp--) #endif int yystacksize = YYINITDEPTH; int yyfree_stacks = 0; #ifdef YYPURE int yychar; YYSTYPE yylval; int yynerrs; #ifdef YYLSP_NEEDED YYLTYPE yylloc; #endif #endif int yylen; #if YYDEBUG != 0 if (yydebug) ACE_OS::fprintf(stderr, "Starting parse\n"); #endif yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ /* Initialize stack pointers. Waste one element of value and location stack so that they stay on the same level as the state stack. The wasted elements are never initialized. */ yyssp = yyss - 1; yyvsp = yyvs; #ifdef YYLSP_NEEDED yylsp = yyls; #endif /* Push a new state, which is found in yystate . */ /* In all cases, when you get here, the value and location stacks have just been pushed. so pushing a state here evens the stacks. */ yynewstate: *++yyssp = yystate; if (yyssp >= yyss + yystacksize - 1) { /* Give user a chance to reallocate the stack */ /* Use copies of these so that the &'s don't force the real ones into memory. 
*/ YYSTYPE *yyvs1 = yyvs; short *yyss1 = yyss; #ifdef YYLSP_NEEDED YYLTYPE *yyls1 = yyls; #endif /* Get the current used size of the three stacks, in elements. */ int size = yyssp - yyss + 1; #ifdef yyoverflow /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. */ #ifdef YYLSP_NEEDED /* This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow("parser stack overflow", &yyss1, size * sizeof (*yyssp), &yyvs1, size * sizeof (*yyvsp), &yyls1, size * sizeof (*yylsp), &yystacksize); #else yyoverflow("parser stack overflow", &yyss1, size * sizeof (*yyssp), &yyvs1, size * sizeof (*yyvsp), &yystacksize); #endif yyss = yyss1; yyvs = yyvs1; #ifdef YYLSP_NEEDED yyls = yyls1; #endif #else /* no yyoverflow */ /* Extend the stack our own way. */ if (yystacksize >= YYMAXDEPTH) { yyerror("parser stack overflow"); if (yyfree_stacks) { ACE_OS::free (yyss); ACE_OS::free (yyvs); #ifdef YYLSP_NEEDED ACE_OS::free (yyls); #endif } return 2; } yystacksize *= 2; if (yystacksize > YYMAXDEPTH) yystacksize = YYMAXDEPTH; #ifndef YYSTACK_USE_ALLOCA yyfree_stacks = 1; #endif yyss = (short *) YYSTACK_ALLOC (yystacksize * sizeof (*yyssp)); __yy_memcpy ((char *)yyss, (char *)yyss1, size * (unsigned int) sizeof (*yyssp)); yyvs = (YYSTYPE *) YYSTACK_ALLOC (yystacksize * sizeof (*yyvsp)); __yy_memcpy ((char *)yyvs, (char *)yyvs1, size * (unsigned int) sizeof (*yyvsp)); #ifdef YYLSP_NEEDED yyls = (YYLTYPE *) YYSTACK_ALLOC (yystacksize * sizeof (*yylsp)); __yy_memcpy ((char *)yyls, (char *)yyls1, size * (unsigned int) sizeof (*yylsp)); #endif #endif /* no yyoverflow */ yyssp = yyss + size - 1; yyvsp = yyvs + size - 1; #ifdef YYLSP_NEEDED yylsp = yyls + size - 1; #endif #if YYDEBUG != 0 if (yydebug) ACE_OS::fprintf(stderr, "Stack size increased to %d\n", yystacksize); #endif if (yyssp >= yyss + yystacksize - 1) YYABORT; } #if YYDEBUG != 0 if (yydebug) ACE_OS::fprintf(stderr, "Entering 
state %d\n", yystate); #endif goto yybackup; yybackup: /* Do appropriate processing given the current state. */ /* Read a lookahead token if we need one and don't already have one. */ /* yyresume: */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yyn == YYFLAG) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* yychar is either YYEMPTY or YYEOF or a valid token in external form. */ if (yychar == YYEMPTY) { #if YYDEBUG != 0 if (yydebug) ACE_OS::fprintf(stderr, "Reading a token: "); #endif yychar = YYLEX; } /* Convert token to internal form (in yychar1) for indexing tables with */ if (yychar <= 0) /* This means end of input. */ { yychar1 = 0; yychar = YYEOF; /* Don't call YYLEX any more */ #if YYDEBUG != 0 if (yydebug) ACE_OS::fprintf(stderr, "Now at end of input.\n"); #endif } else { yychar1 = YYTRANSLATE(yychar); #if YYDEBUG != 0 if (yydebug) { ACE_OS::fprintf (stderr, "Next token is %d (%s", yychar, yytname[yychar1]); /* Give the individual parser a way to print the precise meaning of a token, for further debugging info. */ #ifdef YYPRINT YYPRINT (stderr, yychar, yylval); #endif ACE_OS::fprintf (stderr, ")\n"); } #endif } yyn += yychar1; if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != yychar1) goto yydefault; yyn = yytable[yyn]; /* yyn is what to do for this token type in this state. Negative => reduce, -yyn is rule number. Positive => shift, yyn is new state. New state is final state => don't bother to shift, just return success. 0, or most negative number => error. */ if (yyn < 0) { if (yyn == YYFLAG) goto yyerrlab; yyn = -yyn; goto yyreduce; } else if (yyn == 0) goto yyerrlab; if (yyn == YYFINAL) YYACCEPT; /* Shift the lookahead token. */ #if YYDEBUG != 0 if (yydebug) ACE_OS::fprintf(stderr, "Shifting token %d (%s), ", yychar, yytname[yychar1]); #endif /* Discard the token being shifted unless it is eof. 
*/ if (yychar != YYEOF) yychar = YYEMPTY; *++yyvsp = yylval; #ifdef YYLSP_NEEDED *++yylsp = yylloc; #endif /* count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; yystate = yyn; goto yynewstate; /* Do the default action for the current state. */ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; /* Do a reduction. yyn is the number of a rule to reduce with. */ yyreduce: yylen = yyr2[yyn]; if (yylen > 0) yyval = yyvsp[1-yylen]; /* implement default value of the action */ #if YYDEBUG != 0 if (yydebug) { int i; ACE_OS::fprintf (stderr, "Reducing via rule %d (line %d), ", yyn, yyrline[yyn]); /* Print the symbols being reduced, and their result. */ for (i = yyprhs[yyn]; yyrhs[i] > 0; i++) ACE_OS::fprintf (stderr, "%s ", yytname[yyrhs[i]]); ACE_OS::fprintf (stderr, " -> %s\n", yytname[yyr1[yyn]]); } #endif switch (yyn) { case 3: //#line 97 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Preference (ETCL_MIN, yyvsp[0].constraint); ; break;} case 4: //#line 99 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Preference (ETCL_MAX, yyvsp[0].constraint); ; break;} case 5: //#line 101 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Preference (ETCL_WITH, yyvsp[0].constraint); ; break;} case 6: //#line 103 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Preference (ETCL_FIRST); ; break;} case 7: //#line 105 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Preference (ETCL_RANDOM); ; break;} case 8: //#line 109 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_OR, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 10: //#line 114 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_AND, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 12: //#line 119 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_EQ, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 13: //#line 121 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_NE, yyvsp[-2].constraint, 
yyvsp[0].constraint); ; break;} case 14: //#line 123 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_GT, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 15: //#line 125 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_GE, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 16: //#line 127 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_LT, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 17: //#line 129 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_LE, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 19: //#line 134 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_IN, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 20: //#line 136 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_IN, yyvsp[-3].constraint, yyvsp[0].constraint); ; break;} case 22: //#line 141 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_TWIDDLE, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 24: //#line 146 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_PLUS, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 25: //#line 148 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_MINUS, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 27: //#line 153 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_MULT, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 28: //#line 155 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Binary_Expr (ETCL_DIV, yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 30: //#line 160 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Unary_Expr (ETCL_NOT, yyvsp[0].constraint); ; break;} case 32: //#line 165 "ETCL/ETCL.yy" { yyval.constraint = yyvsp[-1].constraint; ; break;} case 33: //#line 167 "ETCL/ETCL.yy" { yyval.constraint = yyvsp[0].constraint; ; break;} case 34: //#line 169 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Unary_Expr (ETCL_PLUS, 
yyvsp[0].constraint); ; break;} case 35: //#line 171 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Unary_Expr (ETCL_MINUS, yyvsp[0].constraint); ; break;} case 36: //#line 173 "ETCL/ETCL.yy" { yyval.constraint = yyvsp[0].constraint; ; break;} case 37: //#line 175 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Unary_Expr (ETCL_PLUS, yyvsp[0].constraint); ; break;} case 38: //#line 177 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Unary_Expr (ETCL_MINUS, yyvsp[0].constraint); ; break;} case 39: //#line 179 "ETCL/ETCL.yy" { yyval.constraint = yyvsp[0].constraint; ; break;} case 40: //#line 181 "ETCL/ETCL.yy" { yyval.constraint = yyvsp[0].constraint; ; break;} case 41: //#line 183 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Exist (yyvsp[0].constraint); ; break;} case 42: //#line 185 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Exist (yyvsp[0].constraint); ; break;} case 43: //#line 187 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Default (yyvsp[0].constraint); ; break;} case 44: //#line 189 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Eval (yyvsp[0].constraint); ; break;} case 45: //#line 191 "ETCL/ETCL.yy" { yyval.constraint = yyvsp[0].constraint; ; break;} case 46: //#line 195 "ETCL/ETCL.yy" { yyval.constraint = 0; ; break;} case 47: //#line 197 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Dot (yyvsp[0].constraint); ; break;} case 48: //#line 200 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Component (yyvsp[-1].constraint, yyvsp[0].constraint); ; break;} case 51: //#line 207 "ETCL/ETCL.yy" { yyval.constraint = 0; ; break;} case 52: //#line 209 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Dot (yyvsp[0].constraint); ; break;} case 55: //#line 216 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Component (yyvsp[-1].constraint, yyvsp[0].constraint); ; break;} case 56: //#line 218 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Special (ETCL_LENGTH); ; break;} case 57: //#line 220 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Special (ETCL_DISCRIMINANT); ; break;} case 58: //#line 
222 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Special (ETCL_TYPE_ID); ; break;} case 59: //#line 224 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Special (ETCL_REPOS_ID); ; break;} case 62: //#line 230 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Component_Array (yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 63: //#line 234 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Component_Assoc (yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 64: //#line 238 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Component_Pos (yyvsp[-1].constraint, yyvsp[0].constraint); ; break;} case 65: //#line 242 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Union_Pos (yyvsp[-2].constraint, yyvsp[0].constraint); ; break;} case 66: //#line 246 "ETCL/ETCL.yy" { yyval.constraint = 0; ; break;} case 67: //#line 248 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Union_Value (+1, yyvsp[0].constraint); ; break;} case 68: //#line 250 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Union_Value (+1, yyvsp[0].constraint); ; break;} case 69: //#line 252 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Union_Value (-1, yyvsp[0].constraint); ; break;} case 70: //#line 254 "ETCL/ETCL.yy" { yyval.constraint = new ETCL_Union_Value (yyvsp[0].constraint); ; break;} } /* the action file gets copied in in place of this dollarsign */ //#line 543 "/pkg/gnu/share/bison.simple" yyvsp -= yylen; yyssp -= yylen; #ifdef YYLSP_NEEDED yylsp -= yylen; #endif #if YYDEBUG != 0 if (yydebug) { short *ssp1 = yyss - 1; ACE_OS::fprintf (stderr, "state stack now"); while (ssp1 != yyssp) ACE_OS::fprintf (stderr, " %d", *++ssp1); ACE_OS::fprintf (stderr, "\n"); } #endif *++yyvsp = yyval; #ifdef YYLSP_NEEDED yylsp++; if (yylen == 0) { yylsp->first_line = yylloc.first_line; yylsp->first_column = yylloc.first_column; yylsp->last_line = (yylsp-1)->last_line; yylsp->last_column = (yylsp-1)->last_column; yylsp->text = 0; } else { yylsp->last_line = (yylsp+yylen-1)->last_line; yylsp->last_column = (yylsp+yylen-1)->last_column; } 
#endif /* Now "shift" the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ yyn = yyr1[yyn]; yystate = yypgoto[yyn - YYNTBASE] + *yyssp; if (yystate >= 0 && yystate <= YYLAST && yycheck[yystate] == *yyssp) yystate = yytable[yystate]; else yystate = yydefgoto[yyn - YYNTBASE]; goto yynewstate; yyerrlab: /* here on detecting error */ if (! yyerrstatus) /* If not already recovering from an error, report this error. */ { ++yynerrs; #ifdef YYERROR_VERBOSE yyn = yypact[yystate]; if (yyn > YYFLAG && yyn < YYLAST) { int size = 0; char *msg; int x, count; count = 0; /* Start X at -yyn if nec to avoid negative indexes in yycheck. */ for (x = (yyn < 0 ? -yyn : 0); x < (sizeof(yytname) / sizeof(char *)); x++) if (yycheck[x + yyn] == x) size += ACE_OS::strlen(yytname[x]) + 15, count++; msg = (char *) ACE_OS::malloc(size + 15); if (msg != 0) { ACE_OS::strcpy(msg, "parse error"); if (count < 5) { count = 0; for (x = (yyn < 0 ? -yyn : 0); x < (sizeof(yytname) / sizeof(char *)); x++) if (yycheck[x + yyn] == x) { ACE_OS::strcat(msg, count == 0 ? ", expecting `" : " or `"); ACE_OS::strcat(msg, yytname[x]); ACE_OS::strcat(msg, "'"); count++; } } yyerror(msg); ACE_OS::free(msg); } else yyerror ("parse error; also virtual memory exceeded"); } else #endif /* YYERROR_VERBOSE */ yyerror("parse error"); } goto yyerrlab1; yyerrlab1: /* here on error raised explicitly by an action */ if (yyerrstatus == 3) { /* if just tried and failed to reuse lookahead token after an error, discard it. */ /* return failure if at end of input */ if (yychar == YYEOF) YYABORT; #if YYDEBUG != 0 if (yydebug) ACE_OS::fprintf(stderr, "Discarding token %d (%s).\n", yychar, yytname[yychar1]); #endif yychar = YYEMPTY; } /* Else will try to reuse lookahead token after shifting the error token. 
*/ yyerrstatus = 3; /* Each real token shifted decrements this */ goto yyerrhandle; yyerrdefault: /* current state does not do anything special for the error token. */ #if 0 /* This is wrong; only states that explicitly want error tokens should shift them. */ yyn = yydefact[yystate]; /* If its default is to accept any token, ok. Otherwise pop it.*/ if (yyn) goto yydefault; #endif yyerrpop: /* pop the current state because it cannot handle the error token */ if (yyssp == yyss) YYABORT; yyvsp--; yystate = *--yyssp; #ifdef YYLSP_NEEDED yylsp--; #endif #if YYDEBUG != 0 if (yydebug) { short *ssp1 = yyss - 1; ACE_OS::fprintf (stderr, "Error: state stack now"); while (ssp1 != yyssp) ACE_OS::fprintf (stderr, " %d", *++ssp1); ACE_OS::fprintf (stderr, "\n"); } #endif yyerrhandle: yyn = yypact[yystate]; if (yyn == YYFLAG) goto yyerrdefault; yyn += YYTERROR; if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != YYTERROR) goto yyerrdefault; yyn = yytable[yyn]; if (yyn < 0) { if (yyn == YYFLAG) goto yyerrpop; yyn = -yyn; goto yyreduce; } else if (yyn == 0) goto yyerrpop; if (yyn == YYFINAL) YYACCEPT; #if YYDEBUG != 0 if (yydebug) ACE_OS::fprintf(stderr, "Shifting error token, "); #endif *++yyvsp = yylval; #ifdef YYLSP_NEEDED *++yylsp = yylloc; #endif yystate = yyn; goto yynewstate; yyacceptlab: /* YYACCEPT comes here. */ if (yyfree_stacks) { ACE_OS::free (yyss); ACE_OS::free (yyvs); #ifdef YYLSP_NEEDED ACE_OS::free (yyls); #endif } return 0; yyabortlab: /* YYABORT comes here. */ /* Flush out yy_current_buffer before next parse. Since there is no error recovery, the buffer could still contain tokens from this parse. */ yyflush_current_buffer(); if (yyfree_stacks) { ACE_OS::free (yyss); ACE_OS::free (yyvs); #ifdef YYLSP_NEEDED ACE_OS::free (yyls); #endif } return 1; } //#line 257 "ETCL/ETCL.yy" ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
alysher/mangos-cata-pb
dep/ACE_wrappers/ace/Service_Types.cpp
265
11035
// $Id: Service_Types.cpp 91813 2010-09-17 07:52:52Z johnnyw $ #include "ace/Service_Types.h" #if !defined (__ACE_INLINE__) #include "ace/Service_Types.inl" #endif /* __ACE_INLINE__ */ #include "ace/Stream_Modules.h" #include "ace/Stream.h" #include "ace/OS_NS_stdio.h" #include "ace/OS_NS_string.h" ACE_BEGIN_VERSIONED_NAMESPACE_DECL typedef ACE_Stream<ACE_SYNCH> MT_Stream; typedef ACE_Module<ACE_SYNCH> MT_Module; typedef ACE_Task<ACE_SYNCH> MT_Task; ACE_ALLOC_HOOK_DEFINE(ACE_Service_Type_Impl) void ACE_Service_Type_Impl::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_Service_Type_Impl::dump"); #endif /* ACE_HAS_DUMP */ } ACE_Service_Type_Impl::ACE_Service_Type_Impl (void *so, const ACE_TCHAR *s_name, u_int f, ACE_Service_Object_Exterminator gobbler, int stype) : name_ (0), obj_ (so), gobbler_ (gobbler), flags_ (f), service_type_ (stype) { ACE_TRACE ("ACE_Service_Type_Impl::ACE_Service_Type_Impl"); this->name (s_name); } ACE_Service_Type_Impl::~ACE_Service_Type_Impl (void) { ACE_TRACE ("ACE_Service_Type_Impl::~ACE_Service_Type_Impl"); // It's ok to call this, even though we may have already deleted it // in the fini() method since it would then be NULL. delete [] const_cast <ACE_TCHAR *> (this->name_); } int ACE_Service_Type_Impl::fini (void) const { ACE_TRACE ("ACE_Service_Type_Impl::fini"); delete [] const_cast <ACE_TCHAR *> (this->name_); (const_cast <ACE_Service_Type_Impl *> (this))->name_ = 0; if (ACE_BIT_ENABLED (this->flags_, ACE_Service_Type::DELETE_OBJ)) { if (gobbler_ != 0) gobbler_ (this->object ()); else // Cast to remove const-ness. 
operator delete ((void *) this->object ()); } if (ACE_BIT_ENABLED (this->flags_, ACE_Service_Type::DELETE_THIS)) delete const_cast <ACE_Service_Type_Impl *> (this); return 0; } ACE_Service_Object_Type::ACE_Service_Object_Type (void *so, const ACE_TCHAR *s_name, u_int f, ACE_Service_Object_Exterminator gobbler, int stype) : ACE_Service_Type_Impl (so, s_name, f, gobbler, stype) , initialized_ (-1) { ACE_TRACE ("ACE_Service_Object_Type::ACE_Service_Object_Type"); } int ACE_Service_Object_Type::init (int argc, ACE_TCHAR *argv[]) const { ACE_TRACE ("ACE_Service_Object_Type::init"); void * const obj = this->object (); ACE_Service_Object * const so = static_cast<ACE_Service_Object *> (obj); if (so == 0) return -1; this->initialized_ = so->init (argc, argv); return this->initialized_; } int ACE_Service_Object_Type::fini (void) const { ACE_TRACE ("ACE_Service_Object_Type::fini"); void * const obj = this->object (); ACE_Service_Object * const so = static_cast<ACE_Service_Object *> (obj); // Call fini() if an only if, the object was successfuly // initialized, i.e. init() returned 0. This is necessary to // maintain the ctor/dtor-like semantics for init/fini. 
if (so != 0 && this->initialized_ == 0) so->fini (); return ACE_Service_Type_Impl::fini (); } ACE_Service_Object_Type::~ACE_Service_Object_Type (void) { ACE_TRACE ("ACE_Service_Object_Type::~ACE_Service_Object_Type"); } int ACE_Service_Object_Type::suspend (void) const { ACE_TRACE ("ACE_Service_Object_Type::suspend"); return static_cast<ACE_Service_Object *> (this->object ())->suspend (); } int ACE_Service_Object_Type::resume (void) const { ACE_TRACE ("ACE_Service_Object_Type::resume"); return static_cast<ACE_Service_Object *> (this->object ())->resume (); } int ACE_Service_Object_Type::info (ACE_TCHAR **str, size_t len) const { ACE_TRACE ("ACE_Service_Object_Type::info"); return static_cast<ACE_Service_Object *> (this->object ())->info (str, len); } ACE_ALLOC_HOOK_DEFINE(ACE_Module_Type) void ACE_Module_Type::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_Module_Type::dump"); #endif /* ACE_HAS_DUMP */ } ACE_Module_Type::ACE_Module_Type (void *m, const ACE_TCHAR *m_name, u_int f, int stype) : ACE_Service_Type_Impl (m, m_name, f, 0, stype) { ACE_TRACE ("ACE_Module_Type::ACE_Module_Type"); } ACE_Module_Type::~ACE_Module_Type (void) { ACE_TRACE ("ACE_Module_Type::~ACE_Module_Type"); } int ACE_Module_Type::init (int argc, ACE_TCHAR *argv[]) const { ACE_TRACE ("ACE_Module_Type::init"); void *obj = this->object (); MT_Module *mod = (MT_Module *) obj; // // Change the Module's name to what's in the svc.conf file. // We must do this so the names match up so everything shuts // down properly during the call to ACE_Stream_Type::fini // which calls MT_Stream::remove([name]) for all the modules. // If the calls to remove fail, we end up with a double delete // during shutdown. 
Bugzilla #3847 // mod->name (this->name_); MT_Task *reader = mod->reader (); MT_Task *writer = mod->writer (); if (reader->init (argc, argv) == -1 || writer->init (argc, argv) == -1) return -1; else return 0; } int ACE_Module_Type::suspend (void) const { ACE_TRACE ("ACE_Module_Type::suspend"); void *obj = this->object (); MT_Module *mod = (MT_Module *) obj; MT_Task *reader = mod->reader (); MT_Task *writer = mod->writer (); if (reader->suspend () == -1 || writer->suspend () == -1) return -1; else return 0; } int ACE_Module_Type::resume (void) const { ACE_TRACE ("ACE_Module_Type::resume"); void *obj = this->object (); MT_Module *mod = (MT_Module *) obj; MT_Task *reader = mod->reader (); MT_Task *writer = mod->writer (); if (reader->resume () == -1 || writer->resume () == -1) return -1; else return 0; } // Note, these operations are somewhat too familiar with the // implementation of ACE_Module and ACE_Module::close... int ACE_Module_Type::fini (void) const { ACE_TRACE ("ACE_Module_Type::fini"); void *obj = this->object (); MT_Module *mod = (MT_Module *) obj; MT_Task *reader = mod->reader (); MT_Task *writer = mod->writer (); if (reader != 0) reader->fini (); if (writer != 0) writer->fini (); // Close the module and delete the memory. 
mod->close (MT_Module::M_DELETE); return ACE_Service_Type_Impl::fini (); } int ACE_Module_Type::info (ACE_TCHAR **str, size_t len) const { ACE_TRACE ("ACE_Module_Type::info"); ACE_TCHAR buf[BUFSIZ]; ACE_OS::sprintf (buf, ACE_TEXT ("%s\t %s"), this->name (), ACE_TEXT ("# ACE_Module\n")); if (*str == 0 && (*str = ACE_OS::strdup (buf)) == 0) return -1; else ACE_OS::strsncpy (*str, buf, len); return static_cast<int> (ACE_OS::strlen (buf)); } void ACE_Module_Type::link (ACE_Module_Type *n) { ACE_TRACE ("ACE_Module_Type::link"); this->link_ = n; } ACE_Module_Type * ACE_Module_Type::link (void) const { ACE_TRACE ("ACE_Module_Type::link"); return this->link_; } ACE_ALLOC_HOOK_DEFINE(ACE_Stream_Type) void ACE_Stream_Type::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_Stream_Type::dump"); #endif /* ACE_HAS_DUMP */ } int ACE_Stream_Type::init (int, ACE_TCHAR *[]) const { ACE_TRACE ("ACE_Stream_Type::init"); return 0; } int ACE_Stream_Type::suspend (void) const { ACE_TRACE ("ACE_Stream_Type::suspend"); for (ACE_Module_Type *m = this->head_; m != 0; m = m->link ()) m->suspend (); return 0; } int ACE_Stream_Type::resume (void) const { ACE_TRACE ("ACE_Stream_Type::resume"); for (ACE_Module_Type *m = this->head_; m != 0; m = m->link ()) m->resume (); return 0; } ACE_Stream_Type::ACE_Stream_Type (void *s, const ACE_TCHAR *s_name, u_int f, int stype) : ACE_Service_Type_Impl (s, s_name, f, 0, stype), head_ (0) { ACE_TRACE ("ACE_Stream_Type::ACE_Stream_Type"); } ACE_Stream_Type::~ACE_Stream_Type (void) { ACE_TRACE ("ACE_Stream_Type::~ACE_Stream_Type"); } int ACE_Stream_Type::info (ACE_TCHAR **str, size_t len) const { ACE_TRACE ("ACE_Stream_Type::info"); ACE_TCHAR buf[BUFSIZ]; ACE_OS::sprintf (buf, ACE_TEXT ("%s\t %s"), this->name (), ACE_TEXT ("# STREAM\n")); if (*str == 0 && (*str = ACE_OS::strdup (buf)) == 0) return -1; else ACE_OS::strsncpy (*str, buf, len); return static_cast<int> (ACE_OS::strlen (buf)); } int ACE_Stream_Type::fini (void) const { ACE_TRACE 
("ACE_Stream_Type::fini"); void *obj = this->object (); MT_Stream *str = (MT_Stream *) obj; for (ACE_Module_Type *m = this->head_; m != 0;) { ACE_Module_Type *t = m->link (); // Final arg is an indication to *not* delete the Module. str->remove (m->name (), MT_Module::M_DELETE_NONE); m = t; } str->close (); return ACE_Service_Type_Impl::fini (); } // Locate and remove <mod_name> from the ACE_Stream. int ACE_Stream_Type::remove (ACE_Module_Type *mod) { ACE_TRACE ("ACE_Stream_Type::remove"); ACE_Module_Type *prev = 0; void *obj = this->object (); MT_Stream *str = (MT_Stream *) obj; int result = 0; for (ACE_Module_Type *m = this->head_; m != 0; ) { // We need to do this first so we don't bomb out if we delete m! ACE_Module_Type *link = m->link (); if (m == mod) { if (prev == 0) this->head_ = link; else prev->link (link); // Final arg is an indication to *not* delete the Module. if (str->remove (m->name (), MT_Module::M_DELETE_NONE) == -1) result = -1; // Do not call m->fini (); as this will result in a double delete // of the ACE_Module_type when ACE_Service_Repository::fini is called } else prev = m; m = link; } return result; } int ACE_Stream_Type::push (ACE_Module_Type *new_module) { ACE_TRACE ("ACE_Stream_Type::push"); void *obj = this->object (); MT_Stream *str = (MT_Stream *) obj; new_module->link (this->head_); this->head_ = new_module; obj = new_module->object (); return str->push ((MT_Module *) obj); } ACE_Module_Type * ACE_Stream_Type::find (const ACE_TCHAR *module_name) const { ACE_TRACE ("ACE_Stream_Type::find"); for (ACE_Module_Type *m = this->head_; m != 0; m = m->link ()) if (ACE_OS::strcmp (m->name (), module_name) == 0) return m; return 0; } // @@@ Eliminated ommented out explicit template instantiation code ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
vm03/android_kernel_lge_msm8226
net/core/dev.c
1289
163585
/* * NET3 Protocol independent device support routines. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Derived from the non IP parts of dev.c 1.0.19 * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * * Additional Authors: * Florian la Roche <rzsfl@rz.uni-sb.de> * Alan Cox <gw4pts@gw4pts.ampr.org> * David Hinds <dahinds@users.sourceforge.net> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * Adam Sulmicki <adam@cfar.umd.edu> * Pekka Riikonen <priikone@poesidon.pspt.fi> * * Changes: * D.J. Barrow : Fixed bug where dev->refcnt gets set * to 2 if register_netdev gets called * before net_dev_init & also removed a * few lines of code in the process. * Alan Cox : device private ioctl copies fields back. * Alan Cox : Transmit queue code does relevant * stunts to keep the queue safe. * Alan Cox : Fixed double lock. * Alan Cox : Fixed promisc NULL pointer trap * ???????? : Support the full private ioctl range * Alan Cox : Moved ioctl permission check into * drivers * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI * Alan Cox : 100 backlog just doesn't cut it when * you start doing multicast video 8) * Alan Cox : Rewrote net_bh and list manager. * Alan Cox : Fix ETH_P_ALL echoback lengths. * Alan Cox : Took out transmit every packet pass * Saved a few bytes in the ioctl handler * Alan Cox : Network driver sets packet type before * calling netif_rx. Saves a function * call a packet. * Alan Cox : Hashed net_bh() * Richard Kooijman: Timestamp fixes. * Alan Cox : Wrong field in SIOCGIFDSTADDR * Alan Cox : Device lock protection. * Alan Cox : Fixed nasty side effect of device close * changes. 
* Rudi Cilibrasi : Pass the right thing to * set_mac_address() * Dave Miller : 32bit quantity for the device lock to * make it work out on a Sparc. * Bjorn Ekwall : Added KERNELD hack. * Alan Cox : Cleaned up the backlog initialise. * Craig Metz : SIOCGIFCONF fix if space for under * 1 device. * Thomas Bogendoerfer : Return ENODEV for dev_open, if there * is no device open function. * Andi Kleen : Fix error reporting for SIOCGIFCONF * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF * Cyrus Durgin : Cleaned for KMOD * Adam Sulmicki : Bug Fix : Network Device Unload * A network device unload needs to purge * the backlog queue. * Paul Rusty Russell : SIOCSIFNAME * Pekka Riikonen : Netdev boot-time settings code * Andrew Morton : Make unregister_netdevice wait * indefinitely on dev->refcnt * J Hadi Salim : - Backlog queue sampling * - netif_rx() feedback */ #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/capability.h> #include <linux/cpu.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/hash.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/notifier.h> #include <linux/skbuff.h> #include <net/net_namespace.h> #include <net/sock.h> #include <linux/rtnetlink.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> #include <net/dst.h> #include <net/pkt_sched.h> #include <net/checksum.h> #include <net/xfrm.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/module.h> #include <linux/netpoll.h> #include <linux/rcupdate.h> #include <linux/delay.h> #include <net/wext.h> #include <net/iw_handler.h> #include <asm/current.h> #include <linux/audit.h> #include 
<linux/dmaengine.h> #include <linux/err.h> #include <linux/ctype.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <net/ip.h> #include <linux/ipv6.h> #include <linux/in.h> #include <linux/jhash.h> #include <linux/random.h> #include <trace/events/napi.h> #include <trace/events/net.h> #include <trace/events/skb.h> #include <linux/pci.h> #include <linux/inetdevice.h> #include <linux/cpu_rmap.h> #include <linux/net_tstamp.h> #include <linux/static_key.h> #include <net/flow_keys.h> #include "net-sysfs.h" /* Instead of increasing this, you should create a hash table. */ #define MAX_GRO_SKBS 8 /* This should be increased if a protocol with a bigger head is added. */ #define GRO_MAX_HEAD (MAX_HEADER + 128) /* * The list of packet types we will receive (as opposed to discard) * and the routines to invoke. * * Why 16. Because with 16 the only overlap we get on a hash of the * low nibble of the protocol value is RARP/SNAP/X.25. * * NOTE: That is no longer true with the addition of VLAN tags. Not * sure which should go first, but I bet it won't make much * difference if we are running VLANs. The good news is that * this protocol won't be in the list unless compiled in, so * the average user (w/out VLANs) will not be adversely affected. * --BLG * * 0800 IP * 8100 802.1Q VLAN * 0001 802.3 * 0002 AX.25 * 0004 802.2 * 8035 RARP * 0005 SNAP * 0805 X.25 * 0806 ARP * 8137 IPX * 0009 Localtalk * 86DD IPv6 */ #define PTYPE_HASH_SIZE (16) #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) static DEFINE_SPINLOCK(ptype_lock); static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; static struct list_head ptype_all __read_mostly; /* Taps */ /* * The @dev_base_head list is protected by @dev_base_lock and the rtnl * semaphore. 
* * Pure readers hold dev_base_lock for reading, or rcu_read_lock() * * Writers must hold the rtnl semaphore while they loop through the * dev_base_head list, and hold dev_base_lock for writing when they do the * actual updates. This allows pure readers to access the list even * while a writer is preparing to update it. * * To put it another way, dev_base_lock is held for writing only to * protect against pure readers; the rtnl semaphore provides the * protection against other writers. * * See, for example usages, register_netdevice() and * unregister_netdevice(), which must be called with the rtnl * semaphore held. */ DEFINE_RWLOCK(dev_base_lock); EXPORT_SYMBOL(dev_base_lock); static inline void dev_base_seq_inc(struct net *net) { while (++net->dev_base_seq == 0); } static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) { unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; } static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) { return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; } static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS spin_lock(&sd->input_pkt_queue.lock); #endif } static inline void rps_unlock(struct softnet_data *sd) { #ifdef CONFIG_RPS spin_unlock(&sd->input_pkt_queue.lock); #endif } /* Device list insertion */ static int list_netdevice(struct net_device *dev) { struct net *net = dev_net(dev); ASSERT_RTNL(); write_lock_bh(&dev_base_lock); list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); hlist_add_head_rcu(&dev->index_hlist, dev_index_hash(net, dev->ifindex)); write_unlock_bh(&dev_base_lock); dev_base_seq_inc(net); return 0; } /* Device list removal * caller must respect a RCU grace period before freeing/reusing dev */ static void unlist_netdevice(struct net_device *dev) { ASSERT_RTNL(); /* Unlink dev from the device 
chain */ write_lock_bh(&dev_base_lock); list_del_rcu(&dev->dev_list); hlist_del_rcu(&dev->name_hlist); hlist_del_rcu(&dev->index_hlist); write_unlock_bh(&dev_base_lock); dev_base_seq_inc(dev_net(dev)); } /* * Our notifier list */ static RAW_NOTIFIER_HEAD(netdev_chain); /* * Device drivers call our routines to queue packets here. We empty the * queue in the local softnet handler. */ DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); EXPORT_PER_CPU_SYMBOL(softnet_data); #ifdef CONFIG_LOCKDEP /* * register_netdevice() inits txq->_xmit_lock and sets lockdep class * according to dev->type */ static const unsigned short netdev_lock_type[] = {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; static const char *const netdev_lock_name[] = {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", 
"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; static inline unsigned short netdev_lock_pos(unsigned short dev_type) { int i; for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) if (netdev_lock_type[i] == dev_type) return i; /* the last key is used by default */ return ARRAY_SIZE(netdev_lock_type) - 1; } static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, unsigned short dev_type) { int i; i = netdev_lock_pos(dev_type); lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], netdev_lock_name[i]); } static inline void netdev_set_addr_lockdep_class(struct net_device *dev) { int i; i = netdev_lock_pos(dev->type); lockdep_set_class_and_name(&dev->addr_list_lock, &netdev_addr_lock_key[i], netdev_lock_name[i]); } #else static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, unsigned short dev_type) { } static inline void netdev_set_addr_lockdep_class(struct net_device *dev) { } #endif /******************************************************************************* Protocol management and registration routines *******************************************************************************/ /* * Add a protocol ID to the list. Now that the input handler is * smarter we can dispense with all the messy stuff that used to be * here. * * BEWARE!!! 
Protocol handlers, mangling input packets, * MUST BE last in hash buckets and checking protocol handlers * MUST start from promiscuous ptype_all chain in net_bh. * It is true now, do not change it. * Explanation follows: if protocol handler, mangling packet, will * be the first on list, it is not able to sense, that packet * is cloned and should be copied-on-write, so that it will * change it and subsequent readers will get broken packet. * --ANK (980803) */ static inline struct list_head *ptype_head(const struct packet_type *pt) { if (pt->type == htons(ETH_P_ALL)) return &ptype_all; else return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; } /** * dev_add_pack - add packet handler * @pt: packet type declaration * * Add a protocol handler to the networking stack. The passed &packet_type * is linked into kernel lists and may not be freed until it has been * removed from the kernel lists. * * This call does not sleep therefore it can not * guarantee all CPU's that are in middle of receiving packets * will see the new packet type (until the next received packet). */ void dev_add_pack(struct packet_type *pt) { struct list_head *head = ptype_head(pt); spin_lock(&ptype_lock); list_add_rcu(&pt->list, head); spin_unlock(&ptype_lock); } EXPORT_SYMBOL(dev_add_pack); /** * __dev_remove_pack - remove packet handler * @pt: packet type declaration * * Remove a protocol handler that was previously added to the kernel * protocol handlers by dev_add_pack(). The passed &packet_type is removed * from the kernel lists and can be freed or reused once this function * returns. * * The packet type might still be in use by receivers * and must not be freed until after all the CPU's have gone * through a quiescent state. 
*/ void __dev_remove_pack(struct packet_type *pt) { struct list_head *head = ptype_head(pt); struct packet_type *pt1; spin_lock(&ptype_lock); list_for_each_entry(pt1, head, list) { if (pt == pt1) { list_del_rcu(&pt->list); goto out; } } pr_warn("dev_remove_pack: %p not found\n", pt); out: spin_unlock(&ptype_lock); } EXPORT_SYMBOL(__dev_remove_pack); /** * dev_remove_pack - remove packet handler * @pt: packet type declaration * * Remove a protocol handler that was previously added to the kernel * protocol handlers by dev_add_pack(). The passed &packet_type is removed * from the kernel lists and can be freed or reused once this function * returns. * * This call sleeps to guarantee that no CPU is looking at the packet * type after return. */ void dev_remove_pack(struct packet_type *pt) { __dev_remove_pack(pt); synchronize_net(); } EXPORT_SYMBOL(dev_remove_pack); /****************************************************************************** Device Boot-time Settings Routines *******************************************************************************/ /* Boot time configuration table */ static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; /** * netdev_boot_setup_add - add new setup entry * @name: name of the device * @map: configured settings for the device * * Adds new setup entry to the dev_boot_setup list. The function * returns 0 on error and 1 on success. This is a generic routine to * all netdevices. */ static int netdev_boot_setup_add(char *name, struct ifmap *map) { struct netdev_boot_setup *s; int i; s = dev_boot_setup; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { memset(s[i].name, 0, sizeof(s[i].name)); strlcpy(s[i].name, name, IFNAMSIZ); memcpy(&s[i].map, map, sizeof(s[i].map)); break; } } return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; } /** * netdev_boot_setup_check - check boot time settings * @dev: the netdevice * * Check boot time settings for the device. 
* The found settings are set for the device to be used * later in the device probing. * Returns 0 if no settings found, 1 if they are. */ int netdev_boot_setup_check(struct net_device *dev) { struct netdev_boot_setup *s = dev_boot_setup; int i; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && !strcmp(dev->name, s[i].name)) { dev->irq = s[i].map.irq; dev->base_addr = s[i].map.base_addr; dev->mem_start = s[i].map.mem_start; dev->mem_end = s[i].map.mem_end; return 1; } } return 0; } EXPORT_SYMBOL(netdev_boot_setup_check); /** * netdev_boot_base - get address from boot time settings * @prefix: prefix for network device * @unit: id for network device * * Check boot time settings for the base address of device. * The found settings are set for the device to be used * later in the device probing. * Returns 0 if no settings found. */ unsigned long netdev_boot_base(const char *prefix, int unit) { const struct netdev_boot_setup *s = dev_boot_setup; char name[IFNAMSIZ]; int i; sprintf(name, "%s%d", prefix, unit); /* * If device already registered then return base of 1 * to indicate not to probe for this interface */ if (__dev_get_by_name(&init_net, name)) return 1; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) if (!strcmp(name, s[i].name)) return s[i].map.base_addr; return 0; } /* * Saves at boot time configured settings for any netdevice. 
*/ int __init netdev_boot_setup(char *str) { int ints[5]; struct ifmap map; str = get_options(str, ARRAY_SIZE(ints), ints); if (!str || !*str) return 0; /* Save settings */ memset(&map, 0, sizeof(map)); if (ints[0] > 0) map.irq = ints[1]; if (ints[0] > 1) map.base_addr = ints[2]; if (ints[0] > 2) map.mem_start = ints[3]; if (ints[0] > 3) map.mem_end = ints[4]; /* Add new entry to the list */ return netdev_boot_setup_add(str, &map); } __setup("netdev=", netdev_boot_setup); /******************************************************************************* Device Interface Subroutines *******************************************************************************/ /** * __dev_get_by_name - find a device by its name * @net: the applicable net namespace * @name: name to find * * Find an interface by name. Must be called under RTNL semaphore * or @dev_base_lock. If the name is found a pointer to the device * is returned. If the name is not found then %NULL is returned. The * reference counters are not incremented so the caller must be * careful with locks. */ struct net_device *__dev_get_by_name(struct net *net, const char *name) { struct hlist_node *p; struct net_device *dev; struct hlist_head *head = dev_name_hash(net, name); hlist_for_each_entry(dev, p, head, name_hlist) if (!strncmp(dev->name, name, IFNAMSIZ)) return dev; return NULL; } EXPORT_SYMBOL(__dev_get_by_name); /** * dev_get_by_name_rcu - find a device by its name * @net: the applicable net namespace * @name: name to find * * Find an interface by name. * If the name is found a pointer to the device is returned. * If the name is not found then %NULL is returned. * The reference counters are not incremented so the caller must be * careful with locks. The caller must hold RCU lock. 
*/ struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) { struct hlist_node *p; struct net_device *dev; struct hlist_head *head = dev_name_hash(net, name); hlist_for_each_entry_rcu(dev, p, head, name_hlist) if (!strncmp(dev->name, name, IFNAMSIZ)) return dev; return NULL; } EXPORT_SYMBOL(dev_get_by_name_rcu); /** * dev_get_by_name - find a device by its name * @net: the applicable net namespace * @name: name to find * * Find an interface by name. This can be called from any * context and does its own locking. The returned handle has * the usage count incremented and the caller must use dev_put() to * release it when it is no longer needed. %NULL is returned if no * matching device is found. */ struct net_device *dev_get_by_name(struct net *net, const char *name) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); if (dev) dev_hold(dev); rcu_read_unlock(); return dev; } EXPORT_SYMBOL(dev_get_by_name); /** * __dev_get_by_index - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device * * Search for an interface by index. Returns %NULL if the device * is not found or a pointer to the device. The device has not * had its reference counter increased so the caller must be careful * about locking. The caller must hold either the RTNL semaphore * or @dev_base_lock. */ struct net_device *__dev_get_by_index(struct net *net, int ifindex) { struct hlist_node *p; struct net_device *dev; struct hlist_head *head = dev_index_hash(net, ifindex); hlist_for_each_entry(dev, p, head, index_hlist) if (dev->ifindex == ifindex) return dev; return NULL; } EXPORT_SYMBOL(__dev_get_by_index); /** * dev_get_by_index_rcu - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device * * Search for an interface by index. Returns %NULL if the device * is not found or a pointer to the device. 
The device has not * had its reference counter increased so the caller must be careful * about locking. The caller must hold RCU lock. */ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) { struct hlist_node *p; struct net_device *dev; struct hlist_head *head = dev_index_hash(net, ifindex); hlist_for_each_entry_rcu(dev, p, head, index_hlist) if (dev->ifindex == ifindex) return dev; return NULL; } EXPORT_SYMBOL(dev_get_by_index_rcu); /** * dev_get_by_index - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device * * Search for an interface by index. Returns NULL if the device * is not found or a pointer to the device. The device returned has * had a reference added and the pointer is safe until the user calls * dev_put to indicate they have finished with it. */ struct net_device *dev_get_by_index(struct net *net, int ifindex) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); if (dev) dev_hold(dev); rcu_read_unlock(); return dev; } EXPORT_SYMBOL(dev_get_by_index); /** * dev_getbyhwaddr_rcu - find a device by its hardware address * @net: the applicable net namespace * @type: media type of device * @ha: hardware address * * Search for an interface by MAC address. Returns NULL if the device * is not found or a pointer to the device. * The caller must hold RCU or RTNL. 
* The returned device has not had its ref count increased * and the caller must therefore be careful about locking * */ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *ha) { struct net_device *dev; for_each_netdev_rcu(net, dev) if (dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len)) return dev; return NULL; } EXPORT_SYMBOL(dev_getbyhwaddr_rcu); struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev; ASSERT_RTNL(); for_each_netdev(net, dev) if (dev->type == type) return dev; return NULL; } EXPORT_SYMBOL(__dev_getfirstbyhwtype); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev, *ret = NULL; rcu_read_lock(); for_each_netdev_rcu(net, dev) if (dev->type == type) { dev_hold(dev); ret = dev; break; } rcu_read_unlock(); return ret; } EXPORT_SYMBOL(dev_getfirstbyhwtype); /** * dev_get_by_flags_rcu - find any device with given flags * @net: the applicable net namespace * @if_flags: IFF_* values * @mask: bitmask of bits in if_flags to check * * Search for any interface with the given flags. Returns NULL if a device * is not found or a pointer to the device. Must be called inside * rcu_read_lock(), and result refcount is unchanged. */ struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags, unsigned short mask) { struct net_device *dev, *ret; ret = NULL; for_each_netdev_rcu(net, dev) { if (((dev->flags ^ if_flags) & mask) == 0) { ret = dev; break; } } return ret; } EXPORT_SYMBOL(dev_get_by_flags_rcu); /** * dev_valid_name - check if name is okay for network device * @name: name string * * Network device names need to be valid file names to * to allow sysfs to work. We also disallow any kind of * whitespace. 
*/ bool dev_valid_name(const char *name) { if (*name == '\0') return false; if (strlen(name) >= IFNAMSIZ) return false; if (!strcmp(name, ".") || !strcmp(name, "..")) return false; while (*name) { if (*name == '/' || isspace(*name)) return false; name++; } return true; } EXPORT_SYMBOL(dev_valid_name); /** * __dev_alloc_name - allocate a name for a device * @net: network namespace to allocate the device name in * @name: name format string * @buf: scratch buffer and result name string * * Passed a format string - eg "lt%d" it will try and find a suitable * id. It scans list of devices to build up a free map, then chooses * the first empty slot. The caller must hold the dev_base or rtnl lock * while allocating the name and adding the device in order to avoid * duplicates. * Limited to bits_per_byte * page size devices (ie 32K on most platforms). * Returns the number of the unit assigned or a negative errno code. */ static int __dev_alloc_name(struct net *net, const char *name, char *buf) { int i = 0; const char *p; const int max_netdevices = 8*PAGE_SIZE; unsigned long *inuse; struct net_device *d; p = strnchr(name, IFNAMSIZ-1, '%'); if (p) { /* * Verify the string as this thing may have come from * the user. There must be either one "%d" and no other "%" * characters. 
*/ if (p[1] != 'd' || strchr(p + 2, '%')) return -EINVAL; /* Use one page as a bit array of possible slots */ inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); if (!inuse) return -ENOMEM; for_each_netdev(net, d) { if (!sscanf(d->name, name, &i)) continue; if (i < 0 || i >= max_netdevices) continue; /* avoid cases where sscanf is not exact inverse of printf */ snprintf(buf, IFNAMSIZ, name, i); if (!strncmp(buf, d->name, IFNAMSIZ)) set_bit(i, inuse); } i = find_first_zero_bit(inuse, max_netdevices); free_page((unsigned long) inuse); } if (buf != name) snprintf(buf, IFNAMSIZ, name, i); if (!__dev_get_by_name(net, buf)) return i; /* It is possible to run out of possible slots * when the name is long and there isn't enough space left * for the digits, or if all bits are used. */ return -ENFILE; } /** * dev_alloc_name - allocate a name for a device * @dev: device * @name: name format string * * Passed a format string - eg "lt%d" it will try and find a suitable * id. It scans list of devices to build up a free map, then chooses * the first empty slot. The caller must hold the dev_base or rtnl lock * while allocating the name and adding the device in order to avoid * duplicates. * Limited to bits_per_byte * page size devices (ie 32K on most platforms). * Returns the number of the unit assigned or a negative errno code. 
*/ int dev_alloc_name(struct net_device *dev, const char *name) { char buf[IFNAMSIZ]; struct net *net; int ret; BUG_ON(!dev_net(dev)); net = dev_net(dev); ret = __dev_alloc_name(net, name, buf); if (ret >= 0) strlcpy(dev->name, buf, IFNAMSIZ); return ret; } EXPORT_SYMBOL(dev_alloc_name); static int dev_get_valid_name(struct net_device *dev, const char *name) { struct net *net; BUG_ON(!dev_net(dev)); net = dev_net(dev); if (!dev_valid_name(name)) return -EINVAL; if (strchr(name, '%')) return dev_alloc_name(dev, name); else if (__dev_get_by_name(net, name)) return -EEXIST; else if (dev->name != name) strlcpy(dev->name, name, IFNAMSIZ); return 0; } /** * dev_change_name - change name of a device * @dev: device * @newname: name (or format string) must be at least IFNAMSIZ * * Change name of a device, can pass format strings "eth%d". * for wildcarding. */ int dev_change_name(struct net_device *dev, const char *newname) { char oldname[IFNAMSIZ]; int err = 0; int ret; struct net *net; ASSERT_RTNL(); BUG_ON(!dev_net(dev)); net = dev_net(dev); if (dev->flags & IFF_UP) return -EBUSY; if (strncmp(newname, dev->name, IFNAMSIZ) == 0) return 0; memcpy(oldname, dev->name, IFNAMSIZ); err = dev_get_valid_name(dev, newname); if (err < 0) return err; rollback: ret = device_rename(&dev->dev, dev->name); if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); return ret; } write_lock_bh(&dev_base_lock); hlist_del_rcu(&dev->name_hlist); write_unlock_bh(&dev_base_lock); synchronize_rcu(); write_lock_bh(&dev_base_lock); hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); write_unlock_bh(&dev_base_lock); ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); ret = notifier_to_errno(ret); if (ret) { /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; memcpy(dev->name, oldname, IFNAMSIZ); goto rollback; } else { pr_err("%s: name change rollback failed: %d\n", dev->name, ret); } } return err; } /** * dev_set_alias - change ifalias of a device 
* @dev: device * @alias: name up to IFALIASZ * @len: limit of bytes to copy from info * * Set ifalias for a device, */ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) { ASSERT_RTNL(); if (len >= IFALIASZ) return -EINVAL; if (!len) { if (dev->ifalias) { kfree(dev->ifalias); dev->ifalias = NULL; } return 0; } dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); if (!dev->ifalias) return -ENOMEM; strlcpy(dev->ifalias, alias, len+1); return len; } /** * netdev_features_change - device changes features * @dev: device to cause notification * * Called to indicate a device has changed features. */ void netdev_features_change(struct net_device *dev) { call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); } EXPORT_SYMBOL(netdev_features_change); /** * netdev_state_change - device changes state * @dev: device to cause notification * * Called to indicate a device has changed state. This function calls * the notifier chains for netdev_chain and sends a NEWLINK message * to the routing socket. */ void netdev_state_change(struct net_device *dev) { if (dev->flags & IFF_UP) { call_netdevice_notifiers(NETDEV_CHANGE, dev); rtmsg_ifinfo(RTM_NEWLINK, dev, 0); } } EXPORT_SYMBOL(netdev_state_change); int netdev_bonding_change(struct net_device *dev, unsigned long event) { return call_netdevice_notifiers(event, dev); } EXPORT_SYMBOL(netdev_bonding_change); /** * dev_load - load a network module * @net: the applicable net namespace * @name: name of interface * * If a network interface is not present and the process has suitable * privileges this function loads the module. If module loading is not * available in this kernel then it becomes a nop. 
*/ void dev_load(struct net *net, const char *name) { struct net_device *dev; int no_module; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); rcu_read_unlock(); no_module = !dev; if (no_module && capable(CAP_NET_ADMIN)) no_module = request_module("netdev-%s", name); if (no_module && capable(CAP_SYS_MODULE)) { if (!request_module("%s", name)) pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n", name); } } EXPORT_SYMBOL(dev_load); static int __dev_open(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; int ret; ASSERT_RTNL(); if (!netif_device_present(dev)) return -ENODEV; ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); ret = notifier_to_errno(ret); if (ret) return ret; set_bit(__LINK_STATE_START, &dev->state); if (ops->ndo_validate_addr) ret = ops->ndo_validate_addr(dev); if (!ret && ops->ndo_open) ret = ops->ndo_open(dev); if (ret) clear_bit(__LINK_STATE_START, &dev->state); else { dev->flags |= IFF_UP; net_dmaengine_get(); dev_set_rx_mode(dev); dev_activate(dev); } return ret; } /** * dev_open - prepare an interface for use. * @dev: device to open * * Takes a device from down to up state. The device's private open * function is invoked and then the multicast lists are loaded. Finally * the device is moved into the up state and a %NETDEV_UP message is * sent to the netdev notifier chain. * * Calling this function on an active interface is a nop. On a failure * a negative errno code is returned. 
*/ int dev_open(struct net_device *dev) { int ret; if (dev->flags & IFF_UP) return 0; ret = __dev_open(dev); if (ret < 0) return ret; rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); call_netdevice_notifiers(NETDEV_UP, dev); return ret; } EXPORT_SYMBOL(dev_open); static int __dev_close_many(struct list_head *head) { struct net_device *dev; ASSERT_RTNL(); might_sleep(); list_for_each_entry(dev, head, unreg_list) { call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); clear_bit(__LINK_STATE_START, &dev->state); /* Synchronize to scheduled poll. We cannot touch poll list, it * can be even on different cpu. So just clear netif_running(). * * dev->stop() will invoke napi_disable() on all of it's * napi_struct instances on this device. */ smp_mb__after_clear_bit(); /* Commit netif_running(). */ } dev_deactivate_many(head); list_for_each_entry(dev, head, unreg_list) { const struct net_device_ops *ops = dev->netdev_ops; /* * Call the device specific close. This cannot fail. * Only if device is UP * * We allow it to be called even after a DETACH hot-plug * event. */ if (ops->ndo_stop) ops->ndo_stop(dev); dev->flags &= ~IFF_UP; net_dmaengine_put(); } return 0; } static int __dev_close(struct net_device *dev) { int retval; LIST_HEAD(single); list_add(&dev->unreg_list, &single); retval = __dev_close_many(&single); list_del(&single); return retval; } static int dev_close_many(struct list_head *head) { struct net_device *dev, *tmp; LIST_HEAD(tmp_list); list_for_each_entry_safe(dev, tmp, head, unreg_list) if (!(dev->flags & IFF_UP)) list_move(&dev->unreg_list, &tmp_list); __dev_close_many(head); list_for_each_entry(dev, head, unreg_list) { rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); call_netdevice_notifiers(NETDEV_DOWN, dev); } /* rollback_registered_many needs the complete original list */ list_splice(&tmp_list, head); return 0; } /** * dev_close - shutdown an interface. * @dev: device to shutdown * * This function moves an active device into down state. 
A * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier * chain. */ int dev_close(struct net_device *dev) { if (dev->flags & IFF_UP) { LIST_HEAD(single); list_add(&dev->unreg_list, &single); dev_close_many(&single); list_del(&single); } return 0; } EXPORT_SYMBOL(dev_close); /** * dev_disable_lro - disable Large Receive Offload on a device * @dev: device * * Disable Large Receive Offload (LRO) on a net device. Must be * called under RTNL. This is needed if received packets may be * forwarded to another interface. */ void dev_disable_lro(struct net_device *dev) { /* * If we're trying to disable lro on a vlan device * use the underlying physical device instead */ if (is_vlan_dev(dev)) dev = vlan_dev_real_dev(dev); dev->wanted_features &= ~NETIF_F_LRO; netdev_update_features(dev); if (unlikely(dev->features & NETIF_F_LRO)) netdev_WARN(dev, "failed to disable LRO!\n"); } EXPORT_SYMBOL(dev_disable_lro); static int dev_boot_phase = 1; /** * register_netdevice_notifier - register a network notifier block * @nb: notifier * * Register a notifier to be called when network device events occur. * The notifier passed is linked into the kernel structures and must * not be reused until it has been unregistered. A negative errno code * is returned on a failure. * * When registered all registration and up events are replayed * to the new notifier to allow device to have a race free * view of the network device list. 
*/ int register_netdevice_notifier(struct notifier_block *nb) { struct net_device *dev; struct net_device *last; struct net *net; int err; rtnl_lock(); err = raw_notifier_chain_register(&netdev_chain, nb); if (err) goto unlock; if (dev_boot_phase) goto unlock; for_each_net(net) { for_each_netdev(net, dev) { err = nb->notifier_call(nb, NETDEV_REGISTER, dev); err = notifier_to_errno(err); if (err) goto rollback; if (!(dev->flags & IFF_UP)) continue; nb->notifier_call(nb, NETDEV_UP, dev); } } unlock: rtnl_unlock(); return err; rollback: last = dev; for_each_net(net) { for_each_netdev(net, dev) { if (dev == last) goto outroll; if (dev->flags & IFF_UP) { nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); nb->notifier_call(nb, NETDEV_DOWN, dev); } nb->notifier_call(nb, NETDEV_UNREGISTER, dev); nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); } } outroll: raw_notifier_chain_unregister(&netdev_chain, nb); goto unlock; } EXPORT_SYMBOL(register_netdevice_notifier); /** * unregister_netdevice_notifier - unregister a network notifier block * @nb: notifier * * Unregister a notifier previously registered by * register_netdevice_notifier(). The notifier is unlinked into the * kernel structures and may then be reused. A negative errno code * is returned on a failure. * * After unregistering unregister and down device events are synthesized * for all devices on the device list to the removed notifier to remove * the need for special case cleanup code. 
*/ int unregister_netdevice_notifier(struct notifier_block *nb) { struct net_device *dev; struct net *net; int err; rtnl_lock(); err = raw_notifier_chain_unregister(&netdev_chain, nb); if (err) goto unlock; for_each_net(net) { for_each_netdev(net, dev) { if (dev->flags & IFF_UP) { nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); nb->notifier_call(nb, NETDEV_DOWN, dev); } nb->notifier_call(nb, NETDEV_UNREGISTER, dev); nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); } } unlock: rtnl_unlock(); return err; } EXPORT_SYMBOL(unregister_netdevice_notifier); /** * call_netdevice_notifiers - call all network notifier blocks * @val: value passed unmodified to notifier function * @dev: net_device pointer passed unmodified to notifier function * * Call all network notifier blocks. Parameters and return value * are as for raw_notifier_call_chain(). */ int call_netdevice_notifiers(unsigned long val, struct net_device *dev) { ASSERT_RTNL(); return raw_notifier_call_chain(&netdev_chain, val, dev); } EXPORT_SYMBOL(call_netdevice_notifiers); static struct static_key netstamp_needed __read_mostly; #ifdef HAVE_JUMP_LABEL /* We are not allowed to call static_key_slow_dec() from irq context * If net_disable_timestamp() is called from irq context, defer the * static_key_slow_dec() calls. 
*/ static atomic_t netstamp_needed_deferred; #endif void net_enable_timestamp(void) { #ifdef HAVE_JUMP_LABEL int deferred = atomic_xchg(&netstamp_needed_deferred, 0); if (deferred) { while (--deferred) static_key_slow_dec(&netstamp_needed); return; } #endif WARN_ON(in_interrupt()); static_key_slow_inc(&netstamp_needed); } EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { #ifdef HAVE_JUMP_LABEL if (in_interrupt()) { atomic_inc(&netstamp_needed_deferred); return; } #endif static_key_slow_dec(&netstamp_needed); } EXPORT_SYMBOL(net_disable_timestamp); static inline void net_timestamp_set(struct sk_buff *skb) { skb->tstamp.tv64 = 0; if (static_key_false(&netstamp_needed)) __net_timestamp(skb); } #define net_timestamp_check(COND, SKB) \ if (static_key_false(&netstamp_needed)) { \ if ((COND) && !(SKB)->tstamp.tv64) \ __net_timestamp(SKB); \ } \ static int net_hwtstamp_validate(struct ifreq *ifr) { struct hwtstamp_config cfg; enum hwtstamp_tx_types tx_type; enum hwtstamp_rx_filters rx_filter; int tx_type_valid = 0; int rx_filter_valid = 0; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; if (cfg.flags) /* reserved for future extensions */ return -EINVAL; tx_type = cfg.tx_type; rx_filter = cfg.rx_filter; switch (tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: case HWTSTAMP_TX_ONESTEP_SYNC: tx_type_valid = 1; break; } switch (rx_filter) { case HWTSTAMP_FILTER_NONE: case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: rx_filter_valid = 1; break; } if (!tx_type_valid || 
!rx_filter_valid) return -ERANGE; return 0; } static inline bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb) { unsigned int len; if (!(dev->flags & IFF_UP)) return false; len = dev->mtu + dev->hard_header_len + VLAN_HLEN; if (skb->len <= len) return true; /* if TSO is enabled, we don't care about the length as the packet * could be forwarded without being segmented before */ if (skb_is_gso(skb)) return true; return false; } /** * dev_forward_skb - loopback an skb to another netif * * @dev: destination network device * @skb: buffer to forward * * return values: * NET_RX_SUCCESS (no congestion) * NET_RX_DROP (packet was dropped, but freed) * * dev_forward_skb can be used for injecting an skb from the * start_xmit function of one device into the receive queue * of another device. * * The receiving device may be in another namespace, so * we have to clear all information in the skb that could * impact namespace isolation. */ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { if (skb_copy_ubufs(skb, GFP_ATOMIC)) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; } } skb_orphan(skb); nf_reset(skb); if (unlikely(!is_skb_forwardable(dev, skb))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; } skb->skb_iif = 0; skb->dev = dev; skb_dst_drop(skb); skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, dev); skb->mark = 0; secpath_reset(skb); nf_reset(skb); return netif_rx(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); static inline int deliver_skb(struct sk_buff *skb, struct packet_type *pt_prev, struct net_device *orig_dev) { atomic_inc(&skb->users); return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } /* * Support routine. Sends outgoing frames to any network * taps currently in use. 
 */
/* Deliver a copy of an outgoing @skb to every ETH_P_ALL tap (e.g.
 * packet sockets) except the one it originated from.  One clone (skb2)
 * is shared by all taps: deliver_skb() bumps its refcount for each
 * intermediate tap, and the final tap consumes the last reference via
 * the direct pt_prev->func() call.
 */
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			if (pt_prev) {
				/* Deliver the clone to the previous tap and
				 * remember this one; delivery is deferred one
				 * step so the last tap gets the final ref.
				 */
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			/* First matching tap: make the shared clone. */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					pr_crit("protocol %04x is buggy, dev %s\n",
						ntohs(skb2->protocol),
						dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. If is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
*/ static void netif_setup_tc(struct net_device *dev, unsigned int txq) { int i; struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; /* If TC0 is invalidated disable TC mapping */ if (tc->offset + tc->count > txq) { pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); dev->num_tc = 0; return; } /* Invalidated prio to tc mappings set to TC0 */ for (i = 1; i < TC_BITMASK + 1; i++) { int q = netdev_get_prio_tc_map(dev, i); tc = &dev->tc_to_txq[q]; if (tc->offset + tc->count > txq) { pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", i, q); netdev_set_prio_tc_map(dev, i, 0); } } } /* * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues * greater then real_num_tx_queues stale skbs on the qdisc must be flushed. */ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) { int rc; if (txq < 1 || txq > dev->num_tx_queues) return -EINVAL; if (dev->reg_state == NETREG_REGISTERED || dev->reg_state == NETREG_UNREGISTERING) { ASSERT_RTNL(); rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, txq); if (rc) return rc; if (dev->num_tc) netif_setup_tc(dev, txq); if (txq < dev->real_num_tx_queues) qdisc_reset_all_tx_gt(dev, txq); } dev->real_num_tx_queues = txq; return 0; } EXPORT_SYMBOL(netif_set_real_num_tx_queues); #ifdef CONFIG_RPS /** * netif_set_real_num_rx_queues - set actual number of RX queues used * @dev: Network device * @rxq: Actual number of RX queues * * This must be called either with the rtnl_lock held or before * registration of the net device. Returns 0 on success, or a * negative error code. If called before registration, it always * succeeds. 
*/ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) { int rc; if (rxq < 1 || rxq > dev->num_rx_queues) return -EINVAL; if (dev->reg_state == NETREG_REGISTERED) { ASSERT_RTNL(); rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, rxq); if (rc) return rc; } dev->real_num_rx_queues = rxq; return 0; } EXPORT_SYMBOL(netif_set_real_num_rx_queues); #endif static inline void __netif_reschedule(struct Qdisc *q) { struct softnet_data *sd; unsigned long flags; local_irq_save(flags); sd = &__get_cpu_var(softnet_data); q->next_sched = NULL; *sd->output_queue_tailp = q; sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); } void __netif_schedule(struct Qdisc *q) { if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) __netif_reschedule(q); } EXPORT_SYMBOL(__netif_schedule); void dev_kfree_skb_irq(struct sk_buff *skb) { if (atomic_dec_and_test(&skb->users)) { struct softnet_data *sd; unsigned long flags; local_irq_save(flags); sd = &__get_cpu_var(softnet_data); skb->next = sd->completion_queue; sd->completion_queue = skb; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); } } EXPORT_SYMBOL(dev_kfree_skb_irq); void dev_kfree_skb_any(struct sk_buff *skb) { if (in_irq() || irqs_disabled()) dev_kfree_skb_irq(skb); else dev_kfree_skb(skb); } EXPORT_SYMBOL(dev_kfree_skb_any); /** * netif_device_detach - mark device as removed * @dev: network device * * Mark device as removed from system and therefore no longer available. */ void netif_device_detach(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && netif_running(dev)) { netif_tx_stop_all_queues(dev); } } EXPORT_SYMBOL(netif_device_detach); /** * netif_device_attach - mark device as attached * @dev: network device * * Mark device as attached from system and restart if needed. 
*/ void netif_device_attach(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && netif_running(dev)) { netif_tx_wake_all_queues(dev); __netdev_watchdog_up(dev); } } EXPORT_SYMBOL(netif_device_attach); static void skb_warn_bad_offload(const struct sk_buff *skb) { static const netdev_features_t null_features = 0; struct net_device *dev = skb->dev; const char *driver = ""; if (dev && dev->dev.parent) driver = dev_driver_string(dev->dev.parent); WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " "gso_type=%d ip_summed=%d\n", driver, dev ? &dev->features : &null_features, skb->sk ? &skb->sk->sk_route_caps : &null_features, skb->len, skb->data_len, skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type, skb->ip_summed); } /* * Invalidate hardware checksum when packet is to be mangled, and * complete checksum manually on outgoing path. */ int skb_checksum_help(struct sk_buff *skb) { __wsum csum; int ret = 0, offset; if (skb->ip_summed == CHECKSUM_COMPLETE) goto out_set_summed; if (unlikely(skb_shinfo(skb)->gso_size)) { skb_warn_bad_offload(skb); return -EINVAL; } offset = skb_checksum_start_offset(skb); BUG_ON(offset >= skb_headlen(skb)); csum = skb_checksum(skb, offset, skb->len - offset, 0); offset += skb->csum_offset; BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(__sum16))) { ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (ret) goto out; } *(__sum16 *)(skb->data + offset) = csum_fold(csum); out_set_summed: skb->ip_summed = CHECKSUM_NONE; out: return ret; } EXPORT_SYMBOL(skb_checksum_help); /** * skb_gso_segment - Perform segmentation on skb. * @skb: buffer to segment * @features: features for the output path (see dev->features) * * This function segments the given skb and returns a list of segments. * * It may return NULL if the skb requires no segmentation. This is * only possible when GSO is used for verifying header integrity. 
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	/* Walk past (possibly stacked) 802.1Q headers to find the real
	 * encapsulated protocol for the ptype lookup below.
	 */
	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	/* Undo the earlier pull so the caller sees the MAC header again. */
	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n",
		       dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	/* Any highmem frag is off-limits to a non-HIGHDMA device. */
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	/* On physically-addressed DMA buses, also respect the device's
	 * DMA mask for each frag's physical address.
	 */
	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			if (!pdev->dma_mask ||
			    addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

/* Per-skb control block used while a software-GSO segment list hangs off
 * skb->next; remembers the original destructor so it can be restored.
 */
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

/* Free all unsent GSO segments chained on skb->next, then invoke the
 * original destructor that dev_gso_segment() stashed in the cb.
 */
static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 * dev_gso_segment - Perform emulated hardware segmentation on skb.
 * @skb: buffer to segment
 * @features: device features as applicable to this skb
 *
 * This function segments the given skb and stores the list of segments
 * in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

/*
 * Try to orphan skb early, right before transmission by the device.
 * We cannot orphan skb if tx timestamp is requested or the sk-reference
 * is needed on driver level for other reasons, e.g. see net/can/raw.c
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && !skb_shinfo(skb)->tx_flags) {
		/* skb_tx_hash() wont be able to get sk.
		 * We copy sk_hash into skb->rxhash
		 */
		if (!skb->rxhash)
			skb->rxhash = sk->sk_hash;
		skb_orphan(skb);
	}
}

/* True when @features allow checksum offload for @protocol (generic,
 * IPv4-, IPv6- or FCoE-specific offload bits).
 */
static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

/* Strip feature bits the device cannot honour for this skb: no checksum
 * offload for the protocol implies no SG either (the stack would have to
 * linearize anyway to checksum); likewise drop SG for illegal-highdma skbs.
 */
static netdev_features_t harmonize_features(struct sk_buff *skb,
	__be16 protocol, netdev_features_t features)
{
	if (!can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

/* Compute the effective feature set for transmitting this particular skb,
 * narrowing dev->features for VLAN-tagged or VLAN-encapsulated traffic.
 */
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		/* Double-tagged: only a conservative subset is safe. */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
		return harmonize_features(skb, protocol, features);
	}
}
EXPORT_SYMBOL(netif_skb_features);

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
*/ static inline int skb_needs_linearize(struct sk_buff *skb, int features) { return skb_is_nonlinear(skb) && ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) || (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); } int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) { const struct net_device_ops *ops = dev->netdev_ops; int rc = NETDEV_TX_OK; unsigned int skb_len; if (likely(!skb->next)) { netdev_features_t features; /* * If device doesn't need skb->dst, release it right now while * its hot in this cpu cache */ if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(skb); if (!list_empty(&ptype_all)) dev_queue_xmit_nit(skb, dev); skb_orphan_try(skb); features = netif_skb_features(skb); if (vlan_tx_tag_present(skb) && !(features & NETIF_F_HW_VLAN_TX)) { skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); if (unlikely(!skb)) goto out; skb->vlan_tci = 0; } if (netif_needs_gso(skb, features)) { if (unlikely(dev_gso_segment(skb, features))) goto out_kfree_skb; if (skb->next) goto gso; } else { if (skb_needs_linearize(skb, features) && __skb_linearize(skb)) goto out_kfree_skb; /* If packet is not checksummed and device does not * support checksumming for this protocol, complete * checksumming here. 
*/ if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_set_transport_header(skb, skb_checksum_start_offset(skb)); if (!(features & NETIF_F_ALL_CSUM) && skb_checksum_help(skb)) goto out_kfree_skb; } } skb_len = skb->len; rc = ops->ndo_start_xmit(skb, dev); trace_net_dev_xmit(skb, rc, dev, skb_len); if (rc == NETDEV_TX_OK) txq_trans_update(txq); return rc; } gso: do { struct sk_buff *nskb = skb->next; skb->next = nskb->next; nskb->next = NULL; /* * If device doesn't need nskb->dst, release it right now while * its hot in this cpu cache */ if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(nskb); skb_len = nskb->len; rc = ops->ndo_start_xmit(nskb, dev); trace_net_dev_xmit(nskb, rc, dev, skb_len); if (unlikely(rc != NETDEV_TX_OK)) { if (rc & ~NETDEV_TX_MASK) goto out_kfree_gso_skb; nskb->next = skb->next; skb->next = nskb; return rc; } txq_trans_update(txq); if (unlikely(netif_xmit_stopped(txq) && skb->next)) return NETDEV_TX_BUSY; } while (skb->next); out_kfree_gso_skb: if (likely(skb->next == NULL)) skb->destructor = DEV_GSO_CB(skb)->destructor; out_kfree_skb: kfree_skb(skb); out: return rc; } static u32 hashrnd __read_mostly; /* * Returns a Tx hash based on the given packet descriptor a Tx queues' number * to be used as a distribution range. 
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	/* Preserve RX-to-TX queue affinity when the RX queue is known. */
	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	/* With traffic classes, hash within the class's queue range. */
	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol ^ skb->rxhash;
	hash = jhash_1word(hash, hashrnd);

	/* Scale the 32-bit hash into [qoffset, qoffset + qcount). */
	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);

/* Clamp an out-of-range queue index to 0, warning about the driver bug. */
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
				dev->name, queue_index,
				dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}

/* XPS lookup: pick a TX queue from the current CPU's configured map,
 * or -1 when XPS is unconfigured/compiled out or yields a bad index.
 */
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

/* Select the TX queue for @skb: driver hook first, then the socket's
 * cached queue, then XPS, then the flow hash; cache the result on the
 * socket when its destination is still current.
 */
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	int queue_index;
	const struct net_device_ops *ops = dev->netdev_ops;

	if (dev->real_num_tx_queues == 1)
		queue_index = 0;
	else if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;
		queue_index = sk_tx_queue_get(sk);

		if (queue_index < 0 || skb->ooo_okay ||
		    queue_index >= dev->real_num_tx_queues) {
			int old_index = queue_index;

			queue_index = get_xps_queue(dev, skb);
			if (queue_index < 0)
				queue_index = skb_tx_hash(dev, skb);

			if (queue_index != old_index && sk) {
				struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

/* Enqueue @skb on qdisc @q (or transmit directly for an empty
 * work-conserving qdisc) and run the qdisc.  Uses q->busylock to
 * serialize contended enqueuers before taking the qdisc root lock.
 */
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
/* Apply the netprio cgroup's priority when the skb has none of its own. */
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if ((!skb->priority) && (skb->sk) && map)
		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
}
#else
#define skb_update_prio(skb)
#endif

/* Guards against unbounded recursion through stacked virtual devices. */
static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 * dev_queue_xmit - transmit a buffer
 * @skb: buffer to transmit
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 * I notice this method can also return errors from the queue disciplines,
 * including NET_XMIT_DROP, which is a positive value. So, errors can also
 * be positive.
 *
 * Regardless of the return value, the skb is consumed, so it is currently
 * difficult to retry a send to this method. (You can bump the ref count
 * before sending to hold a reference for retry if you are careful.)
 *
 * When calling this method, interrupts MUST be enabled.
This is because
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	/* Devices with a qdisc go through the normal enqueue path. */
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			/* Bound recursion through stacked virtual devices. */
			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				pr_crit("Virtual device %s asks to queue packet!\n",
					dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			if (net_ratelimit())
				pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
					dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	/* Canonicalize port order so both flow directions hash alike. */
	if (keys.ports) {
		if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
			swap(keys.port16[0], keys.port16[1]);
		skb->l4_rxhash = 1;
	}

	/* get a consistent hash (same value on both flow directions) */
	if ((__force u32)keys.dst < (__force u32)keys.src)
		swap(keys.dst, keys.src);

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share.
 */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

/* Record @next_cpu as the flow's steering target; with RFS acceleration
 * also try to move the flow to a matching hardware RX queue via
 * ndo_rx_flow_steer.  Returns the (possibly re-pointed) flow entry.
 */
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		/* Invalidate the old entry only if it still points at the
		 * filter we just (re)installed elsewhere.
		 */
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		/* Trivial single-CPU map with no flow table: short-circuit. */
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0))
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	/* Fall back to plain RPS hashing over the configured CPU map. */
	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		/* Keep the filter while the target CPU's backlog has not
		 * drained far past the flow's last enqueue point.
		 */
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef
CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		/* Remote CPU: chain it for the IPI batch sent at the end
		 * of this softirq pass.
		 */
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
EXPORT_SYMBOL(netif_rx);

/* Process-context variant: run any softirq raised by netif_rx() before
 * re-enabling preemption, so the packet is handled promptly.
 */
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

/* NET_TX_SOFTIRQ handler: free deferred skbs from the completion queue
 * and run qdiscs scheduled on this CPU's output queue.
 */
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		/* Detach the whole list with IRQs off, then free lazily. */
		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				/* Lock contended: reschedule unless the
				 * qdisc was deactivated meanwhile.
				 */
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if
(defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we dont have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you dont have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	/* Drop packets that have been redirected too many times. */
	if (unlikely(MAX_RED_LOOP < ttl++)) {
		if (net_ratelimit())
			pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
				skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

/* Run the ingress qdisc over @skb; delivers any pending tap first.
 * Returns the (unchanged) skb to continue processing, or NULL if the
 * ingress policy consumed or dropped it.
 */
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data
pointer that is used by rx handler
 *
 *	Register a receive hander for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Publish the data pointer before the handler so concurrent
	 * readers that see the handler also see its data.
	 */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive hander from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{

	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/* Core RX dispatch: runs taps, ingress policy, VLAN acceleration, the
 * device rx_handler and finally the per-protocol ptype handlers.
 * Called under rcu_read_lock via netif_receive_skb/NAPI.
 */
static int __netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->skb_iif)
		skb->skb_iif = skb->dev->ifindex;
	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:

	__this_cpu_inc(softnet_data.processed);

	/* Strip a hardware-untagged 802.1Q header in software. */
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all,
list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	rx_handler = rcu_dereference(skb->dev->rx_handler);

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		/* VLAN demux may retarget skb->dev; restart the pipeline. */
		if (vlan_do_receive(&skb, !rx_handler))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			/* fall through */
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		/* The last matching handler gets the skb itself (no clone). */
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	/* With RPS active, try to steer to the flow's target CPU backlog;
	 * otherwise process on this CPU.
	 */
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);

/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

/* Finish a GRO'd skb: invoke the protocol's gro_complete fixups (unless
 * the "super-packet" holds a single segment) and inject it into the
 * normal receive path.
 */
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

/* Flush all held GRO packets up the stack and reset the GRO list. */
inline void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
napi_gro_complete(skb); } napi->gro_count = 0; napi->gro_list = NULL; } EXPORT_SYMBOL(napi_gro_flush); enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { struct sk_buff **pp = NULL; struct packet_type *ptype; __be16 type = skb->protocol; struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; int same_flow; int mac_len; enum gro_result ret; if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) goto normal; if (skb_is_gso(skb) || skb_has_frag_list(skb)) goto normal; rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { if (ptype->type != type || ptype->dev || !ptype->gro_receive) continue; skb_set_network_header(skb, skb_gro_offset(skb)); mac_len = skb->network_header - skb->mac_header; skb->mac_len = mac_len; NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 0; NAPI_GRO_CB(skb)->free = 0; pp = ptype->gro_receive(&napi->gro_list, skb); break; } rcu_read_unlock(); if (&ptype->list == head) goto normal; same_flow = NAPI_GRO_CB(skb)->same_flow; ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; if (pp) { struct sk_buff *nskb = *pp; *pp = nskb->next; nskb->next = NULL; napi_gro_complete(nskb); napi->gro_count--; } if (same_flow) goto ok; if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS) goto normal; napi->gro_count++; NAPI_GRO_CB(skb)->count = 1; skb_shinfo(skb)->gso_size = skb_gro_len(skb); skb->next = napi->gro_list; napi->gro_list = skb; ret = GRO_HELD; pull: if (skb_headlen(skb) < skb_gro_offset(skb)) { int grow = skb_gro_offset(skb) - skb_headlen(skb); BUG_ON(skb->end - skb->tail < grow); memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); skb->tail += grow; skb->data_len -= grow; skb_shinfo(skb)->frags[0].page_offset += grow; skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow); if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) { skb_frag_unref(skb, 0); memmove(skb_shinfo(skb)->frags, skb_shinfo(skb)->frags + 1, --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); } } ok: return ret; normal: ret = GRO_NORMAL; goto pull; } EXPORT_SYMBOL(dev_gro_receive); static inline gro_result_t __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { struct sk_buff *p; unsigned int maclen = skb->dev->hard_header_len; for (p = napi->gro_list; p; p = p->next) { unsigned long diffs; diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; diffs |= p->vlan_tci ^ skb->vlan_tci; if (maclen == ETH_HLEN) diffs |= compare_ether_header(skb_mac_header(p), skb_gro_mac_header(skb)); else if (!diffs) diffs = memcmp(skb_mac_header(p), skb_gro_mac_header(skb), maclen); NAPI_GRO_CB(p)->same_flow = !diffs; NAPI_GRO_CB(p)->flush = 0; } return dev_gro_receive(napi, skb); } gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) { switch (ret) { case GRO_NORMAL: if (netif_receive_skb(skb)) ret = GRO_DROP; break; case GRO_DROP: case GRO_MERGED_FREE: kfree_skb(skb); break; case GRO_HELD: case GRO_MERGED: break; } return ret; } EXPORT_SYMBOL(napi_skb_finish); void skb_gro_reset_offset(struct sk_buff *skb) { 
NAPI_GRO_CB(skb)->data_offset = 0; NAPI_GRO_CB(skb)->frag0 = NULL; NAPI_GRO_CB(skb)->frag0_len = 0; if (skb->mac_header == skb->tail && !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) { NAPI_GRO_CB(skb)->frag0 = skb_frag_address(&skb_shinfo(skb)->frags[0]); NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]); } } EXPORT_SYMBOL(skb_gro_reset_offset); gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { skb_gro_reset_offset(skb); return napi_skb_finish(__napi_gro_receive(napi, skb), skb); } EXPORT_SYMBOL(napi_gro_receive); static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) { __skb_pull(skb, skb_headlen(skb)); /* restore the reserve we had after netdev_alloc_skb_ip_align() */ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); skb->vlan_tci = 0; skb->dev = napi->dev; skb->skb_iif = 0; napi->skb = skb; } struct sk_buff *napi_get_frags(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; if (!skb) { skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); if (skb) napi->skb = skb; } return skb; } EXPORT_SYMBOL(napi_get_frags); gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, gro_result_t ret) { switch (ret) { case GRO_NORMAL: case GRO_HELD: skb->protocol = eth_type_trans(skb, skb->dev); if (ret == GRO_HELD) skb_gro_pull(skb, -ETH_HLEN); else if (netif_receive_skb(skb)) ret = GRO_DROP; break; case GRO_DROP: case GRO_MERGED_FREE: napi_reuse_skb(napi, skb); break; case GRO_MERGED: break; } return ret; } EXPORT_SYMBOL(napi_frags_finish); struct sk_buff *napi_frags_skb(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; struct ethhdr *eth; unsigned int hlen; unsigned int off; napi->skb = NULL; skb_reset_mac_header(skb); skb_gro_reset_offset(skb); off = skb_gro_offset(skb); hlen = off + sizeof(*eth); eth = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, hlen)) { eth = skb_gro_header_slow(skb, hlen, off); if (unlikely(!eth)) { 
napi_reuse_skb(napi, skb); skb = NULL; goto out; } } skb_gro_pull(skb, sizeof(*eth)); /* * This works because the only protocols we care about don't require * special handling. We'll fix it up properly at the end. */ skb->protocol = eth->h_proto; out: return skb; } EXPORT_SYMBOL(napi_frags_skb); gro_result_t napi_gro_frags(struct napi_struct *napi) { struct sk_buff *skb = napi_frags_skb(napi); if (!skb) return GRO_DROP; return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); } EXPORT_SYMBOL(napi_gro_frags); /* * net_rps_action sends any pending IPI's for rps. * Note: called with local irq disabled, but exits with local irq enabled. */ static void net_rps_action_and_irq_enable(struct softnet_data *sd) { #ifdef CONFIG_RPS struct softnet_data *remsd = sd->rps_ipi_list; if (remsd) { sd->rps_ipi_list = NULL; local_irq_enable(); /* Send pending IPI's to kick RPS processing on remote cpus. */ while (remsd) { struct softnet_data *next = remsd->rps_ipi_next; if (cpu_online(remsd->cpu)) __smp_call_function_single(remsd->cpu, &remsd->csd, 0); remsd = next; } } else #endif local_irq_enable(); } static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); #ifdef CONFIG_RPS /* Check if we have pending ipi, its better to send them now, * not waiting net_rx_action() end. 
*/ if (sd->rps_ipi_list) { local_irq_disable(); net_rps_action_and_irq_enable(sd); } #endif napi->weight = weight_p; local_irq_disable(); while (work < quota) { struct sk_buff *skb; unsigned int qlen; while ((skb = __skb_dequeue(&sd->process_queue))) { local_irq_enable(); __netif_receive_skb(skb); local_irq_disable(); input_queue_head_incr(sd); if (++work >= quota) { local_irq_enable(); return work; } } rps_lock(sd); qlen = skb_queue_len(&sd->input_pkt_queue); if (qlen) skb_queue_splice_tail_init(&sd->input_pkt_queue, &sd->process_queue); if (qlen < quota - work) { /* * Inline a custom version of __napi_complete(). * only current cpu owns and manipulates this napi, * and NAPI_STATE_SCHED is the only possible flag set on backlog. * we can use a plain write instead of clear_bit(), * and we dont need an smp_mb() memory barrier. */ list_del(&napi->poll_list); napi->state = 0; quota = work + qlen; } rps_unlock(sd); } local_irq_enable(); return work; } /** * __napi_schedule - schedule for receive * @n: entry to schedule * * The entry's receive function will be scheduled to run */ void __napi_schedule(struct napi_struct *n) { unsigned long flags; local_irq_save(flags); ____napi_schedule(&__get_cpu_var(softnet_data), n); local_irq_restore(flags); } EXPORT_SYMBOL(__napi_schedule); void __napi_complete(struct napi_struct *n) { BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); BUG_ON(n->gro_list); list_del(&n->poll_list); smp_mb__before_clear_bit(); clear_bit(NAPI_STATE_SCHED, &n->state); } EXPORT_SYMBOL(__napi_complete); void napi_complete(struct napi_struct *n) { unsigned long flags; /* * don't let napi dequeue from the cpu poll list * just in case its running on a different cpu */ if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) return; napi_gro_flush(n); local_irq_save(flags); __napi_complete(n); local_irq_restore(flags); } EXPORT_SYMBOL(napi_complete); void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int 
weight) { INIT_LIST_HEAD(&napi->poll_list); napi->gro_count = 0; napi->gro_list = NULL; napi->skb = NULL; napi->poll = poll; napi->weight = weight; list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; #ifdef CONFIG_NETPOLL spin_lock_init(&napi->poll_lock); napi->poll_owner = -1; #endif set_bit(NAPI_STATE_SCHED, &napi->state); } EXPORT_SYMBOL(netif_napi_add); void netif_napi_del(struct napi_struct *napi) { struct sk_buff *skb, *next; list_del_init(&napi->dev_list); napi_free_frags(napi); for (skb = napi->gro_list; skb; skb = next) { next = skb->next; skb->next = NULL; kfree_skb(skb); } napi->gro_list = NULL; napi->gro_count = 0; } EXPORT_SYMBOL(netif_napi_del); static void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = &__get_cpu_var(softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; void *have; local_irq_disable(); while (!list_empty(&sd->poll_list)) { struct napi_struct *n; int work, weight; /* If softirq window is exhuasted then punt. * Allow this to run for 2 jiffies since which will allow * an average latency of 1.5/HZ. */ if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) goto softnet_break; local_irq_enable(); /* Even though interrupts have been re-enabled, this * access is safe because interrupts can only add new * entries to the tail of this list, and only ->poll() * calls can remove this head entry from the list. */ n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); have = netpoll_poll_lock(n); weight = n->weight; /* This NAPI_STATE_SCHED test is for avoiding a race * with netpoll's poll_napi(). Only the entity which * obtains the lock and sees NAPI_STATE_SCHED set will * actually make the ->poll() call. Therefore we avoid * accidentally calling ->poll() when NAPI is not scheduled. 
*/ work = 0; if (test_bit(NAPI_STATE_SCHED, &n->state)) { work = n->poll(n, weight); trace_napi_poll(n); } WARN_ON_ONCE(work > weight); budget -= work; local_irq_disable(); /* Drivers must not modify the NAPI state if they * consume the entire weight. In such cases this code * still "owns" the NAPI instance and therefore can * move the instance around on the list at-will. */ if (unlikely(work == weight)) { if (unlikely(napi_disable_pending(n))) { local_irq_enable(); napi_complete(n); local_irq_disable(); } else list_move_tail(&n->poll_list, &sd->poll_list); } netpoll_poll_unlock(have); } out: net_rps_action_and_irq_enable(sd); #ifdef CONFIG_NET_DMA /* * There may not be any more sk_buffs coming right now, so push * any pending DMA copies to hardware */ dma_issue_pending_all(); #endif return; softnet_break: sd->time_squeeze++; __raise_softirq_irqoff(NET_RX_SOFTIRQ); goto out; } static gifconf_func_t *gifconf_list[NPROTO]; /** * register_gifconf - register a SIOCGIF handler * @family: Address family * @gifconf: Function handler * * Register protocol dependent address dumping routines. The handler * that is passed must not be freed or reused until it has been replaced * by another handler. */ int register_gifconf(unsigned int family, gifconf_func_t *gifconf) { if (family >= NPROTO) return -EINVAL; gifconf_list[family] = gifconf; return 0; } EXPORT_SYMBOL(register_gifconf); /* * Map an interface index to its name (SIOCGIFNAME) */ /* * We need this ioctl for efficient implementation of the * if_indextoname() function required by the IPv6 API. Without * it, we would have to search all the interfaces to find a * match. --pb */ static int dev_ifname(struct net *net, struct ifreq __user *arg) { struct net_device *dev; struct ifreq ifr; /* * Fetch the caller's info block. 
*/ if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); if (!dev) { rcu_read_unlock(); return -ENODEV; } strcpy(ifr.ifr_name, dev->name); rcu_read_unlock(); if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) return -EFAULT; return 0; } /* * Perform a SIOCGIFCONF call. This structure will change * size eventually, and there is nothing I can do about it. * Thus we will need a 'compatibility mode'. */ static int dev_ifconf(struct net *net, char __user *arg) { struct ifconf ifc; struct net_device *dev; char __user *pos; int len; int total; int i; /* * Fetch the caller's info block. */ if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) return -EFAULT; pos = ifc.ifc_buf; len = ifc.ifc_len; /* * Loop over the interfaces, and write an info block for each. */ total = 0; for_each_netdev(net, dev) { for (i = 0; i < NPROTO; i++) { if (gifconf_list[i]) { int done; if (!pos) done = gifconf_list[i](dev, NULL, 0); else done = gifconf_list[i](dev, pos + total, len - total); if (done < 0) return -EFAULT; total += done; } } } /* * All done. Write the updated control block back to the caller. */ ifc.ifc_len = total; /* * Both BSD and Solaris return 0 here, so we do too. */ return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? 
-EFAULT : 0; } #ifdef CONFIG_PROC_FS #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1) #define get_bucket(x) ((x) >> BUCKET_SPACE) #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1)) #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos) { struct net *net = seq_file_net(seq); struct net_device *dev; struct hlist_node *p; struct hlist_head *h; unsigned int count = 0, offset = get_offset(*pos); h = &net->dev_name_head[get_bucket(*pos)]; hlist_for_each_entry_rcu(dev, p, h, name_hlist) { if (++count == offset) return dev; } return NULL; } static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos) { struct net_device *dev; unsigned int bucket; do { dev = dev_from_same_bucket(seq, pos); if (dev) return dev; bucket = get_bucket(*pos) + 1; *pos = set_bucket_offset(bucket, 1); } while (bucket < NETDEV_HASHENTRIES); return NULL; } /* * This is invoked by the /proc filesystem handler to display a device * in detail. 
*/ void *dev_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); if (!*pos) return SEQ_START_TOKEN; if (get_bucket(*pos) >= NETDEV_HASHENTRIES) return NULL; return dev_from_bucket(seq, pos); } void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return dev_from_bucket(seq, pos); } void dev_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", dev->name, stats->rx_bytes, stats->rx_packets, stats->rx_errors, stats->rx_dropped + stats->rx_missed_errors, stats->rx_fifo_errors, stats->rx_length_errors + stats->rx_over_errors + stats->rx_crc_errors + stats->rx_frame_errors, stats->rx_compressed, stats->multicast, stats->tx_bytes, stats->tx_packets, stats->tx_errors, stats->tx_dropped, stats->tx_fifo_errors, stats->collisions, stats->tx_carrier_errors + stats->tx_aborted_errors + stats->tx_window_errors + stats->tx_heartbeat_errors, stats->tx_compressed); } /* * Called from the PROCfs module. 
This now uses the new arbitrary sized * /proc/net interface to create /proc/net/dev */ static int dev_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Inter-| Receive " " | Transmit\n" " face |bytes packets errs drop fifo frame " "compressed multicast|bytes packets errs " "drop fifo colls carrier compressed\n"); else dev_seq_printf_stats(seq, v); return 0; } static struct softnet_data *softnet_get_online(loff_t *pos) { struct softnet_data *sd = NULL; while (*pos < nr_cpu_ids) if (cpu_online(*pos)) { sd = &per_cpu(softnet_data, *pos); break; } else ++*pos; return sd; } static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) { return softnet_get_online(pos); } static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return softnet_get_online(pos); } static void softnet_seq_stop(struct seq_file *seq, void *v) { } static int softnet_seq_show(struct seq_file *seq, void *v) { struct softnet_data *sd = v; seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", sd->processed, sd->dropped, sd->time_squeeze, 0, 0, 0, 0, 0, /* was fastroute */ sd->cpu_collision, sd->received_rps); return 0; } static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = dev_seq_show, }; static int dev_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &dev_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations dev_seq_fops = { .owner = THIS_MODULE, .open = dev_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static const struct seq_operations softnet_seq_ops = { .start = softnet_seq_start, .next = softnet_seq_next, .stop = softnet_seq_stop, .show = softnet_seq_show, }; static int softnet_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &softnet_seq_ops); } static const struct file_operations softnet_seq_fops = { .owner = THIS_MODULE, 
.open = softnet_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void *ptype_get_idx(loff_t pos) { struct packet_type *pt = NULL; loff_t i = 0; int t; list_for_each_entry_rcu(pt, &ptype_all, list) { if (i == pos) return pt; ++i; } for (t = 0; t < PTYPE_HASH_SIZE; t++) { list_for_each_entry_rcu(pt, &ptype_base[t], list) { if (i == pos) return pt; ++i; } } return NULL; } static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; } static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct packet_type *pt; struct list_head *nxt; int hash; ++*pos; if (v == SEQ_START_TOKEN) return ptype_get_idx(0); pt = v; nxt = pt->list.next; if (pt->type == htons(ETH_P_ALL)) { if (nxt != &ptype_all) goto found; hash = 0; nxt = ptype_base[0].next; } else hash = ntohs(pt->type) & PTYPE_HASH_MASK; while (nxt == &ptype_base[hash]) { if (++hash >= PTYPE_HASH_SIZE) return NULL; nxt = ptype_base[hash].next; } found: return list_entry(nxt, struct packet_type, list); } static void ptype_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int ptype_seq_show(struct seq_file *seq, void *v) { struct packet_type *pt = v; if (v == SEQ_START_TOKEN) seq_puts(seq, "Type Device Function\n"); else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { if (pt->type == htons(ETH_P_ALL)) seq_puts(seq, "ALL "); else seq_printf(seq, "%04x", ntohs(pt->type)); seq_printf(seq, " %-8s %pF\n", pt->dev ? 
pt->dev->name : "", pt->func); } return 0; } static const struct seq_operations ptype_seq_ops = { .start = ptype_seq_start, .next = ptype_seq_next, .stop = ptype_seq_stop, .show = ptype_seq_show, }; static int ptype_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &ptype_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations ptype_seq_fops = { .owner = THIS_MODULE, .open = ptype_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static int __net_init dev_proc_net_init(struct net *net) { int rc = -ENOMEM; if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) goto out; if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) goto out_dev; if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) goto out_softnet; if (wext_proc_init(net)) goto out_ptype; rc = 0; out: return rc; out_ptype: proc_net_remove(net, "ptype"); out_softnet: proc_net_remove(net, "softnet_stat"); out_dev: proc_net_remove(net, "dev"); goto out; } static void __net_exit dev_proc_net_exit(struct net *net) { wext_proc_exit(net); proc_net_remove(net, "ptype"); proc_net_remove(net, "softnet_stat"); proc_net_remove(net, "dev"); } static struct pernet_operations __net_initdata dev_proc_ops = { .init = dev_proc_net_init, .exit = dev_proc_net_exit, }; static int __init dev_proc_init(void) { return register_pernet_subsys(&dev_proc_ops); } #else #define dev_proc_init() 0 #endif /* CONFIG_PROC_FS */ /** * netdev_set_master - set up master pointer * @slave: slave device * @master: new master device * * Changes the master device of the slave. Pass %NULL to break the * bonding. The caller must hold the RTNL semaphore. On a failure * a negative errno code is returned. On success the reference counts * are adjusted and the function returns zero. 
*/ int netdev_set_master(struct net_device *slave, struct net_device *master) { struct net_device *old = slave->master; ASSERT_RTNL(); if (master) { if (old) return -EBUSY; dev_hold(master); } slave->master = master; if (old) dev_put(old); return 0; } EXPORT_SYMBOL(netdev_set_master); /** * netdev_set_bond_master - set up bonding master/slave pair * @slave: slave device * @master: new master device * * Changes the master device of the slave. Pass %NULL to break the * bonding. The caller must hold the RTNL semaphore. On a failure * a negative errno code is returned. On success %RTM_NEWLINK is sent * to the routing socket and the function returns zero. */ int netdev_set_bond_master(struct net_device *slave, struct net_device *master) { int err; ASSERT_RTNL(); err = netdev_set_master(slave, master); if (err) return err; if (master) slave->flags |= IFF_SLAVE; else slave->flags &= ~IFF_SLAVE; rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); return 0; } EXPORT_SYMBOL(netdev_set_bond_master); static void dev_change_rx_flags(struct net_device *dev, int flags) { const struct net_device_ops *ops = dev->netdev_ops; if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) ops->ndo_change_rx_flags(dev, flags); } static int __dev_set_promiscuity(struct net_device *dev, int inc) { unsigned int old_flags = dev->flags; uid_t uid; gid_t gid; ASSERT_RTNL(); dev->flags |= IFF_PROMISC; dev->promiscuity += inc; if (dev->promiscuity == 0) { /* * Avoid overflow. * If inc causes overflow, untouch promisc and return error. */ if (inc < 0) dev->flags &= ~IFF_PROMISC; else { dev->promiscuity -= inc; pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", dev->name); return -EOVERFLOW; } } if (dev->flags != old_flags) { pr_info("device %s %s promiscuous mode\n", dev->name, dev->flags & IFF_PROMISC ? 
"entered" : "left"); if (audit_enabled) { current_uid_gid(&uid, &gid); audit_log(current->audit_context, GFP_ATOMIC, AUDIT_ANOM_PROMISCUOUS, "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", dev->name, (dev->flags & IFF_PROMISC), (old_flags & IFF_PROMISC), audit_get_loginuid(current), uid, gid, audit_get_sessionid(current)); } dev_change_rx_flags(dev, IFF_PROMISC); } return 0; } /** * dev_set_promiscuity - update promiscuity count on a device * @dev: device * @inc: modifier * * Add or remove promiscuity from a device. While the count in the device * remains above zero the interface remains promiscuous. Once it hits zero * the device reverts back to normal filtering operation. A negative inc * value is used to drop promiscuity on the device. * Return 0 if successful or a negative errno code on error. */ int dev_set_promiscuity(struct net_device *dev, int inc) { unsigned int old_flags = dev->flags; int err; err = __dev_set_promiscuity(dev, inc); if (err < 0) return err; if (dev->flags != old_flags) dev_set_rx_mode(dev); return err; } EXPORT_SYMBOL(dev_set_promiscuity); /** * dev_set_allmulti - update allmulti count on a device * @dev: device * @inc: modifier * * Add or remove reception of all multicast frames to a device. While the * count in the device remains above zero the interface remains listening * to all interfaces. Once it hits zero the device reverts back to normal * filtering operation. A negative @inc value is used to drop the counter * when releasing a resource needing all multicasts. * Return 0 if successful or a negative errno code on error. */ int dev_set_allmulti(struct net_device *dev, int inc) { unsigned int old_flags = dev->flags; ASSERT_RTNL(); dev->flags |= IFF_ALLMULTI; dev->allmulti += inc; if (dev->allmulti == 0) { /* * Avoid overflow. * If inc causes overflow, untouch allmulti and return error. */ if (inc < 0) dev->flags &= ~IFF_ALLMULTI; else { dev->allmulti -= inc; pr_warn("%s: allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n", dev->name); return -EOVERFLOW; } } if (dev->flags ^ old_flags) { dev_change_rx_flags(dev, IFF_ALLMULTI); dev_set_rx_mode(dev); } return 0; } EXPORT_SYMBOL(dev_set_allmulti); /* * Upload unicast and multicast address lists to device and * configure RX filtering. When the device doesn't support unicast * filtering it is put in promiscuous mode while unicast addresses * are present. */ void __dev_set_rx_mode(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; /* dev_open will call this function so the list will stay sane. */ if (!(dev->flags&IFF_UP)) return; if (!netif_device_present(dev)) return; if (!(dev->priv_flags & IFF_UNICAST_FLT)) { /* Unicast addresses changes may only happen under the rtnl, * therefore calling __dev_set_promiscuity here is safe. */ if (!netdev_uc_empty(dev) && !dev->uc_promisc) { __dev_set_promiscuity(dev, 1); dev->uc_promisc = true; } else if (netdev_uc_empty(dev) && dev->uc_promisc) { __dev_set_promiscuity(dev, -1); dev->uc_promisc = false; } } if (ops->ndo_set_rx_mode) ops->ndo_set_rx_mode(dev); } void dev_set_rx_mode(struct net_device *dev) { netif_addr_lock_bh(dev); __dev_set_rx_mode(dev); netif_addr_unlock_bh(dev); } /** * dev_get_flags - get flags reported to userspace * @dev: device * * Get the combination of flag bits exported through APIs to userspace. */ unsigned dev_get_flags(const struct net_device *dev) { unsigned flags; flags = (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI | IFF_RUNNING | IFF_LOWER_UP | IFF_DORMANT)) | (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI)); if (netif_running(dev)) { if (netif_oper_up(dev)) flags |= IFF_RUNNING; if (netif_carrier_ok(dev)) flags |= IFF_LOWER_UP; if (netif_dormant(dev)) flags |= IFF_DORMANT; } return flags; } EXPORT_SYMBOL(dev_get_flags); int __dev_change_flags(struct net_device *dev, unsigned int flags) { unsigned int old_flags = dev->flags; int ret; ASSERT_RTNL(); /* * Set the flags on our device. 
*/ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | IFF_AUTOMEDIA)) | (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | IFF_ALLMULTI)); /* * Load in the correct multicast list now the flags have changed. */ if ((old_flags ^ flags) & IFF_MULTICAST) dev_change_rx_flags(dev, IFF_MULTICAST); dev_set_rx_mode(dev); /* * Have we downed the interface. We handle IFF_UP ourselves * according to user attempts to set it, rather than blindly * setting it. */ ret = 0; if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); if (!ret) dev_set_rx_mode(dev); } if ((flags ^ dev->gflags) & IFF_PROMISC) { int inc = (flags & IFF_PROMISC) ? 1 : -1; dev->gflags ^= IFF_PROMISC; dev_set_promiscuity(dev, inc); } /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI is important. Some (broken) drivers set IFF_PROMISC, when IFF_ALLMULTI is requested not asking us and not reporting. */ if ((flags ^ dev->gflags) & IFF_ALLMULTI) { int inc = (flags & IFF_ALLMULTI) ? 1 : -1; dev->gflags ^= IFF_ALLMULTI; dev_set_allmulti(dev, inc); } return ret; } void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) { unsigned int changes = dev->flags ^ old_flags; if (changes & IFF_UP) { if (dev->flags & IFF_UP) call_netdevice_notifiers(NETDEV_UP, dev); else call_netdevice_notifiers(NETDEV_DOWN, dev); } if (dev->flags & IFF_UP && (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) call_netdevice_notifiers(NETDEV_CHANGE, dev); } /** * dev_change_flags - change device settings * @dev: device * @flags: device state flags * * Change settings on device based state flags. The flags are * in the userspace exported format. 
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	/* Tell userspace (rtnetlink) only about the bits that changed. */
	changes = old_flags ^ dev->flags;
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);

	__dev_notify_flags(dev, old_flags);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);

/**
 *	dev_set_mtu - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *
 *	Change the maximum transfer size of the network device.
 *	Returns 0 on success or a negative errno code; notifies
 *	NETDEV_CHANGEMTU listeners if the device is up.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Drivers may veto or adjust via ndo_change_mtu; otherwise store directly. */
	err = 0;
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;

	if (!err && dev->flags & IFF_UP)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);

/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *
 *	Change the hardware (MAC) address of the device.  Fails with
 *	-EOPNOTSUPP when the driver provides no ndo_set_mac_address,
 *	-EINVAL on an address-family mismatch, -ENODEV when the device
 *	is detached.  Notifies NETDEV_CHANGEADDR on success.
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mac_address);

/*
 *	Perform the read-only SIOCxIFxxx calls, inside rcu_read_lock().
 *	Only "get" commands that need no serialization beyond RCU belong here;
 *	dev_ioctl() dispatches them under rcu_read_lock().
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = (short) dev_get_flags(dev);
		return 0;

	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;

	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;

	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0,
			       sizeof ifr->ifr_hwaddr.sa_data);
		else
			/* Copy at most sa_data's size, never past addr_len. */
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data,
				   (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;

	case SIOCGIFSLAVE:
		err = -EINVAL;
		break;

	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;

	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;

	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;

	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
		err = -ENOTTY;
		break;

	}
	return err;
}

/*
 *	Perform the mutating SIOCxIFxxx calls, inside rtnl_lock().
 *	dev_ioctl() takes the RTNL before dispatching here.
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	const struct net_device_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = dev->netdev_ops;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);

	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;

	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);

	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data,
			   (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;

	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCADDMULTI:
		if (!ops->ndo_set_rx_mode ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCDELMULTI:
		if (!ops->ndo_set_rx_mode ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCSIFNAME:
		/* Force NUL-termination of the user-supplied name. */
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);

	case SIOCSHWTSTAMP:
		err = net_hwtstamp_validate(ifr);
		if (err)
			return err;
		/* fall through: after validation the request is
		 * delivered to the driver via ndo_do_ioctl below. */

	/*
	 *	Unknown or private ioctl
	 */
	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		    cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;

	}
	return err;
}

/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@net: the applicable net namespace
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes.
 *	The return value is the return from the syscall if positive
 *	or a negative errno code on error.
 */
int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	/* Defensively NUL-terminate the name copied from userspace. */
	ifr.ifr_name[IFNAMSIZ-1] = 0;

	/* Strip an alias suffix ("eth0:1" -> "eth0") for the lookup;
	 * the colon is restored before copying results back. */
	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
	/*
	 *	These ioctl calls:
	 *	- can be done by all.
	 *	- atomic and do not require locking.
	 *	- return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		rcu_read_lock();
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		rcu_read_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through: the two query commands below are allowed
		 * without CAP_NET_ADMIN */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
		return -ENOTTY;

	/*
	 *	Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
		return -ENOTTY;
	}
}

/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	/* Monotonically increasing, shared across all namespaces;
	 * wraps back to 1 on signed overflow. */
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return ifindex;
	}
}

/* Delayed registration/unregisteration */
static LIST_HEAD(net_todo_list);

/* Queue @dev for deferred unregister processing in netdev_run_todo(). */
static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}

/*
 * Unwind registration for a batch of devices: close them, unlink them
 * from the device chain, notify protocols and tear down their state.
 * Caller holds the RTNL lock (ASSERT_RTNL below).
 */
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}

	/* Wait for in-flight readers before notifying protocols. */
	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols, that we are about to destroy
		   this device. They should clean all the things.
		*/
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		/* Notifier chain MUST detach us from master device. */
		WARN_ON(dev->master);

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
	}

	/* Process any work delayed until the end of the batch */
	dev = list_first_entry(head, struct net_device, unreg_list);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

/* Single-device convenience wrapper around rollback_registered_many(). */
static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}

/*
 * Drop feature bits whose dependencies are not satisfied (e.g. SG needs
 * checksumming, TSO needs SG).  Returns the sanitized feature set.
 */
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
		netdev_dbg(dev,
			"Dropping NETIF_F_SG since no checksum feature.\n");
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}

/*
 * Recompute dev->features from the wanted set, letting the driver and
 * netdev_fix_features() veto bits.  Returns 0 if nothing changed, 1 if
 * features changed, -1 if the driver rejected the new set.
 * Caller must hold the RTNL lock.
 */
int __netdev_update_features(struct net_device *dev)
{
	netdev_features_t features;
	int err = 0;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
		return 0;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}

/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

#ifdef CONFIG_RPS
/* Allocate dev->_rx, one netdev_rx_queue per configured RX queue. */
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;

	BUG_ON(count < 1);

	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		pr_err("netdev: Unable to allocate %u rx queues\n", count);
		return -ENOMEM;
	}
	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

/* Per-queue init callback used by netdev_for_each_tx_queue(). */
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

/* Allocate and initialize dev->_tx, one netdev_queue per TX queue. */
static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;

	BUG_ON(count < 1);

	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		pr_err("netdev: Unable to allocate %u tx queues\n", count);
		return -ENOMEM;
	}
	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	dev->iflink = -1;

	ret = dev_get_valid_name(dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	/* Turn on no cache copy if HW is doing checksum */
	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
		if (dev->features & NETIF_F_ALL_CSUM) {
			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
			dev->features |= NETIF_F_NOCACHE_COPY;
		}
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices. */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		/* A notifier vetoed the registration: unwind everything. */
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We dont allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' dont need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);

/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);

/* Sum the per-cpu reference counters for @dev. */
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		/* Re-send the unregister notification at most once per second
		 * to nudge laggard reference holders. */
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
			 * should have already handled it the first time */

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		/* Complain loudly every 10 seconds while stuck. */
		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before attempting to drain
	 * the device list.  This usually avoids a 250ms wait.
	 */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64.  They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	/* Layouts are identical on 64-bit: a straight copy suffices. */
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	/* On 32-bit, widen each unsigned long field to u64 one by one. */
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	/* Core-maintained drop counter is added on top of driver stats. */
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);

/* Return the device's ingress queue, creating it on first use when
 * CONFIG_NET_CLS_ACT is enabled; returns NULL on allocation failure. */
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

/**
 *	alloc_netdev_mqs - allocate network device
 *	@sizeof_priv: size of private data to allocate space for
 *	@name: device name format string
 *	@setup: callback to initialize device
 *	@txqs: the number of TX subqueues to allocate
 *	@rxqs: the number of RX subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_RPS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		pr_err("alloc_netdev: Unable to allocate device\n");
		return NULL;
	}

	/* dev may not be the start of the allocation; remember the
	 * padding so free_netdev() can recover the original pointer. */
	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_RPS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->group = INIT_NETDEV_GROUP;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);

/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers: a device that was
	 * never registered is freed directly (undo alloc_netdev_mqs's
	 * alignment padding). */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		/* Batch mode: defer to unregister_netdevice_many(). */
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

/**
 *	dev_change_net_namespace - move device to different nethost namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

/*
 * CPU hotplug notifier: on CPU_DEAD, migrate the dead CPU's softnet
 * state (completion queue, output queue, NAPI poll list, input queues)
 * onto the current CPU so no packets or completions are lost.
 */
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
*/ netdev_features_t netdev_increment_features(netdev_features_t all, netdev_features_t one, netdev_features_t mask) { if (mask & NETIF_F_GEN_CSUM) mask |= NETIF_F_ALL_CSUM; mask |= NETIF_F_VLAN_CHALLENGED; all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask; all &= one | ~NETIF_F_ALL_FOR_ALL; /* If one device supports hw checksumming, set for all. */ if (all & NETIF_F_GEN_CSUM) all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); return all; } EXPORT_SYMBOL(netdev_increment_features); static struct hlist_head *netdev_create_hash(void) { int i; struct hlist_head *hash; hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL); if (hash != NULL) for (i = 0; i < NETDEV_HASHENTRIES; i++) INIT_HLIST_HEAD(&hash[i]); return hash; } /* Initialize per network namespace state */ static int __net_init netdev_init(struct net *net) { INIT_LIST_HEAD(&net->dev_base_head); net->dev_name_head = netdev_create_hash(); if (net->dev_name_head == NULL) goto err_name; net->dev_index_head = netdev_create_hash(); if (net->dev_index_head == NULL) goto err_idx; return 0; err_idx: kfree(net->dev_name_head); err_name: return -ENOMEM; } /** * netdev_drivername - network driver for the device * @dev: network device * * Determine network driver for device. 
*/ const char *netdev_drivername(const struct net_device *dev) { const struct device_driver *driver; const struct device *parent; const char *empty = ""; parent = dev->dev.parent; if (!parent) return empty; driver = parent->driver; if (driver && driver->name) return driver->name; return empty; } int __netdev_printk(const char *level, const struct net_device *dev, struct va_format *vaf) { int r; if (dev && dev->dev.parent) r = dev_printk(level, dev->dev.parent, "%s: %pV", netdev_name(dev), vaf); else if (dev) r = printk("%s%s: %pV", level, netdev_name(dev), vaf); else r = printk("%s(NULL net_device): %pV", level, vaf); return r; } EXPORT_SYMBOL(__netdev_printk); int netdev_printk(const char *level, const struct net_device *dev, const char *format, ...) { struct va_format vaf; va_list args; int r; va_start(args, format); vaf.fmt = format; vaf.va = &args; r = __netdev_printk(level, dev, &vaf); va_end(args); return r; } EXPORT_SYMBOL(netdev_printk); #define define_netdev_printk_level(func, level) \ int func(const struct net_device *dev, const char *fmt, ...) 
\ { \ int r; \ struct va_format vaf; \ va_list args; \ \ va_start(args, fmt); \ \ vaf.fmt = fmt; \ vaf.va = &args; \ \ r = __netdev_printk(level, dev, &vaf); \ va_end(args); \ \ return r; \ } \ EXPORT_SYMBOL(func); define_netdev_printk_level(netdev_emerg, KERN_EMERG); define_netdev_printk_level(netdev_alert, KERN_ALERT); define_netdev_printk_level(netdev_crit, KERN_CRIT); define_netdev_printk_level(netdev_err, KERN_ERR); define_netdev_printk_level(netdev_warn, KERN_WARNING); define_netdev_printk_level(netdev_notice, KERN_NOTICE); define_netdev_printk_level(netdev_info, KERN_INFO); static void __net_exit netdev_exit(struct net *net) { kfree(net->dev_name_head); kfree(net->dev_index_head); } static struct pernet_operations __net_initdata netdev_net_ops = { .init = netdev_init, .exit = netdev_exit, }; static void __net_exit default_device_exit(struct net *net) { struct net_device *dev, *aux; /* * Push all migratable network devices back to the * initial network namespace */ rtnl_lock(); for_each_netdev_safe(net, dev, aux) { int err; char fb_name[IFNAMSIZ]; /* Ignore unmoveable devices (i.e. loopback) */ if (dev->features & NETIF_F_NETNS_LOCAL) continue; /* Leave virtual devices for the generic cleanup */ if (dev->rtnl_link_ops) continue; /* Push remaining network devices to init_net */ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); err = dev_change_net_namespace(dev, &init_net, fb_name); if (err) { pr_emerg("%s: failed to move %s to init_net: %d\n", __func__, dev->name, err); BUG(); } } rtnl_unlock(); } static void __net_exit default_device_exit_batch(struct list_head *net_list) { /* At exit all network devices most be removed from a network * namespace. Do this in the reverse order of registration. * Do this across as many network namespaces as possible to * improve batching efficiency. 
*/ struct net_device *dev; struct net *net; LIST_HEAD(dev_kill_list); rtnl_lock(); list_for_each_entry(net, net_list, exit_list) { for_each_netdev_reverse(net, dev) { if (dev->rtnl_link_ops) dev->rtnl_link_ops->dellink(dev, &dev_kill_list); else unregister_netdevice_queue(dev, &dev_kill_list); } } unregister_netdevice_many(&dev_kill_list); list_del(&dev_kill_list); rtnl_unlock(); } static struct pernet_operations __net_initdata default_device_ops = { .exit = default_device_exit, .exit_batch = default_device_exit_batch, }; /* * Initialize the DEV module. At boot time this walks the device list and * unhooks any devices that fail to initialise (normally hardware not * present) and leaves us with a valid list of present and active devices. * */ /* * This is called single threaded during boot, so no need * to take the rtnl semaphore. */ static int __init net_dev_init(void) { int i, rc = -ENOMEM; BUG_ON(!dev_boot_phase); if (dev_proc_init()) goto out; if (netdev_kobject_init()) goto out; INIT_LIST_HEAD(&ptype_all); for (i = 0; i < PTYPE_HASH_SIZE; i++) INIT_LIST_HEAD(&ptype_base[i]); if (register_pernet_subsys(&netdev_net_ops)) goto out; /* * Initialise the packet receive queues. */ for_each_possible_cpu(i) { struct softnet_data *sd = &per_cpu(softnet_data, i); memset(sd, 0, sizeof(*sd)); skb_queue_head_init(&sd->input_pkt_queue); skb_queue_head_init(&sd->process_queue); sd->completion_queue = NULL; INIT_LIST_HEAD(&sd->poll_list); sd->output_queue = NULL; sd->output_queue_tailp = &sd->output_queue; #ifdef CONFIG_RPS sd->csd.func = rps_trigger_softirq; sd->csd.info = sd; sd->csd.flags = 0; sd->cpu = i; #endif sd->backlog.poll = process_backlog; sd->backlog.weight = weight_p; sd->backlog.gro_list = NULL; sd->backlog.gro_count = 0; } dev_boot_phase = 0; /* The loopback device is special if any other network devices * is present in a network namespace the loopback device must * be present. 
Since we now dynamically allocate and free the * loopback device ensure this invariant is maintained by * keeping the loopback device as the first device on the * list of network devices. Ensuring the loopback devices * is the first device that appears and the last network device * that disappears. */ if (register_pernet_device(&loopback_net_ops)) goto out; if (register_pernet_device(&default_device_ops)) goto out; open_softirq(NET_TX_SOFTIRQ, net_tx_action); open_softirq(NET_RX_SOFTIRQ, net_rx_action); hotcpu_notifier(dev_cpu_callback, 0); dst_init(); dev_mcast_init(); rc = 0; out: return rc; } subsys_initcall(net_dev_init); static int __init initialize_hashrnd(void) { get_random_bytes(&hashrnd, sizeof(hashrnd)); return 0; } late_initcall_sync(initialize_hashrnd);
gpl-2.0
pazos/android_kernel_samsung_goyawifi
drivers/tty/serial/msm_smd_tty.c
1545
5605
/* * Copyright (C) 2007 Google, Inc. * Copyright (c) 2011, Code Aurora Forum. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <mach/msm_smd.h> #define MAX_SMD_TTYS 32 struct smd_tty_info { struct tty_port port; smd_channel_t *ch; }; struct smd_tty_channel_desc { int id; const char *name; }; static struct smd_tty_info smd_tty[MAX_SMD_TTYS]; static const struct smd_tty_channel_desc smd_default_tty_channels[] = { { .id = 0, .name = "SMD_DS" }, { .id = 27, .name = "SMD_GPSNMEA" }, }; static const struct smd_tty_channel_desc *smd_tty_channels = smd_default_tty_channels; static int smd_tty_channels_len = ARRAY_SIZE(smd_default_tty_channels); static void smd_tty_notify(void *priv, unsigned event) { unsigned char *ptr; int avail; struct smd_tty_info *info = priv; struct tty_struct *tty; if (event != SMD_EVENT_DATA) return; tty = tty_port_tty_get(&info->port); if (!tty) return; for (;;) { if (test_bit(TTY_THROTTLED, &tty->flags)) break; avail = smd_read_avail(info->ch); if (avail == 0) break; avail = tty_prepare_flip_string(tty, &ptr, avail); if (smd_read(info->ch, ptr, avail) != avail) { /* shouldn't be possible since we're in interrupt ** context here and nobody else could 'steal' our ** characters. 
*/ pr_err("OOPS - smd_tty_buffer mismatch?!"); } tty_flip_buffer_push(tty); } /* XXX only when writable and necessary */ tty_wakeup(tty); tty_kref_put(tty); } static int smd_tty_port_activate(struct tty_port *tport, struct tty_struct *tty) { int i, res = 0; int n = tty->index; const char *name = NULL; struct smd_tty_info *info = smd_tty + n; for (i = 0; i < smd_tty_channels_len; i++) { if (smd_tty_channels[i].id == n) { name = smd_tty_channels[i].name; break; } } if (!name) return -ENODEV; if (info->ch) smd_kick(info->ch); else res = smd_open(name, &info->ch, info, smd_tty_notify); if (!res) tty->driver_data = info; return res; } static void smd_tty_port_shutdown(struct tty_port *tport) { struct smd_tty_info *info; struct tty_struct *tty = tty_port_tty_get(tport); info = tty->driver_data; if (info->ch) { smd_close(info->ch); info->ch = 0; } tty->driver_data = 0; tty_kref_put(tty); } static int smd_tty_open(struct tty_struct *tty, struct file *f) { struct smd_tty_info *info = smd_tty + tty->index; return tty_port_open(&info->port, tty, f); } static void smd_tty_close(struct tty_struct *tty, struct file *f) { struct smd_tty_info *info = tty->driver_data; tty_port_close(&info->port, tty, f); } static int smd_tty_write(struct tty_struct *tty, const unsigned char *buf, int len) { struct smd_tty_info *info = tty->driver_data; int avail; /* if we're writing to a packet channel we will ** never be able to write more data than there ** is currently space for */ avail = smd_write_avail(info->ch); if (len > avail) len = avail; return smd_write(info->ch, buf, len); } static int smd_tty_write_room(struct tty_struct *tty) { struct smd_tty_info *info = tty->driver_data; return smd_write_avail(info->ch); } static int smd_tty_chars_in_buffer(struct tty_struct *tty) { struct smd_tty_info *info = tty->driver_data; return smd_read_avail(info->ch); } static void smd_tty_unthrottle(struct tty_struct *tty) { struct smd_tty_info *info = tty->driver_data; smd_kick(info->ch); } static const 
struct tty_port_operations smd_tty_port_ops = { .shutdown = smd_tty_port_shutdown, .activate = smd_tty_port_activate, }; static const struct tty_operations smd_tty_ops = { .open = smd_tty_open, .close = smd_tty_close, .write = smd_tty_write, .write_room = smd_tty_write_room, .chars_in_buffer = smd_tty_chars_in_buffer, .unthrottle = smd_tty_unthrottle, }; static struct tty_driver *smd_tty_driver; static int __init smd_tty_init(void) { int ret, i; smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS); if (smd_tty_driver == 0) return -ENOMEM; smd_tty_driver->driver_name = "smd_tty_driver"; smd_tty_driver->name = "smd"; smd_tty_driver->major = 0; smd_tty_driver->minor_start = 0; smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; smd_tty_driver->subtype = SERIAL_TYPE_NORMAL; smd_tty_driver->init_termios = tty_std_termios; smd_tty_driver->init_termios.c_iflag = 0; smd_tty_driver->init_termios.c_oflag = 0; smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD; smd_tty_driver->init_termios.c_lflag = 0; smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; tty_set_operations(smd_tty_driver, &smd_tty_ops); ret = tty_register_driver(smd_tty_driver); if (ret) return ret; for (i = 0; i < smd_tty_channels_len; i++) { tty_port_init(&smd_tty[smd_tty_channels[i].id].port); smd_tty[smd_tty_channels[i].id].port.ops = &smd_tty_port_ops; tty_register_device(smd_tty_driver, smd_tty_channels[i].id, 0); } return 0; } module_init(smd_tty_init);
gpl-2.0
xingrz/android_kernel_nubia_msm8996
drivers/gpu/drm/radeon/atombios_i2c.c
1801
4406
/* * Copyright 2011 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Alex Deucher * */ #include <drm/drmP.h> #include <drm/radeon_drm.h> #include "radeon.h" #include "atom.h" #define TARGET_HW_I2C_CLOCK 50 /* these are a limitation of ProcessI2cChannelTransaction not the hw */ #define ATOM_MAX_HW_I2C_WRITE 3 #define ATOM_MAX_HW_I2C_READ 255 static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, u8 slave_addr, u8 flags, u8 *buf, u8 num) { struct drm_device *dev = chan->dev; struct radeon_device *rdev = dev->dev_private; PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); unsigned char *base; u16 out = cpu_to_le16(0); int r = 0; memset(&args, 0, sizeof(args)); mutex_lock(&chan->mutex); mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); base = (unsigned char *)rdev->mode_info.atom_context->scratch; if (flags & HW_I2C_WRITE) { if (num > ATOM_MAX_HW_I2C_WRITE) { DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); r = -EINVAL; goto done; } if (buf == NULL) args.ucRegIndex = 0; else args.ucRegIndex = buf[0]; if (num) num--; if (num) memcpy(&out, &buf[1], num); args.lpI2CDataOut = cpu_to_le16(out); } else { if (num > ATOM_MAX_HW_I2C_READ) { DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); r = -EINVAL; goto done; } args.ucRegIndex = 0; args.lpI2CDataOut = 0; } args.ucFlag = flags; args.ucI2CSpeed = TARGET_HW_I2C_CLOCK; args.ucTransBytes = num; args.ucSlaveAddr = slave_addr << 1; args.ucLineNumber = chan->rec.i2c_id; atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); /* error */ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { DRM_DEBUG_KMS("hw_i2c error\n"); r = -EIO; goto done; } if (!(flags & HW_I2C_WRITE)) radeon_atom_copy_swap(buf, base, num, false); done: mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex); mutex_unlock(&chan->mutex); return r; } int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct 
radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); struct i2c_msg *p; int i, remaining, current_count, buffer_offset, max_bytes, ret; u8 flags; /* check for bus probe */ p = &msgs[0]; if ((num == 1) && (p->len == 0)) { ret = radeon_process_i2c_ch(i2c, p->addr, HW_I2C_WRITE, NULL, 0); if (ret) return ret; else return num; } for (i = 0; i < num; i++) { p = &msgs[i]; remaining = p->len; buffer_offset = 0; /* max_bytes are a limitation of ProcessI2cChannelTransaction not the hw */ if (p->flags & I2C_M_RD) { max_bytes = ATOM_MAX_HW_I2C_READ; flags = HW_I2C_READ; } else { max_bytes = ATOM_MAX_HW_I2C_WRITE; flags = HW_I2C_WRITE; } while (remaining) { if (remaining > max_bytes) current_count = max_bytes; else current_count = remaining; ret = radeon_process_i2c_ch(i2c, p->addr, flags, &p->buf[buffer_offset], current_count); if (ret) return ret; remaining -= current_count; buffer_offset += current_count; } } return num; } u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; }
gpl-2.0
ubports/android_kernel_oneplus_one
arch/powerpc/mm/fsl_booke_mmu.c
6921
6618
/* * Modifications by Kumar Gala (galak@kernel.crashing.org) to support * E500 Book E processors. * * Copyright 2004,2010 Freescale Semiconductor, Inc. * * This file contains the routines for initializing the MMU * on the 4xx series of chips. * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <asm/pgalloc.h> #include <asm/prom.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/uaccess.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/setup.h> #include "mmu_decl.h" unsigned int tlbcam_index; #define NUM_TLBCAMS (64) struct tlbcam TLBCAM[NUM_TLBCAMS]; struct tlbcamrange { unsigned long start; unsigned long limit; phys_addr_t phys; } tlbcam_addrs[NUM_TLBCAMS]; extern unsigned int tlbcam_index; unsigned long tlbcam_sz(int idx) { return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1; } /* * Return PA for this VA if it is mapped by a CAM, or 0 */ phys_addr_t v_mapped_by_tlbcam(unsigned long va) { int b; for (b = 0; b < tlbcam_index; ++b) if 
(va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit) return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start); return 0; } /* * Return VA for a given PA or 0 if not mapped */ unsigned long p_mapped_by_tlbcam(phys_addr_t pa) { int b; for (b = 0; b < tlbcam_index; ++b) if (pa >= tlbcam_addrs[b].phys && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) +tlbcam_addrs[b].phys) return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); return 0; } /* * Set up a variable-size TLB entry (tlbcam). The parameters are not checked; * in particular size must be a power of 4 between 4k and the max supported by * an implementation; max may further be limited by what can be represented in * an unsigned long (for example, 32-bit implementations cannot support a 4GB * size). */ static void settlbcam(int index, unsigned long virt, phys_addr_t phys, unsigned long size, unsigned long flags, unsigned int pid) { unsigned int tsize; tsize = __ilog2(size) - 10; #ifdef CONFIG_SMP if ((flags & _PAGE_NO_CACHE) == 0) flags |= _PAGE_COHERENT; #endif TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1); TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid); TLBCAM[index].MAS2 = virt & PAGE_MASK; TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0; TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0; TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0; TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0; TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0; TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SX | MAS3_SR; TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0); if (mmu_has_feature(MMU_FTR_BIG_PHYS)) TLBCAM[index].MAS7 = (u64)phys >> 32; /* Below is unlikely -- only for large user pages or similar */ if (pte_user(flags)) { TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR; TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? 
MAS3_UW : 0); } tlbcam_addrs[index].start = virt; tlbcam_addrs[index].limit = virt + size - 1; tlbcam_addrs[index].phys = phys; loadcam_entry(index); } unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, phys_addr_t phys) { unsigned int camsize = __ilog2(ram); unsigned int align = __ffs(virt | phys); unsigned long max_cam; if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { /* Convert (4^max) kB to (2^max) bytes */ max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10; camsize &= ~1U; align &= ~1U; } else { /* Convert (2^max) kB to (2^max) bytes */ max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10; } if (camsize > align) camsize = align; if (camsize > max_cam) camsize = max_cam; return 1UL << camsize; } unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx) { int i; unsigned long virt = PAGE_OFFSET; phys_addr_t phys = memstart_addr; unsigned long amount_mapped = 0; /* Calculate CAM values */ for (i = 0; ram && i < max_cam_idx; i++) { unsigned long cam_sz; cam_sz = calc_cam_sz(ram, virt, phys); settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0); ram -= cam_sz; amount_mapped += cam_sz; virt += cam_sz; phys += cam_sz; } tlbcam_index = i; return amount_mapped; } #ifdef CONFIG_PPC32 #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" #endif unsigned long __init mmu_mapin_ram(unsigned long top) { return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; } /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. 
*/ void __init MMU_init_hw(void) { flush_instruction_cache(); } void __init adjust_total_lowmem(void) { unsigned long ram; int i; /* adjust lowmem size to __max_low_memory */ ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM); pr_info("Memory CAM mapping: "); for (i = 0; i < tlbcam_index - 1; i++) pr_cont("%lu/", tlbcam_sz(i) >> 20); pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, (unsigned int)((total_lowmem - __max_low_memory) >> 20)); memblock_set_current_limit(memstart_addr + __max_low_memory); } void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { phys_addr_t limit = first_memblock_base + first_memblock_size; /* 64M mapped initially according to head_fsl_booke.S */ memblock_set_current_limit(min_t(u64, limit, 0x04000000)); } #endif
gpl-2.0
varunchitre15/thunderzap_sprout
drivers/usb/core/hub.c
10
180895
/* * USB hub driver. * * (C) Copyright 1999 Linus Torvalds * (C) Copyright 1999 Johannes Erdfelt * (C) Copyright 1999 Gregory P. Smith * (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au) * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/completion.h> #include <linux/sched.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <linux/usb.h> #include <linux/usbdevice_fs.h> #include <linux/usb/hcd.h> #include <linux/usb/otg.h> #include <linux/usb/quirks.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/freezer.h> #include <linux/random.h> #include <linux/pm_qos.h> #include <asm/uaccess.h> #include <asm/byteorder.h> #include "hub.h" /* if we are in debug mode, always announce new devices */ #ifdef DEBUG #ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES #define CONFIG_USB_ANNOUNCE_NEW_DEVICES #endif #endif #define USB_VENDOR_GENESYS_LOGIC 0x05e3 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 static inline int hub_is_superspeed(struct usb_device *hdev) { return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS); } /* Protect struct usb_device->state and ->children members * Note: Both are also protected by ->dev.sem, except that ->state can * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */ static DEFINE_SPINLOCK(device_state_lock); /* khubd's worklist and its lock */ static DEFINE_SPINLOCK(hub_event_lock); static LIST_HEAD(hub_event_list); /* List of hubs needing servicing */ /* Wakes up khubd */ static DECLARE_WAIT_QUEUE_HEAD(khubd_wait); static struct task_struct *khubd_task; /* cycle leds on hubs that aren't blinking for attention */ static bool blinkenlights = 0; module_param (blinkenlights, bool, S_IRUGO); MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs"); /* * Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about * 10 seconds to send reply for the initial 64-byte descriptor request. 
*/ /* define initial 64-byte descriptor request timeout in milliseconds */ static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT; module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(initial_descriptor_timeout, "initial 64-byte descriptor request timeout in milliseconds " "(default 5000 - 5.0 seconds)"); /* * As of 2.6.10 we introduce a new USB device initialization scheme which * closely resembles the way Windows works. Hopefully it will be compatible * with a wider range of devices than the old scheme. However some previously * working devices may start giving rise to "device not accepting address" * errors; if that happens the user can try the old scheme by adjusting the * following module parameters. * * For maximum flexibility there are two boolean parameters to control the * hub driver's behavior. On the first initialization attempt, if the * "old_scheme_first" parameter is set then the old scheme will be used, * otherwise the new scheme is used. If that fails and "use_both_schemes" * is set, then the driver will make another attempt, using the other scheme. */ static bool old_scheme_first = 0; module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(old_scheme_first, "start with the old device initialization scheme"); static bool use_both_schemes = 1; module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(use_both_schemes, "try the other device initialization scheme if the " "first one fails"); /* Mutual exclusion for EHCI CF initialization. This interferes with * port reset on some companion controllers. 
*/ DECLARE_RWSEM(ehci_cf_port_reset_rwsem); EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); #define HUB_DEBOUNCE_TIMEOUT 2000 #define HUB_DEBOUNCE_STEP 25 #define HUB_DEBOUNCE_STABLE 100 static int usb_reset_and_verify_device(struct usb_device *udev); #define usb_sndaddr0pipe() (PIPE_CONTROL << 30) #define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN) //#define ORG_SUSPEND_RESUME_TEST #ifdef ORG_SUSPEND_RESUME_TEST #include <linux/proc_fs.h> #include <linux/uaccess.h> #endif #ifdef CONFIG_MTK_ICUSB_SUPPORT #define DEBUG #include <linux/proc_fs.h> #include <linux/uaccess.h> #include <net/sock.h> #include <net/netlink.h> #include <linux/skbuff.h> static struct sock *netlink_sock; static u_int g_pid; static struct usb_hub *gt_rootHub = NULL; static struct usb_device *g_sim_dev = NULL; static struct IC_USB_CMD ic_cmd; unsigned int g_ic_usb_status; void musbfsh_start_session(); void musbfsh_start_session_pure(void ); void musbfsh_stop_session(); void musbfsh_init_phy_by_voltage(enum PHY_VOLTAGE_TYPE); void usb11_phy_set_test(void); void usb11_phy_33V_bias_control(int enable); void mt65xx_usb11_phy_poweron_volt_30(void); enum PHY_VOLTAGE_TYPE get_usb11_phy_voltage(void); void mt65xx_usb11_mac_reset_and_phy_stress_set(void); void musbfsh_root_disc_procedure(void); int is_usb11_enabled(void); void mt65xx_usb11_suspend_resume_test(void); void mt65xx_usb20_suspend_resume_test(void); void create_ic_tmp_entry(void); extern struct my_attr power_resume_time_neogo_attr; extern struct my_attr skip_session_req_attr; extern struct my_attr skip_enable_session_attr; extern struct my_attr skip_mac_init_attr; extern struct my_attr resistor_control_attr; extern struct my_attr hw_dbg_attr; extern struct my_attr skip_port_pm_attr; static struct my_attr my_attr_test = { .attr.name = "my_attr_test", .attr.mode = 0644, .value = 1 }; static struct attribute *myattr[] = { &my_attr_test, &power_resume_time_neogo_attr, &skip_session_req_attr, &skip_enable_session_attr, &skip_mac_init_attr, 
&resistor_control_attr, &hw_dbg_attr, &skip_port_pm_attr, NULL }; static ssize_t default_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct my_attr *a = container_of(attr, struct my_attr, attr); return scnprintf(buf, PAGE_SIZE, "%d\n", a->value); } static ssize_t default_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct my_attr *a = container_of(attr, struct my_attr, attr); sscanf(buf, "%d", &a->value); return sizeof(int); } static struct sysfs_ops myops = { .show = default_show, .store = default_store, }; static struct kobj_type mytype = { .sysfs_ops = &myops, .default_attrs = myattr, }; struct kobject *mykobj; void create_icusb_sysfs_attr(void) { int err = -1; mykobj = kzalloc(sizeof(*mykobj), GFP_KERNEL); if (mykobj) { MYDBG(""); kobject_init(mykobj, &mytype); if (kobject_add(mykobj, NULL, "%s", "icusb_attr")) { err = -1; MYDBG("Sysfs creation failed\n"); kobject_put(mykobj); mykobj = NULL; } err = 0; } return err; } void my_attr_test_procedure(void) { if(my_attr_test.value) { MYDBG("my_attr_test.value != 0 \n"); } else { MYDBG("my_attr_test.value == 0 \n"); } } char *get_root_hub_udev(void) { if(gt_rootHub) { MYDBG(""); return (char *)(gt_rootHub->hdev); } else { MYDBG(""); return NULL; } } char *get_usb_sim_udev(void) { return (char *)(g_sim_dev); } void usb11_wait_disconnect_done(int value) { if(is_usb11_enabled()) { while(1) { unsigned int ic_usb_status = g_ic_usb_status; MYDBG("ic_usb_status : %x\n", ic_usb_status); ic_usb_status &= (USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT); MYDBG("ic_usb_status : %x\n", ic_usb_status); if(ic_usb_status == (USB_PORT1_DISCONNECT_DONE << USB_PORT1_STS_SHIFT)) { MYDBG("USB_PORT1_DISCONNECT_DONE\n"); break; } if(ic_usb_status == (USB_PORT1_DISCONNECTING << USB_PORT1_STS_SHIFT)) { MYDBG("USB_PORT1_DISCONNECTING\n"); } msleep(10); } } else { MYDBG("usb11 is not enabled, skip usb11_wait_disconnect_done()\n"); } } int check_usb11_sts_disconnect_done(void) { unsigned int 
ic_usb_status = g_ic_usb_status; MYDBG("ic_usb_status : %x\n", ic_usb_status); ic_usb_status &= (USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT); MYDBG("ic_usb_status : %x\n", ic_usb_status); if(ic_usb_status == (USB_PORT1_DISCONNECT_DONE << USB_PORT1_STS_SHIFT)) { MYDBG("USB_PORT1_DISCONNECT_DONE got\n"); return 1; } else { return 0; } } void set_usb11_sts_connect(void) { MYDBG("..................."); g_ic_usb_status &= ~(USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT); g_ic_usb_status |= ((USB_PORT1_CONNECT) << USB_PORT1_STS_SHIFT); } void set_usb11_sts_disconnecting(void) { MYDBG("..................."); g_ic_usb_status &= ~(USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT); g_ic_usb_status |= ((USB_PORT1_DISCONNECTING) << USB_PORT1_STS_SHIFT); } void set_usb11_sts_disconnect_done(void) { MYDBG("..................."); g_ic_usb_status &= ~(USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT); g_ic_usb_status |= ((USB_PORT1_DISCONNECT_DONE) << USB_PORT1_STS_SHIFT); } void set_usb11_data_of_interface_power_request(short data) { MYDBG("..................."); g_ic_usb_status |= ((data) << PREFER_VOL_CLASS_SHIFT); } void reset_usb11_phy_power_negotiation_status(void) { MYDBG("..................."); g_ic_usb_status &= ~(PREFER_VOL_STS_MSK << PREFER_VOL_STS_SHIFT); g_ic_usb_status |= ((PREFER_VOL_NOT_INITED) << PREFER_VOL_STS_SHIFT); } void set_usb11_phy_power_negotiation_fail(void) { MYDBG("..................."); g_ic_usb_status &= ~(PREFER_VOL_STS_MSK << PREFER_VOL_STS_SHIFT); g_ic_usb_status |= ((PREFER_VOL_PWR_NEG_FAIL) << PREFER_VOL_STS_SHIFT); } void set_usb11_phy_power_negotiation_ok(void) { MYDBG("..................."); g_ic_usb_status &= ~(PREFER_VOL_STS_MSK << PREFER_VOL_STS_SHIFT); g_ic_usb_status |= ((PREFER_VOL_PWR_NEG_OK) << PREFER_VOL_STS_SHIFT); } void usb11_phy_prefer_3v_status_check(void) { unsigned int ic_usb_status = g_ic_usb_status; MYDBG("ic_usb_status : %x\n", ic_usb_status); ic_usb_status &= (PREFER_VOL_STS_MSK << PREFER_VOL_STS_SHIFT); MYDBG("ic_usb_status : %x\n", 
/*
 * Minimal sprintf clone used by the debug netlink path.
 *
 * Formats into 'buf' with the same unbounded contract as sprintf (the
 * caller guarantees capacity) and returns the number of characters
 * written, excluding the terminating NUL.
 */
int sprintf1(char *buf, const char *fmt, ...)
{
	va_list ap;
	int written;

	va_start(ap, fmt);
	written = vsprintf(buf, fmt, ap);
	va_end(ap);

	return written;
}
*/ static void udp_receive(struct sk_buff *skb) { u_int uid, pid, seq, sid; void *data; struct nlmsghdr *nlh; MYDBG(""); nlh = (struct nlmsghdr *)skb->data; /* global here */ g_pid = NETLINK_CREDS(skb)->pid; uid = NETLINK_CREDS(skb)->uid; seq = nlh->nlmsg_seq; data = NLMSG_DATA(nlh); MYDBG("recv skb from user space uid:%d pid:%d seq:%d,sid:%d\n",uid,g_pid,seq,sid); MYDBG("data is :%s\n",(char *)data); char reply_data[16]; sprintf(reply_data, "%d", g_pid); udp_reply(g_pid, 0, reply_data); } struct netlink_kernel_cfg nl_cfg = { .input = udp_receive, }; void dump_data(char *buf, int len) { int i; for(i =0 ; i< len ; i++) { MYDBG("data[%d]: %x\n", i, buf[i]); } } int usb11_init_phy_by_voltage(enum PHY_VOLTAGE_TYPE phy_volt) { musbfsh_init_phy_by_voltage(phy_volt); return 0; } int usb11_session_control(enum SESSION_CONTROL_ACTION action) { if(action == START_SESSION) musbfsh_start_session(); else if(action == STOP_SESSION) { //musbfsh_stop_session(); if(!is_usb11_enabled()) { mt65xx_usb11_mac_reset_and_phy_stress_set(); } else { MYDBG("usb11 has been enabled, skip mt65xx_usb11_mac_reset_and_phy_stress_set()\n"); } } else MYDBG("unknown action\n"); return 0; } static ssize_t musbfsh_ic_usb_cmd_proc_status_read(struct file *file_ptr, char __user *user_buffer, size_t count, loff_t *position) { int ret, len; MYDBG(""); if( copy_to_user(user_buffer, &g_ic_usb_status, sizeof(g_ic_usb_status)) != 0 ) { return -EFAULT; } // *position += count; len = sizeof(g_ic_usb_status); return len; } ssize_t musbfsh_ic_usb_cmd_proc_entry(struct file *file_ptr, char __user *user_buffer, size_t count, loff_t *position) { int ret = copy_from_user((char *) &ic_cmd, user_buffer, count); struct timeval tv_begin, tv_end; struct usb_device *udev; int result; do_gettimeofday(&tv_begin); MYDBG("==================>>>, len : %d, ret : %d\n", count, ret); if(ret != 0) { return -EFAULT; } udev = gt_rootHub->hdev; usb_lock_device(udev); result = usb_autoresume_device(udev); if (result < 0) { MYDBG("can't 
autoresume, result : %d\n", result); usb_autosuspend_device(udev); goto auto_resume_fail; } else { MYDBG("autoresume !!!, result : %d\n", result); } MYDBG("type : %x, length : %x, data[0] : %x\n", ic_cmd.type, ic_cmd.length, ic_cmd.data[0]); switch(ic_cmd.type) { case USB11_SESSION_CONTROL: MYDBG(""); usb11_session_control(ic_cmd.data[0]); break; case USB11_INIT_PHY_BY_VOLTAGE: MYDBG(""); usb11_init_phy_by_voltage(ic_cmd.data[0]); break; case USB11_WAIT_DISCONNECT_DONE: MYDBG(""); usb11_wait_disconnect_done(ic_cmd.data[0]); break; /*--- special purpose ---*/ case 's': MYDBG("create sysfs\n"); create_icusb_sysfs_attr(); break; case 't': MYDBG("create tmp proc\n"); create_ic_tmp_entry(); break; default: MYDBG("maybe u forget to add break\n"); usb11_phy_set_test(); break; } auto_resume_fail: usb_autosuspend_device(udev); usb_unlock_device(udev); do_gettimeofday(&tv_end); MYDBG("time spent, sec : %d, usec : %d, <<<===================\n", (tv_end.tv_sec - tv_begin.tv_sec), (tv_end.tv_usec - tv_begin.tv_usec)); return count; } struct file_operations musbfsh_ic_usb_cmd_proc_fops = { .read = musbfsh_ic_usb_cmd_proc_status_read, .write = musbfsh_ic_usb_cmd_proc_entry }; void ic_usb_test_device_ep0(char action) { int ret; char data_buf[256]; int result = usb_autoresume_device(g_sim_dev); MYDBG("action : %c\n", action); if (result < 0) { MYDBG("can't autoresume, result : %d\n", result); return -2; } else { MYDBG("autoresume ok, result : %d\n", result); } int i = 0; #define TEST_CNT 100000 switch (action) { case '1': ret = usb_control_msg(g_sim_dev, usb_rcvctrlpipe(g_sim_dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, USB_DT_DEVICE << 8, 0, data_buf, 64, USB_CTRL_GET_TIMEOUT); break; case '2': while(i++ < TEST_CNT) { ret = usb_control_msg(g_sim_dev, usb_rcvctrlpipe(g_sim_dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, USB_DT_DEVICE << 8, 0, data_buf, 64, USB_CTRL_GET_TIMEOUT); if (ret < 0) { MYDBG("test ep fail, ret : %d\n", ret); } else { MYDBG("test ep0 ok, ret : %d\n", ret); 
dump_data(data_buf, ret); } } break; default: break; } if (ret < 0) { MYDBG("test ep fail, ret : %d\n", ret); } else { MYDBG("test ep0 ok, ret : %d\n", ret); dump_data(data_buf, ret); } usb_autosuspend_device(g_sim_dev); } void ic_usb_test_hub_ep0(void) { int ret; char data_buf[256]; struct usb_device *udev; udev = gt_rootHub->hdev; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, USB_DT_DEVICE << 8, 0, data_buf, 64, USB_CTRL_GET_TIMEOUT); if (ret < 0) { MYDBG("test ep fail, ret : %d\n", ret); } else { MYDBG("test ep ok, ret : %d\n", ret); dump_data(data_buf, ret); } } static ssize_t musbfsh_ic_tmp_proc_entry(struct file *file_ptr, char __user *user_buffer, size_t count, loff_t *position) { char cmd[64]; int ret = copy_from_user((char *) &cmd, user_buffer, count); if(ret != 0) { return -EFAULT; } struct usb_device *udev; udev = gt_rootHub->hdev; usb_lock_device(udev); int result = usb_autoresume_device(udev); if (result < 0) { MYDBG("can't autoresume, result : %d\n", result); usb_autosuspend_device(udev); return -2; } else MYDBG("autoresume !!!, result : %d\n", result); /* apply action here */ if(cmd[0] == '0') { MYDBG(""); musbfsh_start_session_pure(); } else if(cmd[0] == '1') { MYDBG(""); mt65xx_usb11_phy_poweron_volt_30(); } else if(cmd[0] == '2') { MYDBG(""); ic_usb_test_hub_ep0(); } else if(cmd[0] == '3') { MYDBG(""); ic_usb_test_device_ep0(cmd[1]); } else if(cmd[0] == '4') { MYDBG(""); char payload[1024]; udp_reply(g_pid, 0, "HELLO, SS7_IC_USB!!!"); } else if(cmd[0] == '5') { MYDBG(""); } else if(cmd[0] == '6') { MYDBG(""); mt65xx_usb11_mac_reset_and_phy_stress_set(); } else if(cmd[0] == '7') { MYDBG(""); set_usb11_phy_power_negotiation_ok() ; } else if(cmd[0] == '8') { MYDBG(""); set_usb11_phy_power_negotiation_fail() ; } else if(cmd[0] == '9') { MYDBG(""); reset_usb11_phy_power_negotiation_status(); } else if(cmd[0] == 'a') { MYDBG(""); usb11_phy_prefer_3v_status_check(); } else if(cmd[0] == 'b') { MYDBG(""); 
musbfsh_root_disc_procedure(); } else if(cmd[0] == 'c') { MYDBG(""); set_usb11_sts_disconnecting(); } else if(cmd[0] == 'd') { MYDBG(""); set_usb11_sts_disconnect_done(); } else if(cmd[0] == 'e') { MYDBG(""); usb11_wait_disconnect_done(0); } else if(cmd[0] == 'f') { MYDBG("g_sim_dev :%x, gt_rootHub->hdev : %x\n", g_sim_dev, gt_rootHub->hdev); } else if(cmd[0] == 'g') { mt65xx_usb11_suspend_resume_test(); } else if(cmd[0] == 'h') { mt65xx_usb20_suspend_resume_test(); } else if(cmd[0] == 'i') { create_icusb_sysfs_attr(); } if(cmd[0] == 'z') { my_attr_test_procedure(); } usb_autosuspend_device(udev); usb_unlock_device(udev); MYDBG(""); return count; } struct file_operations musbfsh_ic_tmp_proc_fops = { .write = musbfsh_ic_tmp_proc_entry }; void create_ic_usb_cmd_proc_entry(void) { struct proc_dir_entry *prEntry; MYDBG(""); prEntry = proc_create("IC_USB_CMD_ENTRY", 0660, 0, &musbfsh_ic_usb_cmd_proc_fops); if (prEntry) { MYDBG("add /proc/IC_USB_CMD_ENTRY ok\n"); } else { MYDBG("add /proc/IC_USB_CMD_ENTRY fail\n"); } } void create_ic_tmp_entry(void) { struct proc_dir_entry *prEntry; MYDBG(""); prEntry = proc_create("IC_TMP_ENTRY", 0660, 0, &musbfsh_ic_tmp_proc_fops); if (prEntry) { MYDBG("add /proc/IC_TMP_ENTRY ok\n"); } else { MYDBG("add /proc/IC_TMP_ENTRY fail\n"); } } #endif #ifdef ORG_SUSPEND_RESUME_TEST static struct usb_hub *gt_rootHub = NULL; static struct usb_device *g_sim_dev = NULL; void mt65xx_usb11_suspend_resume_test(void); void mt65xx_usb20_suspend_resume_test(void); void dump_data(char *buf, int len) { int i; for(i =0 ; i< len ; i++) { MYDBG("data[%d]: %x\n", i, buf[i]); } } void ic_usb_test_device_ep0(char action) { MYDBG("action : %c\n", action); int ret; char data_buf[256]; int result = usb_autoresume_device(g_sim_dev); if (result < 0) { MYDBG("can't autoresume, result : %d\n", result); return -2; } else { MYDBG("autoresume ok, result : %d\n", result); } int i = 0; #define TEST_CNT 100000 switch (action) { case '1': ret = usb_control_msg(g_sim_dev, 
usb_rcvctrlpipe(g_sim_dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, USB_DT_DEVICE << 8, 0, data_buf, 64, USB_CTRL_GET_TIMEOUT); break; case '2': while(i++ < TEST_CNT) { ret = usb_control_msg(g_sim_dev, usb_rcvctrlpipe(g_sim_dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, USB_DT_DEVICE << 8, 0, data_buf, 64, USB_CTRL_GET_TIMEOUT); if (ret < 0) { MYDBG("test ep fail, ret : %d\n", ret); } else { MYDBG("test ep0 ok, ret : %d\n", ret); dump_data(data_buf, ret); } } break; default: break; } if (ret < 0) { MYDBG("test ep fail, ret : %d\n", ret); } else { MYDBG("test ep0 ok, ret : %d\n", ret); dump_data(data_buf, ret); } usb_autosuspend_device(g_sim_dev); } static ssize_t musbfsh_ic_tmp_proc_entry(struct file *file_ptr, char __user *user_buffer, size_t count, loff_t *position) { char cmd[64]; int ret = copy_from_user((char *) &cmd, user_buffer, count); if(ret != 0) { return -EFAULT; } struct usb_device *udev; udev = gt_rootHub->hdev; usb_lock_device(udev); int result = usb_autoresume_device(udev); if (result < 0) { MYDBG("can't autoresume, result : %d\n", result); usb_autosuspend_device(udev); return -2; } else MYDBG("autoresume !!!, result : %d\n", result); if(cmd[0] == 'g') { mt65xx_usb11_suspend_resume_test(); } else if(cmd[0] == 'h') { mt65xx_usb20_suspend_resume_test(); } else if(cmd[0] == '3') { MYDBG(""); ic_usb_test_device_ep0(cmd[1]); } usb_autosuspend_device(udev); usb_unlock_device(udev); MYDBG(""); return count; } struct file_operations musbfsh_ic_tmp_proc_fops = { .write = musbfsh_ic_tmp_proc_entry }; void create_ic_tmp_entry(void) { struct proc_dir_entry *prEntry; MYDBG(""); prEntry = proc_create("IC_TMP_ENTRY", 0660, 0, &musbfsh_ic_tmp_proc_fops); if (prEntry) { MYDBG("add /proc/IC_TMP_ENTRY ok\n"); } else { MYDBG("add /proc/IC_TMP_ENTRY fail\n"); } } #endif static inline char *portspeed(struct usb_hub *hub, int portstatus) { if (hub_is_superspeed(hub->hdev)) return "5.0 Gb/s"; if (portstatus & USB_PORT_STAT_HIGH_SPEED) return "480 Mb/s"; else if (portstatus & 
USB_PORT_STAT_LOW_SPEED) return "1.5 Mb/s"; else return "12 Mb/s"; } /* Note that hdev or one of its children must be locked! */ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) { if (!hdev || !hdev->actconfig || !hdev->maxchild) return NULL; return usb_get_intfdata(hdev->actconfig->interface[0]); } static int usb_device_supports_lpm(struct usb_device *udev) { /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. */ if (udev->speed == USB_SPEED_HIGH) { if (udev->bos->ext_cap && (USB_LPM_SUPPORT & le32_to_cpu(udev->bos->ext_cap->bmAttributes))) return 1; return 0; } /* All USB 3.0 must support LPM, but we need their max exit latency * information from the SuperSpeed Extended Capabilities BOS descriptor. */ if (!udev->bos->ss_cap) { dev_warn(&udev->dev, "No LPM exit latency info found. " "Power management will be impacted.\n"); return 0; } if (udev->parent->lpm_capable) return 1; dev_warn(&udev->dev, "Parent hub missing LPM exit latency info. " "Power management will be impacted.\n"); return 0; } /* * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from * either U1 or U2. */ static void usb_set_lpm_mel(struct usb_device *udev, struct usb3_lpm_parameters *udev_lpm_params, unsigned int udev_exit_latency, struct usb_hub *hub, struct usb3_lpm_parameters *hub_lpm_params, unsigned int hub_exit_latency) { unsigned int total_mel; unsigned int device_mel; unsigned int hub_mel; /* * Calculate the time it takes to transition all links from the roothub * to the parent hub into U0. The parent hub must then decode the * packet (hub header decode latency) to figure out which port it was * bound for. * * The Hub Header decode latency is expressed in 0.1us intervals (0x1 * means 0.1us). Multiply that by 100 to get nanoseconds. */ total_mel = hub_lpm_params->mel + (hub->descriptor->u.ss.bHubHdrDecLat * 100); /* * How long will it take to transition the downstream hub's port into * U0? 
The greater of either the hub exit latency or the device exit * latency. * * The BOS U1/U2 exit latencies are expressed in 1us intervals. * Multiply that by 1000 to get nanoseconds. */ device_mel = udev_exit_latency * 1000; hub_mel = hub_exit_latency * 1000; if (device_mel > hub_mel) total_mel += device_mel; else total_mel += hub_mel; udev_lpm_params->mel = total_mel; } /* * Set the maximum Device to Host Exit Latency (PEL) for the device to initiate * a transition from either U1 or U2. */ static void usb_set_lpm_pel(struct usb_device *udev, struct usb3_lpm_parameters *udev_lpm_params, unsigned int udev_exit_latency, struct usb_hub *hub, struct usb3_lpm_parameters *hub_lpm_params, unsigned int hub_exit_latency, unsigned int port_to_port_exit_latency) { unsigned int first_link_pel; unsigned int hub_pel; /* * First, the device sends an LFPS to transition the link between the * device and the parent hub into U0. The exit latency is the bigger of * the device exit latency or the hub exit latency. */ if (udev_exit_latency > hub_exit_latency) first_link_pel = udev_exit_latency * 1000; else first_link_pel = hub_exit_latency * 1000; /* * When the hub starts to receive the LFPS, there is a slight delay for * it to figure out that one of the ports is sending an LFPS. Then it * will forward the LFPS to its upstream link. The exit latency is the * delay, plus the PEL that we calculated for this hub. */ hub_pel = port_to_port_exit_latency * 1000 + hub_lpm_params->pel; /* * According to figure C-7 in the USB 3.0 spec, the PEL for this device * is the greater of the two exit latencies. */ if (first_link_pel > hub_pel) udev_lpm_params->pel = first_link_pel; else udev_lpm_params->pel = hub_pel; } /* * Set the System Exit Latency (SEL) to indicate the total worst-case time from * when a device initiates a transition to U0, until when it will receive the * first packet from the host controller. 
* * Section C.1.5.1 describes the four components to this: * - t1: device PEL * - t2: time for the ERDY to make it from the device to the host. * - t3: a host-specific delay to process the ERDY. * - t4: time for the packet to make it from the host to the device. * * t3 is specific to both the xHCI host and the platform the host is integrated * into. The Intel HW folks have said it's negligible, FIXME if a different * vendor says otherwise. */ static void usb_set_lpm_sel(struct usb_device *udev, struct usb3_lpm_parameters *udev_lpm_params) { struct usb_device *parent; unsigned int num_hubs; unsigned int total_sel; /* t1 = device PEL */ total_sel = udev_lpm_params->pel; /* How many external hubs are in between the device & the root port. */ for (parent = udev->parent, num_hubs = 0; parent->parent; parent = parent->parent) num_hubs++; /* t2 = 2.1us + 250ns * (num_hubs - 1) */ if (num_hubs > 0) total_sel += 2100 + 250 * (num_hubs - 1); /* t4 = 250ns * num_hubs */ total_sel += 250 * num_hubs; udev_lpm_params->sel = total_sel; } static void usb_set_lpm_parameters(struct usb_device *udev) { struct usb_hub *hub; unsigned int port_to_port_delay; unsigned int udev_u1_del; unsigned int udev_u2_del; unsigned int hub_u1_del; unsigned int hub_u2_del; if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER) return; hub = usb_hub_to_struct_hub(udev->parent); /* It doesn't take time to transition the roothub into U0, since it * doesn't have an upstream link. 
*/ if (!hub) return; udev_u1_del = udev->bos->ss_cap->bU1devExitLat; udev_u2_del = udev->bos->ss_cap->bU2DevExitLat; hub_u1_del = udev->parent->bos->ss_cap->bU1devExitLat; hub_u2_del = udev->parent->bos->ss_cap->bU2DevExitLat; usb_set_lpm_mel(udev, &udev->u1_params, udev_u1_del, hub, &udev->parent->u1_params, hub_u1_del); usb_set_lpm_mel(udev, &udev->u2_params, udev_u2_del, hub, &udev->parent->u2_params, hub_u2_del); /* * Appendix C, section C.2.2.2, says that there is a slight delay from * when the parent hub notices the downstream port is trying to * transition to U0 to when the hub initiates a U0 transition on its * upstream port. The section says the delays are tPort2PortU1EL and * tPort2PortU2EL, but it doesn't define what they are. * * The hub chapter, sections 10.4.2.4 and 10.4.2.5 seem to be talking * about the same delays. Use the maximum delay calculations from those * sections. For U1, it's tHubPort2PortExitLat, which is 1us max. For * U2, it's tHubPort2PortExitLat + U2DevExitLat - U1DevExitLat. I * assume the device exit latencies they are talking about are the hub * exit latencies. * * What do we do if the U2 exit latency is less than the U1 exit * latency? It's possible, although not likely... */ port_to_port_delay = 1; usb_set_lpm_pel(udev, &udev->u1_params, udev_u1_del, hub, &udev->parent->u1_params, hub_u1_del, port_to_port_delay); if (hub_u2_del > hub_u1_del) port_to_port_delay = 1 + hub_u2_del - hub_u1_del; else port_to_port_delay = 1 + hub_u1_del; usb_set_lpm_pel(udev, &udev->u2_params, udev_u2_del, hub, &udev->parent->u2_params, hub_u2_del, port_to_port_delay); /* Now that we've got PEL, calculate SEL. 
*/ usb_set_lpm_sel(udev, &udev->u1_params); usb_set_lpm_sel(udev, &udev->u2_params); } /* USB 2.0 spec Section 11.24.4.5 */ static int get_hub_descriptor(struct usb_device *hdev, void *data) { int i, ret, size; unsigned dtype; if (hub_is_superspeed(hdev)) { dtype = USB_DT_SS_HUB; size = USB_DT_SS_HUB_SIZE; } else { dtype = USB_DT_HUB; size = sizeof(struct usb_hub_descriptor); } for (i = 0; i < 3; i++) { ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, dtype << 8, 0, data, size, USB_CTRL_GET_TIMEOUT); if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2)) return ret; } return -EINVAL; } /* * USB 2.0 spec Section 11.24.2.1 */ static int clear_hub_feature(struct usb_device *hdev, int feature) { return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), USB_REQ_CLEAR_FEATURE, USB_RT_HUB, feature, 0, NULL, 0, 1000); } /* * USB 2.0 spec Section 11.24.2.2 */ int usb_clear_port_feature(struct usb_device *hdev, int port1, int feature) { return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1, NULL, 0, 1000); } /* * USB 2.0 spec Section 11.24.2.13 */ static int set_port_feature(struct usb_device *hdev, int port1, int feature) { return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1, NULL, 0, 1000); } /* * USB 2.0 spec Section 11.24.2.7.1.10 and table 11-7 * for info about using port indicators */ static void set_port_led( struct usb_hub *hub, int port1, int selector ) { int status = set_port_feature(hub->hdev, (selector << 8) | port1, USB_PORT_FEAT_INDICATOR); if (status < 0) dev_dbg (hub->intfdev, "port %d indicator %s status %d\n", port1, ({ char *s; switch (selector) { case HUB_LED_AMBER: s = "amber"; break; case HUB_LED_GREEN: s = "green"; break; case HUB_LED_OFF: s = "off"; break; case HUB_LED_AUTO: s = "auto"; break; default: s = "??"; break; }; s; }), status); } #define LED_CYCLE_PERIOD ((2*HZ)/3) static void led_work (struct 
work_struct *work) { struct usb_hub *hub = container_of(work, struct usb_hub, leds.work); struct usb_device *hdev = hub->hdev; unsigned i; unsigned changed = 0; int cursor = -1; if (hdev->state != USB_STATE_CONFIGURED || hub->quiescing) return; for (i = 0; i < hub->descriptor->bNbrPorts; i++) { unsigned selector, mode; /* 30%-50% duty cycle */ switch (hub->indicator[i]) { /* cycle marker */ case INDICATOR_CYCLE: cursor = i; selector = HUB_LED_AUTO; mode = INDICATOR_AUTO; break; /* blinking green = sw attention */ case INDICATOR_GREEN_BLINK: selector = HUB_LED_GREEN; mode = INDICATOR_GREEN_BLINK_OFF; break; case INDICATOR_GREEN_BLINK_OFF: selector = HUB_LED_OFF; mode = INDICATOR_GREEN_BLINK; break; /* blinking amber = hw attention */ case INDICATOR_AMBER_BLINK: selector = HUB_LED_AMBER; mode = INDICATOR_AMBER_BLINK_OFF; break; case INDICATOR_AMBER_BLINK_OFF: selector = HUB_LED_OFF; mode = INDICATOR_AMBER_BLINK; break; /* blink green/amber = reserved */ case INDICATOR_ALT_BLINK: selector = HUB_LED_GREEN; mode = INDICATOR_ALT_BLINK_OFF; break; case INDICATOR_ALT_BLINK_OFF: selector = HUB_LED_AMBER; mode = INDICATOR_ALT_BLINK; break; default: continue; } if (selector != HUB_LED_AUTO) changed = 1; set_port_led(hub, i + 1, selector); hub->indicator[i] = mode; } if (!changed && blinkenlights) { cursor++; cursor %= hub->descriptor->bNbrPorts; set_port_led(hub, cursor + 1, HUB_LED_GREEN); hub->indicator[cursor] = INDICATOR_CYCLE; changed++; } if (changed) schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD); } /* use a short timeout for hub/port status fetches */ #define USB_STS_TIMEOUT 1000 #define USB_STS_RETRIES 5 /* * USB 2.0 spec Section 11.24.2.6 */ static int get_hub_status(struct usb_device *hdev, struct usb_hub_status *data) { int i, status = -ETIMEDOUT; for (i = 0; i < USB_STS_RETRIES && (status == -ETIMEDOUT || status == -EPIPE); i++) { status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0, data, sizeof(*data), 
USB_STS_TIMEOUT); } return status; } /* * USB 2.0 spec Section 11.24.2.7 */ static int get_port_status(struct usb_device *hdev, int port1, struct usb_port_status *data) { int i, status = -ETIMEDOUT; for (i = 0; i < USB_STS_RETRIES && (status == -ETIMEDOUT || status == -EPIPE); i++) { status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1, data, sizeof(*data), USB_STS_TIMEOUT); } return status; } static int hub_port_status(struct usb_hub *hub, int port1, u16 *status, u16 *change) { int ret; mutex_lock(&hub->status_mutex); ret = get_port_status(hub->hdev, port1, &hub->status->port); if (ret < 4) { if (ret != -ENODEV) dev_err(hub->intfdev, "%s failed (err = %d)\n", __func__, ret); if (ret >= 0) ret = -EIO; } else { *status = le16_to_cpu(hub->status->port.wPortStatus); *change = le16_to_cpu(hub->status->port.wPortChange); ret = 0; } mutex_unlock(&hub->status_mutex); return ret; } static void kick_khubd(struct usb_hub *hub) { unsigned long flags; spin_lock_irqsave(&hub_event_lock, flags); if (!hub->disconnected && list_empty(&hub->event_list)) { list_add_tail(&hub->event_list, &hub_event_list); /* Suppress autosuspend until khubd runs */ usb_autopm_get_interface_no_resume( to_usb_interface(hub->intfdev)); wake_up(&khubd_wait); } spin_unlock_irqrestore(&hub_event_lock, flags); } void usb_kick_khubd(struct usb_device *hdev) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); if (hub) kick_khubd(hub); } /* * Let the USB core know that a USB 3.0 device has sent a Function Wake Device * Notification, which indicates it had initiated remote wakeup. * * USB 3.0 hubs do not report the port link state change from U3 to U0 when the * device initiates resume, so the USB core will not receive notice of the * resume through the normal hub interrupt URB. 
*/ void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum) { struct usb_hub *hub; if (!hdev) return; hub = usb_hub_to_struct_hub(hdev); if (hub) { set_bit(portnum, hub->wakeup_bits); kick_khubd(hub); } } EXPORT_SYMBOL_GPL(usb_wakeup_notification); /* completion function, fires on port status changes and various faults */ static void hub_irq(struct urb *urb) { struct usb_hub *hub = urb->context; int status = urb->status; unsigned i; unsigned long bits; switch (status) { case -ENOENT: /* synchronous unlink */ case -ECONNRESET: /* async unlink */ case -ESHUTDOWN: /* hardware going away */ return; default: /* presumably an error */ /* Cause a hub reset after 10 consecutive errors */ dev_dbg (hub->intfdev, "transfer --> %d\n", status); if ((++hub->nerrors < 10) || hub->error) goto resubmit; hub->error = status; /* FALL THROUGH */ /* let khubd handle things */ case 0: /* we got data: port status changed */ bits = 0; for (i = 0; i < urb->actual_length; ++i) bits |= ((unsigned long) ((*hub->buffer)[i])) << (i*8); hub->event_bits[0] = bits; break; } hub->nerrors = 0; /* Something happened, let khubd figure it out */ kick_khubd(hub); resubmit: if (hub->quiescing) return; if ((status = usb_submit_urb (hub->urb, GFP_ATOMIC)) != 0 && status != -ENODEV && status != -EPERM) dev_err (hub->intfdev, "resubmit --> %d\n", status); } /* USB 2.0 spec Section 11.24.2.3 */ static inline int hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) { /* Need to clear both directions for control ep */ if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_CONTROL) { int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo ^ 0x8000, tt, NULL, 0, 1000); if (status) return status; } return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, tt, NULL, 0, 1000); } /* * enumeration blocks khubd for a long time. 
we use keventd instead, since * long blocking there is the exception, not the rule. accordingly, HCDs * talking to TTs must queue control transfers (not just bulk and iso), so * both can talk to the same hub concurrently. */ static void hub_tt_work(struct work_struct *work) { struct usb_hub *hub = container_of(work, struct usb_hub, tt.clear_work); unsigned long flags; spin_lock_irqsave (&hub->tt.lock, flags); while (!list_empty(&hub->tt.clear_list)) { struct list_head *next; struct usb_tt_clear *clear; struct usb_device *hdev = hub->hdev; const struct hc_driver *drv; int status; next = hub->tt.clear_list.next; clear = list_entry (next, struct usb_tt_clear, clear_list); list_del (&clear->clear_list); /* drop lock so HCD can concurrently report other TT errors */ spin_unlock_irqrestore (&hub->tt.lock, flags); status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt); if (status && status != -ENODEV) dev_err (&hdev->dev, "clear tt %d (%04x) error %d\n", clear->tt, clear->devinfo, status); /* Tell the HCD, even if the operation failed */ drv = clear->hcd->driver; if (drv->clear_tt_buffer_complete) (drv->clear_tt_buffer_complete)(clear->hcd, clear->ep); kfree(clear); spin_lock_irqsave(&hub->tt.lock, flags); } spin_unlock_irqrestore (&hub->tt.lock, flags); } /** * usb_hub_set_port_power - control hub port's power state * @hdev: target hub * @port1: port index * @set: expected status * * call this function to control port's power via setting or * clearing the port's PORT_POWER feature. 
*/ int usb_hub_set_port_power(struct usb_device *hdev, int port1, bool set) { int ret; struct usb_hub *hub = usb_hub_to_struct_hub(hdev); struct usb_port *port_dev = hub->ports[port1 - 1]; if (set) ret = set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); else ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_POWER); if (!ret) port_dev->power_is_on = set; return ret; } /** * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub * @urb: an URB associated with the failed or incomplete split transaction * * High speed HCDs use this to tell the hub driver that some split control or * bulk transaction failed in a way that requires clearing internal state of * a transaction translator. This is normally detected (and reported) from * interrupt context. * * It may not be possible for that hub to handle additional full (or low) * speed transactions until that state is fully cleared out. */ int usb_hub_clear_tt_buffer(struct urb *urb) { struct usb_device *udev = urb->dev; int pipe = urb->pipe; struct usb_tt *tt = udev->tt; unsigned long flags; struct usb_tt_clear *clear; /* we've got to cope with an arbitrary number of pending TT clears, * since each TT has "at least two" buffers that can need it (and * there can be many TTs per hub). even if they're uncommon. */ if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) { dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n"); /* FIXME recover somehow ... RESET_TT? */ return -ENOMEM; } /* info that CLEAR_TT_BUFFER needs */ clear->tt = tt->multi ? udev->ttport : 1; clear->devinfo = usb_pipeendpoint (pipe); clear->devinfo |= udev->devnum << 4; clear->devinfo |= usb_pipecontrol (pipe) ? 
(USB_ENDPOINT_XFER_CONTROL << 11) :
			(USB_ENDPOINT_XFER_BULK << 11);
	if (usb_pipein (pipe))
		clear->devinfo |= 1 << 15;

	/* info for completion callback */
	clear->hcd = bus_to_hcd(udev->bus);
	clear->ep = urb->ep;

	/* tell keventd to clear state for this TT */
	spin_lock_irqsave (&tt->lock, flags);
	list_add_tail (&clear->clear_list, &tt->clear_list);
	schedule_work(&tt->clear_work);
	spin_unlock_irqrestore (&tt->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);

/* If do_delay is false, return the number of milliseconds the caller
 * needs to delay.
 */
static unsigned hub_power_on(struct usb_hub *hub, bool do_delay)
{
	int port1;
	unsigned pgood_delay = hub->descriptor->bPwrOn2PwrGood * 2;
	unsigned delay;
	u16 wHubCharacteristics =
			le16_to_cpu(hub->descriptor->wHubCharacteristics);

	/* Enable power on each port.  Some hubs have reserved values
	 * of LPSM (> 2) in their descriptors, even though they are
	 * USB 2.0 hubs.  Some hubs do not implement port-power switching
	 * but only emulate it.  In all cases, the ports won't work
	 * unless we send these messages to the hub.
	 */
	if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2)
		dev_dbg(hub->intfdev, "enabling power on all ports\n");
	else
		dev_dbg(hub->intfdev, "trying to enable port power on "
				"non-switchable hub\n");
	/* Only power up ports that were not deliberately powered off;
	 * powered-off ports get an explicit clear so hub state matches
	 * our bookkeeping in hub->ports[]->power_is_on.
	 */
	for (port1 = 1; port1 <= hub->descriptor->bNbrPorts; port1++)
		if (hub->ports[port1 - 1]->power_is_on)
			set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER);
		else
			usb_clear_port_feature(hub->hdev, port1,
						USB_PORT_FEAT_POWER);

	/* Wait at least 100 msec for power to become stable */
	delay = max(pgood_delay, (unsigned) 100);
	if (do_delay)
		msleep(delay);
	return delay;
}

/* Read the hub's own status/change words under hub->status_mutex.
 * Returns 0 on success with *status/*change filled in (CPU byte order),
 * or a negative error code.  -ENODEV is expected during disconnect and
 * is not logged.
 */
static int hub_hub_status(struct usb_hub *hub,
		u16 *status, u16 *change)
{
	int ret;

	mutex_lock(&hub->status_mutex);
	ret = get_hub_status(hub->hdev, &hub->status->hub);
	if (ret < 0) {
		if (ret != -ENODEV)
			dev_err(hub->intfdev,
				"%s failed (err = %d)\n", __func__, ret);
	} else {
		*status = le16_to_cpu(hub->status->hub.wHubStatus);
		*change = le16_to_cpu(hub->status->hub.wHubChange);
		ret = 0;
	}
	mutex_unlock(&hub->status_mutex);
	return ret;
}

/* Request a USB 3.0 port link state change.  The link state is encoded
 * in bits 3.. of wIndex alongside the port number, per the hub class
 * SetPortFeature(PORT_LINK_STATE) request.
 */
static int hub_set_port_link_state(struct usb_hub *hub, int port1,
		unsigned int link_status)
{
	return set_port_feature(hub->hdev,
			port1 | (link_status << 3),
			USB_PORT_FEAT_LINK_STATE);
}

/*
 * If USB 3.0 ports are placed into the Disabled state, they will no longer
 * detect any device connects or disconnects.  This is generally not what the
 * USB core wants, since it expects a disabled port to produce a port status
 * change event when a new device connects.
 *
 * Instead, set the link state to Disabled, wait for the link to settle into
 * that state, clear any change bits, and then put the port into the RxDetect
 * state.
 */
static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
{
	int ret;
	int total_time;
	u16 portchange, portstatus;

	if (!hub_is_superspeed(hub->hdev))
		return -EINVAL;

	ret = hub_port_status(hub, port1, &portstatus, &portchange);
	if (ret < 0)
		return ret;

	/*
	 * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
	 * Controller [1022:7814] will have spurious result making the following
	 * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
	 * as high-speed device if we set the usb 3.0 port link state to
	 * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
	 * check the state here to avoid the bug.
	 */
	if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
				USB_SS_PORT_LS_RX_DETECT) {
		dev_dbg(&hub->ports[port1 - 1]->dev,
			 "Not disabling port; link state is RxDetect\n");
		return ret;
	}

	ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
	if (ret)
		return ret;

	/* Wait for the link to enter the disabled state.  Poll in
	 * HUB_DEBOUNCE_STEP increments, bounded by HUB_DEBOUNCE_TIMEOUT.
	 */
	for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
		ret = hub_port_status(hub, port1, &portstatus, &portchange);
		if (ret < 0)
			return ret;

		if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
				USB_SS_PORT_LS_SS_DISABLED)
			break;
		if (total_time >= HUB_DEBOUNCE_TIMEOUT)
			break;
		msleep(HUB_DEBOUNCE_STEP);
	}
	if (total_time >= HUB_DEBOUNCE_TIMEOUT)
		dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
				port1, total_time);

	/* Re-arm connect detection */
	return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
}

/* Disable a hub port.  When set_state is nonzero, any child device on
 * the port is first marked NOTATTACHED so pending URBs fail fast.
 * USB 3 hubs need the RxDetect dance above; USB 2 hubs just get
 * ClearPortFeature(PORT_ENABLE).  Skipped entirely while hub->error
 * is set (hub itself is in trouble).
 */
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
{
	struct usb_device *hdev = hub->hdev;
	int ret = 0;

	if (hub->ports[port1 - 1]->child && set_state)
		usb_set_device_state(hub->ports[port1 - 1]->child,
				USB_STATE_NOTATTACHED);
	if (!hub->error) {
		if (hub_is_superspeed(hub->hdev))
			ret = hub_usb3_port_disable(hub, port1);
		else
			ret = usb_clear_port_feature(hdev, port1,
					USB_PORT_FEAT_ENABLE);
	}
	if (ret && ret != -ENODEV)
		dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
				port1, ret);
	return ret;
}

/*
 * Disable a port and mark a logical connect-change event, so that some
 * time later khubd will disconnect() any existing usb_device on the port
 * and will re-enumerate if there actually is a device attached.
*/ static void hub_port_logical_disconnect(struct usb_hub *hub, int port1) { dev_dbg(hub->intfdev, "logical disconnect on port %d\n", port1); hub_port_disable(hub, port1, 1); /* FIXME let caller ask to power down the port: * - some devices won't enumerate without a VBUS power cycle * - SRP saves power that way * - ... new call, TBD ... * That's easy if this hub can switch power per-port, and * khubd reactivates the port later (timer, SRP, etc). * Powerdown must be optional, because of reset/DFU. */ set_bit(port1, hub->change_bits); kick_khubd(hub); } /** * usb_remove_device - disable a device's port on its parent hub * @udev: device to be disabled and removed * Context: @udev locked, must be able to sleep. * * After @udev's port has been disabled, khubd is notified and it will * see that the device has been disconnected. When the device is * physically unplugged and something is plugged in, the events will * be received and processed normally. */ int usb_remove_device(struct usb_device *udev) { struct usb_hub *hub; struct usb_interface *intf; if (!udev->parent) /* Can't remove a root hub */ return -EINVAL; hub = usb_hub_to_struct_hub(udev->parent); intf = to_usb_interface(hub->intfdev); usb_autopm_get_interface(intf); set_bit(udev->portnum, hub->removed_bits); hub_port_logical_disconnect(hub, udev->portnum); usb_autopm_put_interface(intf); return 0; } enum hub_activation_type { HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */ HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME, }; static void hub_init_func2(struct work_struct *ws); static void hub_init_func3(struct work_struct *ws); static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) { struct usb_device *hdev = hub->hdev; struct usb_hcd *hcd; int ret; int port1; int status; bool need_debounce_delay = false; unsigned delay; /* Continue a partial initialization */ if (type == HUB_INIT2) goto init2; if (type == HUB_INIT3) goto init3; /* The superspeed hub except for root hub has to use Hub 
Depth * value as an offset into the route string to locate the bits * it uses to determine the downstream port number. So hub driver * should send a set hub depth request to superspeed hub after * the superspeed hub is set configuration in initialization or * reset procedure. * * After a resume, port power should still be on. * For any other type of activation, turn it on. */ if (type != HUB_RESUME) { if (hdev->parent && hub_is_superspeed(hdev)) { ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), HUB_SET_DEPTH, USB_RT_HUB, hdev->level - 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret < 0) dev_err(hub->intfdev, "set hub depth failed\n"); } /* Speed up system boot by using a delayed_work for the * hub's initial power-up delays. This is pretty awkward * and the implementation looks like a home-brewed sort of * setjmp/longjmp, but it saves at least 100 ms for each * root hub (assuming usbcore is compiled into the kernel * rather than as a module). It adds up. * * This can't be done for HUB_RESUME or HUB_RESET_RESUME * because for those activation types the ports have to be * operational when we return. In theory this could be done * for HUB_POST_RESET, but it's easier not to. */ if (type == HUB_INIT) { delay = hub_power_on(hub, false); PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2); schedule_delayed_work(&hub->init_work, msecs_to_jiffies(delay)); /* Suppress autosuspend until init is done */ usb_autopm_get_interface_no_resume( to_usb_interface(hub->intfdev)); return; /* Continues at init2: below */ } else if (type == HUB_RESET_RESUME) { /* The internal host controller state for the hub device * may be gone after a host power loss on system resume. * Update the device's info so the HW knows it's a hub. 
*/ hcd = bus_to_hcd(hdev->bus); if (hcd->driver->update_hub_device) { ret = hcd->driver->update_hub_device(hcd, hdev, &hub->tt, GFP_NOIO); if (ret < 0) { dev_err(hub->intfdev, "Host not " "accepting hub info " "update.\n"); dev_err(hub->intfdev, "LS/FS devices " "and hubs may not work " "under this hub\n."); } } hub_power_on(hub, true); } else { hub_power_on(hub, true); } } init2: /* Check each port and set hub->change_bits to let khubd know * which ports need attention. */ for (port1 = 1; port1 <= hdev->maxchild; ++port1) { struct usb_device *udev = hub->ports[port1 - 1]->child; u16 portstatus, portchange; portstatus = portchange = 0; status = hub_port_status(hub, port1, &portstatus, &portchange); if (udev || (portstatus & USB_PORT_STAT_CONNECTION)) dev_dbg(hub->intfdev, "port %d: status %04x change %04x\n", port1, portstatus, portchange); /* After anything other than HUB_RESUME (i.e., initialization * or any sort of reset), every port should be disabled. * Unconnected ports should likewise be disabled (paranoia), * and so should ports for which we have no usb_device. */ if ((portstatus & USB_PORT_STAT_ENABLE) && ( type != HUB_RESUME || !(portstatus & USB_PORT_STAT_CONNECTION) || !udev || udev->state == USB_STATE_NOTATTACHED)) { /* * USB3 protocol ports will automatically transition * to Enabled state when detect an USB3.0 device attach. * Do not disable USB3 protocol ports. 
*/ if (!hub_is_superspeed(hdev)) { usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE); portstatus &= ~USB_PORT_STAT_ENABLE; } else { /* Pretend that power was lost for USB3 devs */ portstatus &= ~USB_PORT_STAT_ENABLE; } } /* Clear status-change flags; we'll debounce later */ if (portchange & USB_PORT_STAT_C_CONNECTION) { need_debounce_delay = true; usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_CONNECTION); } if (portchange & USB_PORT_STAT_C_ENABLE) { need_debounce_delay = true; usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_ENABLE); } if (portchange & USB_PORT_STAT_C_RESET) { need_debounce_delay = true; usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_RESET); } if ((portchange & USB_PORT_STAT_C_BH_RESET) && hub_is_superspeed(hub->hdev)) { need_debounce_delay = true; usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_BH_PORT_RESET); } /* We can forget about a "removed" device when there's a * physical disconnect or the connect status changes. */ if (!(portstatus & USB_PORT_STAT_CONNECTION) || (portchange & USB_PORT_STAT_C_CONNECTION)) clear_bit(port1, hub->removed_bits); if (!udev || udev->state == USB_STATE_NOTATTACHED) { /* Tell khubd to disconnect the device or * check for a new connection */ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || (portstatus & USB_PORT_STAT_OVERCURRENT)) set_bit(port1, hub->change_bits); } else if (portstatus & USB_PORT_STAT_ENABLE) { bool port_resumed = (portstatus & USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0; /* The power session apparently survived the resume. * If there was an overcurrent or suspend change * (i.e., remote wakeup request), have khubd * take care of it. Look at the port link state * for USB 3.0 hubs, since they don't have a suspend * change bit, and they don't set the port link change * bit on device-initiated resume. 
*/ if (portchange || (hub_is_superspeed(hub->hdev) && port_resumed)) set_bit(port1, hub->change_bits); } else if (udev->persist_enabled) { struct usb_port *port_dev = hub->ports[port1 - 1]; #ifdef CONFIG_PM udev->reset_resume = 1; #endif /* Don't set the change_bits when the device * was powered off. */ if (port_dev->power_is_on) set_bit(port1, hub->change_bits); } else { /* The power session is gone; tell khubd */ usb_set_device_state(udev, USB_STATE_NOTATTACHED); set_bit(port1, hub->change_bits); } } /* If no port-status-change flags were set, we don't need any * debouncing. If flags were set we can try to debounce the * ports all at once right now, instead of letting khubd do them * one at a time later on. * * If any port-status changes do occur during this delay, khubd * will see them later and handle them normally. */ if (need_debounce_delay) { delay = HUB_DEBOUNCE_STABLE; /* Don't do a long sleep inside a workqueue routine */ if (type == HUB_INIT2) { PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3); schedule_delayed_work(&hub->init_work, msecs_to_jiffies(delay)); return; /* Continues at init3: below */ } else { msleep(delay); } } init3: hub->quiescing = 0; status = usb_submit_urb(hub->urb, GFP_NOIO); if (status < 0) dev_err(hub->intfdev, "activate --> %d\n", status); if (hub->has_indicators && blinkenlights) schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD); /* Scan all ports that need attention */ kick_khubd(hub); /* Allow autosuspend if it was suppressed */ if (type <= HUB_INIT3) usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); } /* Implement the continuations for the delays above */ static void hub_init_func2(struct work_struct *ws) { struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work); hub_activate(hub, HUB_INIT2); } static void hub_init_func3(struct work_struct *ws) { struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work); hub_activate(hub, HUB_INIT3); } enum hub_quiescing_type { HUB_DISCONNECT, 
HUB_PRE_RESET, HUB_SUSPEND }; static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) { struct usb_device *hdev = hub->hdev; int i; cancel_delayed_work_sync(&hub->init_work); /* khubd and related activity won't re-trigger */ hub->quiescing = 1; if (type != HUB_SUSPEND) { /* Disconnect all the children */ for (i = 0; i < hdev->maxchild; ++i) { if (hub->ports[i]->child) usb_disconnect(&hub->ports[i]->child); } } /* Stop khubd and related activity */ usb_kill_urb(hub->urb); if (hub->has_indicators) cancel_delayed_work_sync(&hub->leds); if (hub->tt.hub) flush_work(&hub->tt.clear_work); } /* caller has locked the hub device */ static int hub_pre_reset(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); hub_quiesce(hub, HUB_PRE_RESET); return 0; } /* caller has locked the hub device */ static int hub_post_reset(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); hub_activate(hub, HUB_POST_RESET); return 0; } static int hub_configure(struct usb_hub *hub, struct usb_endpoint_descriptor *endpoint) { struct usb_hcd *hcd; struct usb_device *hdev = hub->hdev; struct device *hub_dev = hub->intfdev; u16 hubstatus, hubchange; u16 wHubCharacteristics; unsigned int pipe; int maxp, ret, i; char *message = "out of memory"; unsigned unit_load; unsigned full_load; hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL); if (!hub->buffer) { ret = -ENOMEM; goto fail; } hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL); if (!hub->status) { ret = -ENOMEM; goto fail; } mutex_init(&hub->status_mutex); hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL); if (!hub->descriptor) { ret = -ENOMEM; goto fail; } /* Request the entire hub descriptor. * hub->descriptor can handle USB_MAXCHILDREN ports, * but the hub can/will return fewer bytes here. 
*/ ret = get_hub_descriptor(hdev, hub->descriptor); if (ret < 0) { message = "can't read hub descriptor"; goto fail; } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { message = "hub has too many ports!"; ret = -ENODEV; goto fail; } else if (hub->descriptor->bNbrPorts == 0) { message = "hub doesn't have any ports!"; ret = -ENODEV; goto fail; } hdev->maxchild = hub->descriptor->bNbrPorts; dev_info (hub_dev, "%d port%s detected\n", hdev->maxchild, (hdev->maxchild == 1) ? "" : "s"); hub->ports = kzalloc(hdev->maxchild * sizeof(struct usb_port *), GFP_KERNEL); if (!hub->ports) { ret = -ENOMEM; goto fail; } wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); if (hub_is_superspeed(hdev)) { unit_load = 150; full_load = 900; } else { unit_load = 100; full_load = 500; } /* FIXME for USB 3.0, skip for now */ if ((wHubCharacteristics & HUB_CHAR_COMPOUND) && !(hub_is_superspeed(hdev))) { int i; char portstr [USB_MAXCHILDREN + 1]; for (i = 0; i < hdev->maxchild; i++) portstr[i] = hub->descriptor->u.hs.DeviceRemovable [((i + 1) / 8)] & (1 << ((i + 1) % 8)) ? 
'F' : 'R'; portstr[hdev->maxchild] = 0; dev_dbg(hub_dev, "compound device; port removable status: %s\n", portstr); } else dev_dbg(hub_dev, "standalone hub\n"); switch (wHubCharacteristics & HUB_CHAR_LPSM) { case HUB_CHAR_COMMON_LPSM: dev_dbg(hub_dev, "ganged power switching\n"); break; case HUB_CHAR_INDV_PORT_LPSM: dev_dbg(hub_dev, "individual port power switching\n"); break; case HUB_CHAR_NO_LPSM: case HUB_CHAR_LPSM: dev_dbg(hub_dev, "no power switching (usb 1.0)\n"); break; } switch (wHubCharacteristics & HUB_CHAR_OCPM) { case HUB_CHAR_COMMON_OCPM: dev_dbg(hub_dev, "global over-current protection\n"); break; case HUB_CHAR_INDV_PORT_OCPM: dev_dbg(hub_dev, "individual port over-current protection\n"); break; case HUB_CHAR_NO_OCPM: case HUB_CHAR_OCPM: dev_dbg(hub_dev, "no over-current protection\n"); break; } spin_lock_init (&hub->tt.lock); INIT_LIST_HEAD (&hub->tt.clear_list); INIT_WORK(&hub->tt.clear_work, hub_tt_work); switch (hdev->descriptor.bDeviceProtocol) { case USB_HUB_PR_FS: break; case USB_HUB_PR_HS_SINGLE_TT: dev_dbg(hub_dev, "Single TT\n"); hub->tt.hub = hdev; break; case USB_HUB_PR_HS_MULTI_TT: ret = usb_set_interface(hdev, 0, 1); if (ret == 0) { dev_dbg(hub_dev, "TT per port\n"); hub->tt.multi = 1; } else dev_err(hub_dev, "Using single TT (err %d)\n", ret); hub->tt.hub = hdev; break; case USB_HUB_PR_SS: /* USB 3.0 hubs don't have a TT */ break; default: dev_dbg(hub_dev, "Unrecognized hub protocol %d\n", hdev->descriptor.bDeviceProtocol); break; } /* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */ switch (wHubCharacteristics & HUB_CHAR_TTTT) { case HUB_TTTT_8_BITS: if (hdev->descriptor.bDeviceProtocol != 0) { hub->tt.think_time = 666; dev_dbg(hub_dev, "TT requires at most %d " "FS bit times (%d ns)\n", 8, hub->tt.think_time); } break; case HUB_TTTT_16_BITS: hub->tt.think_time = 666 * 2; dev_dbg(hub_dev, "TT requires at most %d " "FS bit times (%d ns)\n", 16, hub->tt.think_time); break; case HUB_TTTT_24_BITS: hub->tt.think_time = 666 * 3; 
dev_dbg(hub_dev, "TT requires at most %d " "FS bit times (%d ns)\n", 24, hub->tt.think_time); break; case HUB_TTTT_32_BITS: hub->tt.think_time = 666 * 4; dev_dbg(hub_dev, "TT requires at most %d " "FS bit times (%d ns)\n", 32, hub->tt.think_time); break; } /* probe() zeroes hub->indicator[] */ if (wHubCharacteristics & HUB_CHAR_PORTIND) { hub->has_indicators = 1; dev_dbg(hub_dev, "Port indicators are supported\n"); } dev_dbg(hub_dev, "power on to power good time: %dms\n", hub->descriptor->bPwrOn2PwrGood * 2); /* power budgeting mostly matters with bus-powered hubs, * and battery-powered root hubs (may provide just 8 mA). */ ret = usb_get_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus); if (ret < 2) { message = "can't get hub status"; goto fail; } le16_to_cpus(&hubstatus); hcd = bus_to_hcd(hdev->bus); if (hdev == hdev->bus->root_hub) { if (hcd->power_budget > 0) hdev->bus_mA = hcd->power_budget; else hdev->bus_mA = full_load * hdev->maxchild; if (hdev->bus_mA >= full_load) hub->mA_per_port = full_load; else { hub->mA_per_port = hdev->bus_mA; hub->limited_power = 1; } } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) { int remaining = hdev->bus_mA - hub->descriptor->bHubContrCurrent; dev_dbg(hub_dev, "hub controller current requirement: %dmA\n", hub->descriptor->bHubContrCurrent); hub->limited_power = 1; if (remaining < hdev->maxchild * unit_load) dev_warn(hub_dev, "insufficient power available " "to use all downstream ports\n"); hub->mA_per_port = unit_load; /* 7.2.1 */ } else { /* Self-powered external hub */ /* FIXME: What about battery-powered external hubs that * provide less current per port? */ hub->mA_per_port = full_load; } if (hub->mA_per_port < full_load) dev_dbg(hub_dev, "%umA bus power budget for each child\n", hub->mA_per_port); /* Update the HCD's internal representation of this hub before khubd * starts getting port status changes for devices under the hub. 
*/ if (hcd->driver->update_hub_device) { ret = hcd->driver->update_hub_device(hcd, hdev, &hub->tt, GFP_KERNEL); if (ret < 0) { message = "can't update HCD hub info"; goto fail; } } ret = hub_hub_status(hub, &hubstatus, &hubchange); if (ret < 0) { message = "can't get hub status"; goto fail; } /* local power status reports aren't always correct */ if (hdev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER) dev_dbg(hub_dev, "local power source is %s\n", (hubstatus & HUB_STATUS_LOCAL_POWER) ? "lost (inactive)" : "good"); if ((wHubCharacteristics & HUB_CHAR_OCPM) == 0) dev_dbg(hub_dev, "%sover-current condition exists\n", (hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no "); /* set up the interrupt endpoint * We use the EP's maxpacket size instead of (PORTS+1+7)/8 * bytes as USB2.0[11.12.3] says because some hubs are known * to send more data (and thus cause overflow). For root hubs, * maxpktsize is defined in hcd.c's fake endpoint descriptors * to be big enough for at least USB_MAXCHILDREN ports. 
*/ pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress); maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe)); if (maxp > sizeof(*hub->buffer)) maxp = sizeof(*hub->buffer); hub->urb = usb_alloc_urb(0, GFP_KERNEL); if (!hub->urb) { ret = -ENOMEM; goto fail; } usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq, hub, endpoint->bInterval); /* maybe cycle the hub leds */ if (hub->has_indicators && blinkenlights) hub->indicator [0] = INDICATOR_CYCLE; for (i = 0; i < hdev->maxchild; i++) { ret = usb_hub_create_port_device(hub, i + 1); if (ret < 0) { dev_err(hub->intfdev, "couldn't create port%d device.\n", i + 1); hdev->maxchild = i; goto fail_keep_maxchild; } } usb_hub_adjust_deviceremovable(hdev, hub->descriptor); hub_activate(hub, HUB_INIT); return 0; fail: hdev->maxchild = 0; fail_keep_maxchild: dev_err (hub_dev, "config failed, %s (err %d)\n", message, ret); /* hub_disconnect() frees urb and descriptor */ return ret; } static void hub_release(struct kref *kref) { struct usb_hub *hub = container_of(kref, struct usb_hub, kref); usb_put_intf(to_usb_interface(hub->intfdev)); kfree(hub); } static unsigned highspeed_hubs; static void hub_disconnect(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); struct usb_device *hdev = interface_to_usbdev(intf); int port1; /* Take the hub off the event list and don't let it be added again */ spin_lock_irq(&hub_event_lock); if (!list_empty(&hub->event_list)) { list_del_init(&hub->event_list); usb_autopm_put_interface_no_suspend(intf); } hub->disconnected = 1; spin_unlock_irq(&hub_event_lock); /* Disconnect all children and quiesce the hub */ hub->error = 0; hub_quiesce(hub, HUB_DISCONNECT); /* Avoid races with recursively_mark_NOTATTACHED() */ spin_lock_irq(&device_state_lock); port1 = hdev->maxchild; hdev->maxchild = 0; usb_set_intfdata(intf, NULL); spin_unlock_irq(&device_state_lock); for (; port1 > 0; --port1) usb_hub_remove_port_device(hub, port1); if (hub->hdev->speed == USB_SPEED_HIGH) 
highspeed_hubs--; usb_free_urb(hub->urb); kfree(hub->ports); kfree(hub->descriptor); kfree(hub->status); kfree(hub->buffer); pm_suspend_ignore_children(&intf->dev, false); kref_put(&hub->kref, hub_release); } static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_host_interface *desc; struct usb_endpoint_descriptor *endpoint; struct usb_device *hdev; struct usb_hub *hub; desc = intf->cur_altsetting; hdev = interface_to_usbdev(intf); /* * Set default autosuspend delay as 0 to speedup bus suspend, * based on the below considerations: * * - Unlike other drivers, the hub driver does not rely on the * autosuspend delay to provide enough time to handle a wakeup * event, and the submitted status URB is just to check future * change on hub downstream ports, so it is safe to do it. * * - The patch might cause one or more auto supend/resume for * below very rare devices when they are plugged into hub * first time: * * devices having trouble initializing, and disconnect * themselves from the bus and then reconnect a second * or so later * * devices just for downloading firmware, and disconnects * themselves after completing it * * For these quite rare devices, their drivers may change the * autosuspend delay of their parent hub in the probe() to one * appropriate value to avoid the subtle problem if someone * does care it. * * - The patch may cause one or more auto suspend/resume on * hub during running 'lsusb', but it is probably too * infrequent to worry about. * * - Change autosuspend delay of hub can avoid unnecessary auto * suspend timer for hub, also may decrease power consumption * of USB bus. * * - If user has indicated to prevent autosuspend by passing * usbcore.autosuspend = -1 then keep autosuspend disabled. 
*/ #ifdef CONFIG_PM_RUNTIME if (hdev->dev.power.autosuspend_delay >= 0) pm_runtime_set_autosuspend_delay(&hdev->dev, 0); #endif /* * Hubs have proper suspend/resume support, except for root hubs * where the controller driver doesn't have bus_suspend and * bus_resume methods. */ if (hdev->parent) { /* normal device */ usb_enable_autosuspend(hdev); } else { /* root hub */ const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver; if (drv->bus_suspend && drv->bus_resume) usb_enable_autosuspend(hdev); } if (hdev->level == MAX_TOPO_LEVEL) { dev_err(&intf->dev, "Unsupported bus topology: hub nested too deep\n"); return -E2BIG; } #ifdef CONFIG_USB_OTG_BLACKLIST_HUB if (hdev->parent) { dev_warn(&intf->dev, "ignoring external hub\n"); return -ENODEV; } #endif /* Some hubs have a subclass of 1, which AFAICT according to the */ /* specs is not defined, but it works */ if ((desc->desc.bInterfaceSubClass != 0) && (desc->desc.bInterfaceSubClass != 1)) { descriptor_error: dev_err (&intf->dev, "bad descriptor, ignoring hub\n"); return -EIO; } /* Multiple endpoints? What kind of mutant ninja-hub is this? */ if (desc->desc.bNumEndpoints != 1) goto descriptor_error; endpoint = &desc->endpoint[0].desc; /* If it's not an interrupt in endpoint, we'd better punt! 
*/ if (!usb_endpoint_is_int_in(endpoint)) goto descriptor_error; /* We found a hub */ dev_info (&intf->dev, "USB hub found\n"); hub = kzalloc(sizeof(*hub), GFP_KERNEL); if (!hub) { dev_dbg (&intf->dev, "couldn't kmalloc hub struct\n"); return -ENOMEM; } #ifdef CONFIG_MTK_ICUSB_SUPPORT static int getRootHub = 0; if(!getRootHub) { getRootHub = 1; gt_rootHub = hub; //3.10 specific netlink_sock = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &nl_cfg); } #endif #ifdef ORG_SUSPEND_RESUME_TEST static int getRootHub = 0; if(!getRootHub) { getRootHub = 1; gt_rootHub = hub; create_ic_tmp_entry(); } #endif kref_init(&hub->kref); INIT_LIST_HEAD(&hub->event_list); hub->intfdev = &intf->dev; hub->hdev = hdev; INIT_DELAYED_WORK(&hub->leds, led_work); INIT_DELAYED_WORK(&hub->init_work, NULL); usb_get_intf(intf); usb_set_intfdata (intf, hub); intf->needs_remote_wakeup = 1; pm_suspend_ignore_children(&intf->dev, true); if (hdev->speed == USB_SPEED_HIGH) highspeed_hubs++; if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND) hub->quirk_check_port_auto_suspend = 1; if (hub_configure(hub, endpoint) >= 0) return 0; hub_disconnect (intf); return -ENODEV; } static int hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data) { struct usb_device *hdev = interface_to_usbdev (intf); struct usb_hub *hub = usb_hub_to_struct_hub(hdev); /* assert ifno == 0 (part of hub spec) */ switch (code) { case USBDEVFS_HUB_PORTINFO: { struct usbdevfs_hub_portinfo *info = user_data; int i; spin_lock_irq(&device_state_lock); if (hdev->devnum <= 0) info->nports = 0; else { info->nports = hdev->maxchild; for (i = 0; i < info->nports; i++) { if (hub->ports[i]->child == NULL) info->port[i] = 0; else info->port[i] = hub->ports[i]->child->devnum; } } spin_unlock_irq(&device_state_lock); return info->nports + 1; } default: return -ENOSYS; } } /* * Allow user programs to claim ports on a hub. When a device is attached * to one of these "claimed" ports, the program will "own" the device. 
 */
/* Validate (hdev, port1) and return a pointer to the port's owner slot
 * through *ppowner.  Returns 0, -ENODEV if the device is gone, or
 * -EINVAL for an out-of-range port number.
 */
static int find_port_owner(struct usb_device *hdev, unsigned port1,
		struct dev_state ***ppowner)
{
	if (hdev->state == USB_STATE_NOTATTACHED)
		return -ENODEV;
	if (port1 == 0 || port1 > hdev->maxchild)
		return -EINVAL;

	/* This assumes that devices not managed by the hub driver
	 * will always have maxchild equal to 0.
	 */
	*ppowner = &(usb_hub_to_struct_hub(hdev)->ports[port1 - 1]->port_owner);
	return 0;
}

/* In the following three functions, the caller must hold hdev's lock */

/* Claim a port for @owner; fails with -EBUSY if already claimed. */
int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
		       struct dev_state *owner)
{
	int rc;
	struct dev_state **powner;

	rc = find_port_owner(hdev, port1, &powner);
	if (rc)
		return rc;
	if (*powner)
		return -EBUSY;
	*powner = owner;
	return rc;
}

/* Release a port previously claimed by @owner; -ENOENT if the port is
 * owned by someone else (or nobody).
 */
int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
			 struct dev_state *owner)
{
	int rc;
	struct dev_state **powner;

	rc = find_port_owner(hdev, port1, &powner);
	if (rc)
		return rc;
	if (*powner != owner)
		return -ENOENT;
	*powner = NULL;
	return rc;
}

/* Drop every port claim held by @owner on @hdev (e.g. when the usbfs
 * file that claimed them is closed).
 */
void usb_hub_release_all_ports(struct usb_device *hdev, struct dev_state *owner)
{
	struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
	int n;

	for (n = 0; n < hdev->maxchild; n++) {
		if (hub->ports[n]->port_owner == owner)
			hub->ports[n]->port_owner = NULL;
	}
}

/* The caller must hold udev's lock */
bool usb_device_is_owned(struct usb_device *udev)
{
	struct usb_hub *hub;

	if (udev->state == USB_STATE_NOTATTACHED || !udev->parent)
		return false;
	hub = usb_hub_to_struct_hub(udev->parent);
	return !!hub->ports[udev->portnum - 1]->port_owner;
}

/* Mark @udev and its whole subtree NOTATTACHED.  Caller holds
 * device_state_lock (see usb_set_device_state below); suspended
 * devices get their active_duration accounting closed out here.
 */
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
	struct usb_hub *hub = usb_hub_to_struct_hub(udev);
	int i;

	for (i = 0; i < udev->maxchild; ++i) {
		if (hub->ports[i]->child)
			recursively_mark_NOTATTACHED(hub->ports[i]->child);
	}
	if (udev->state == USB_STATE_SUSPENDED)
		udev->active_duration -= jiffies;
	udev->state = USB_STATE_NOTATTACHED;
}

/**
 * usb_set_device_state - change a device's current state (usbcore, hcds)
 * @udev: pointer to device whose state should be changed
 * @new_state: new state value to be stored
 *
 * udev->state is _not_ fully protected by the device lock.  Although
 * most transitions are made only while holding the lock, the state can
 * can change to USB_STATE_NOTATTACHED at almost any time.  This
 * is so that devices can be marked as disconnected as soon as possible,
 * without having to wait for any semaphores to be released.  As a result,
 * all changes to any device's state must be protected by the
 * device_state_lock spinlock.
 *
 * Once a device has been added to the device tree, all changes to its state
 * should be made using this routine.  The state should _not_ be set directly.
 *
 * If udev->state is already USB_STATE_NOTATTACHED then no change is made.
 * Otherwise udev->state is set to new_state, and if new_state is
 * USB_STATE_NOTATTACHED then all of udev's descendants' states are also set
 * to USB_STATE_NOTATTACHED.
 */
void usb_set_device_state(struct usb_device *udev,
		enum usb_device_state new_state)
{
	unsigned long flags;
	int wakeup = -1;	/* -1 means "leave wakeup capability alone" */

	spin_lock_irqsave(&device_state_lock, flags);
	if (udev->state == USB_STATE_NOTATTACHED)
		;	/* do nothing */
	else if (new_state != USB_STATE_NOTATTACHED) {

		/* root hub wakeup capabilities are managed out-of-band
		 * and may involve silicon errata ... ignore them here.
		 */
		if (udev->parent) {
			if (udev->state == USB_STATE_SUSPENDED
					|| new_state == USB_STATE_SUSPENDED)
				;	/* No change to wakeup settings */
			else if (new_state == USB_STATE_CONFIGURED)
				wakeup = (udev->quirks &
					USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
					udev->actconfig->desc.bmAttributes &
					USB_CONFIG_ATT_WAKEUP;
			else
				wakeup = 0;
		}
		/* active_duration accounting: stores -jiffies while
		 * suspended so the next transition yields elapsed time.
		 */
		if (udev->state == USB_STATE_SUSPENDED &&
			new_state != USB_STATE_SUSPENDED)
			udev->active_duration -= jiffies;
		else if (new_state == USB_STATE_SUSPENDED &&
				udev->state != USB_STATE_SUSPENDED)
			udev->active_duration += jiffies;
		udev->state = new_state;
	} else
		recursively_mark_NOTATTACHED(udev);
	spin_unlock_irqrestore(&device_state_lock, flags);
	/* Apply the wakeup decision outside the spinlock */
	if (wakeup >= 0)
		device_set_wakeup_capable(&udev->dev, wakeup);
}
EXPORT_SYMBOL_GPL(usb_set_device_state);

/*
 * Choose a device number.
 *
 * Device numbers are used as filenames in usbfs.  On USB-1.1 and
 * USB-2.0 buses they are also used as device addresses, however on
 * USB-3.0 buses the address is assigned by the controller hardware
 * and it usually is not the same as the device number.
 *
 * WUSB devices are simple: they have no hubs behind, so the mapping
 * device <-> virtual port number becomes 1:1. Why? to simplify the
 * life of the device connection logic in
 * drivers/usb/wusbcore/devconnect.c. When we do the initial secret
 * handshake we need to assign a temporary address in the unauthorized
 * space. For simplicity we use the first virtual port number found to
 * be free [drivers/usb/wusbcore/devconnect.c:wusbhc_devconnect_ack()]
 * and that becomes it's address [X < 128] or its unauthorized address
 * [X | 0x80].
 *
 * We add 1 as an offset to the one-based USB-stack port number
 * (zero-based wusb virtual port index) for two reasons: (a) dev addr
 * 0 is reserved by USB for default address; (b) Linux's USB stack
 * uses always #1 for the root hub of the controller. So USB stack's
 * port #1, which is wusb virtual-port #0 has address #2.
 *
 * Devices connected under xHCI are not as simple. The host controller
 * supports virtualization, so the hardware assigns device addresses and
 * the HCD must setup data structures before issuing a set address
 * command to the hardware.
*/ static void choose_devnum(struct usb_device *udev) { int devnum; struct usb_bus *bus = udev->bus; /* If khubd ever becomes multithreaded, this will need a lock */ if (udev->wusb) { devnum = udev->portnum + 1; BUG_ON(test_bit(devnum, bus->devmap.devicemap)); } else { /* Try to allocate the next devnum beginning at * bus->devnum_next. */ devnum = find_next_zero_bit(bus->devmap.devicemap, 128, bus->devnum_next); if (devnum >= 128) devnum = find_next_zero_bit(bus->devmap.devicemap, 128, 1); bus->devnum_next = ( devnum >= 127 ? 1 : devnum + 1); } if (devnum < 128) { set_bit(devnum, bus->devmap.devicemap); udev->devnum = devnum; } } static void release_devnum(struct usb_device *udev) { if (udev->devnum > 0) { clear_bit(udev->devnum, udev->bus->devmap.devicemap); udev->devnum = -1; } } static void update_devnum(struct usb_device *udev, int devnum) { /* The address for a WUSB device is managed by wusbcore. */ if (!udev->wusb) udev->devnum = devnum; } static void hub_free_dev(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); /* Root hubs aren't real devices, so don't free HCD resources */ if (hcd->driver->free_dev && udev->parent) hcd->driver->free_dev(hcd, udev); } /** * usb_disconnect - disconnect a device (usbcore-internal) * @pdev: pointer to device being disconnected * Context: !in_interrupt () * * Something got disconnected. Get rid of it and all of its children. * * If *pdev is a normal device then the parent hub must already be locked. * If *pdev is a root hub then this routine will acquire the * usb_bus_list_lock on behalf of the caller. * * Only hub drivers (including virtual root hub drivers for host * controllers) should ever call this. * * This call is synchronous, and may not be used in an interrupt context. 
*/
void usb_disconnect(struct usb_device **pdev)
{
	struct usb_device *udev = *pdev;
	struct usb_hub *hub = usb_hub_to_struct_hub(udev);
	int i;
#ifdef CONFIG_MTK_ICUSB_SUPPORT
	/* MTK instrumentation: time the expensive teardown steps below. */
	struct timeval tv_begin, tv_end;
	struct timeval tv_before, tv_after;
	do_gettimeofday(&tv_begin);
#endif

	/* mark the device as inactive, so any further urb submissions for
	 * this device (and any of its children) will fail immediately.
	 * this quiesces everything except pending urbs.
	 */
	usb_set_device_state(udev, USB_STATE_NOTATTACHED);
	dev_info(&udev->dev, "USB disconnect, device number %d\n",
			udev->devnum);

	usb_lock_device(udev);

	/* Free up all the children before we remove this device */
	for (i = 0; i < udev->maxchild; i++) {
		if (hub->ports[i]->child)
			usb_disconnect(&hub->ports[i]->child);
	}

	/* deallocate hcd/hardware state ... nuking all pending urbs and
	 * cleaning up all state associated with the current configuration
	 * so that the hardware is now fully quiesced.
	 */
	dev_dbg (&udev->dev, "unregistering device\n");
#ifdef CONFIG_MTK_ICUSB_SUPPORT
	do_gettimeofday(&tv_before);
#endif
	usb_disable_device(udev, 0);
#ifdef CONFIG_MTK_ICUSB_SUPPORT
	do_gettimeofday(&tv_after);
	MYDBG("usb_disable_device(), time spent, sec : %d, usec : %d\n", (tv_after.tv_sec - tv_before.tv_sec), (tv_after.tv_usec - tv_before.tv_usec));
#endif
	usb_hcd_synchronize_unlinks(udev);

	if (udev->parent) {
		struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
		struct usb_port *port_dev = hub->ports[udev->portnum - 1];

		sysfs_remove_link(&udev->dev.kobj, "port");
		sysfs_remove_link(&port_dev->dev.kobj, "device");

		/* Drop the runtime-PM reference the port held for this
		 * child, unless it was already dropped at suspend time. */
		if (!port_dev->did_runtime_put)
			pm_runtime_put(&port_dev->dev);
		else
			port_dev->did_runtime_put = false;
	}

#ifdef CONFIG_MTK_ICUSB_SUPPORT
	do_gettimeofday(&tv_before);
#endif
	usb_remove_ep_devs(&udev->ep0);
#ifdef CONFIG_MTK_ICUSB_SUPPORT
	do_gettimeofday(&tv_after);
	MYDBG("usb_remove_ep_devs(), time spent, sec : %d, usec : %d\n", (tv_after.tv_sec - tv_before.tv_sec), (tv_after.tv_usec - tv_before.tv_usec));
#endif
	usb_unlock_device(udev);

	/* Unregister the device. The device driver is responsible
	 * for de-configuring the device and invoking the remove-device
	 * notifier chain (used by usbfs and possibly others).
	 */
#ifdef CONFIG_MTK_ICUSB_SUPPORT
	do_gettimeofday(&tv_before);
#endif
	device_del(&udev->dev);
#ifdef CONFIG_MTK_ICUSB_SUPPORT
	do_gettimeofday(&tv_after);
	MYDBG("device_del(), time spent, sec : %d, usec : %d\n", (tv_after.tv_sec - tv_before.tv_sec), (tv_after.tv_usec - tv_before.tv_usec));
#endif

	/* Free the device number and delete the parent's children[]
	 * (or root_hub) pointer.
	 */
	release_devnum(udev);

	/* Avoid races with recursively_mark_NOTATTACHED() */
	spin_lock_irq(&device_state_lock);
	*pdev = NULL;
	spin_unlock_irq(&device_state_lock);

	hub_free_dev(udev);

	put_device(&udev->dev);
#ifdef CONFIG_MTK_ICUSB_SUPPORT
	set_usb11_sts_disconnect_done();
	do_gettimeofday(&tv_end);
	MYDBG("time spent, sec : %d, usec : %d\n", (tv_end.tv_sec - tv_begin.tv_sec), (tv_end.tv_usec - tv_begin.tv_usec));
#endif
}

#ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
/* Log one cached string descriptor, skipping absent ones. */
static void show_string(struct usb_device *udev, char *id, char *string)
{
	if (!string)
		return;
	dev_info(&udev->dev, "%s: %s\n", id, string);
}

/* Announce a newly enumerated device's IDs and cached strings. */
static void announce_device(struct usb_device *udev)
{
	dev_info(&udev->dev, "New USB device found, idVendor=%04x, idProduct=%04x\n",
		le16_to_cpu(udev->descriptor.idVendor),
		le16_to_cpu(udev->descriptor.idProduct));
	dev_info(&udev->dev,
		"New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n",
		udev->descriptor.iManufacturer,
		udev->descriptor.iProduct,
		udev->descriptor.iSerialNumber);
	show_string(udev, "Product", udev->product);
	show_string(udev, "Manufacturer", udev->manufacturer);
	show_string(udev, "SerialNumber", udev->serial);
}
#else
static inline void announce_device(struct usb_device *udev) { }
#endif

#ifdef CONFIG_USB_OTG
#include "otg_whitelist.h"
#endif

/**
 * usb_enumerate_device_otg - FIXME (usbcore-internal)
 * @udev: newly addressed device (in ADDRESS state)
 *
 * Finish
enumeration for On-The-Go devices */ static int usb_enumerate_device_otg(struct usb_device *udev) { int err = 0; #ifdef CONFIG_USB_OTG /* * OTG-aware devices on OTG-capable root hubs may be able to use SRP, * to wake us after we've powered off VBUS; and HNP, switching roles * "host" to "peripheral". The OTG descriptor helps figure this out. */ if (!udev->bus->is_b_host && udev->config && udev->parent == udev->bus->root_hub) { struct usb_otg_descriptor *desc = NULL; struct usb_bus *bus = udev->bus; /* descriptor may appear anywhere in config */ if (__usb_get_extra_descriptor (udev->rawdescriptors[0], le16_to_cpu(udev->config[0].desc.wTotalLength), USB_DT_OTG, (void **) &desc) == 0) { if (desc->bmAttributes & USB_OTG_HNP) { unsigned port1 = udev->portnum; dev_info(&udev->dev, "Dual-Role OTG device on %sHNP port\n", (port1 == bus->otg_port) ? "" : "non-"); /* enable HNP before suspend, it's simpler */ if (port1 == bus->otg_port) bus->b_hnp_enable = 1; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, 0, bus->b_hnp_enable ? USB_DEVICE_B_HNP_ENABLE : USB_DEVICE_A_ALT_HNP_SUPPORT, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) { /* OTG MESSAGE: report errors here, * customize to match your product. */ dev_info(&udev->dev, "can't set HNP mode: %d\n", err); bus->b_hnp_enable = 0; } } } } if (!is_targeted(udev)) { /* Maybe it can talk to us, though we can't talk to it. * (Includes HNP test device.) */ if (udev->bus->b_hnp_enable || udev->bus->is_b_host) { err = usb_port_suspend(udev, PMSG_SUSPEND); if (err < 0) dev_dbg(&udev->dev, "HNP fail, %d\n", err); } err = -ENOTSUPP; goto fail; } fail: #endif return err; } /** * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal) * @udev: newly addressed device (in ADDRESS state) * * This is only called by usb_new_device() and usb_authorize_device() * and FIXME -- all comments that apply to them apply here wrt to * environment. 
* * If the device is WUSB and not authorized, we don't attempt to read * the string descriptors, as they will be errored out by the device * until it has been authorized. */ static int usb_enumerate_device(struct usb_device *udev) { int err; if (udev->config == NULL) { err = usb_get_configuration(udev); if (err < 0) { if (err != -ENODEV) dev_err(&udev->dev, "can't read configurations, error %d\n", err); return err; } } /* read the standard strings and cache them if present */ udev->product = usb_cache_string(udev, udev->descriptor.iProduct); udev->manufacturer = usb_cache_string(udev, udev->descriptor.iManufacturer); udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber); err = usb_enumerate_device_otg(udev); if (err < 0) return err; usb_detect_interface_quirks(udev); return 0; } static void set_usb_port_removable(struct usb_device *udev) { struct usb_device *hdev = udev->parent; struct usb_hub *hub; u8 port = udev->portnum; u16 wHubCharacteristics; bool removable = true; if (!hdev) return; hub = usb_hub_to_struct_hub(udev->parent); wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); if (!(wHubCharacteristics & HUB_CHAR_COMPOUND)) return; if (hub_is_superspeed(hdev)) { if (le16_to_cpu(hub->descriptor->u.ss.DeviceRemovable) & (1 << port)) removable = false; } else { if (hub->descriptor->u.hs.DeviceRemovable[port / 8] & (1 << (port % 8))) removable = false; } if (removable) udev->removable = USB_DEVICE_REMOVABLE; else udev->removable = USB_DEVICE_FIXED; } /** * usb_new_device - perform initial device setup (usbcore-internal) * @udev: newly addressed device (in ADDRESS state) * * This is called with devices which have been detected but not fully * enumerated. The device descriptor is available, but not descriptors * for any device configuration. The caller must have locked either * the parent hub (if udev is a normal device) or else the * usb_bus_list_lock (if udev is a root hub). 
The parent's pointer to
 * udev has already been installed, but udev is not yet visible through
 * sysfs or other filesystem code.
 *
 * It will return if the device is configured properly or not.  Zero if
 * the interface was registered with the driver core; else a negative
 * errno value.
 *
 * This call is synchronous, and may not be used in an interrupt context.
 *
 * Only the hub driver or root-hub registrar should ever call this.
 */
int usb_new_device(struct usb_device *udev)
{
	int err;

	if (udev->parent) {
		/* Initialize non-root-hub device wakeup to disabled;
		 * device (un)configuration controls wakeup capable
		 * sysfs power/wakeup controls wakeup enabled/disabled
		 */
		device_init_wakeup(&udev->dev, 0);
		//MYDBG("udev :%x\n", (unsigned int)udev);
		MYDBG("udev :%lx\n", (unsigned long)udev);
	}

	/* Tell the runtime-PM framework the device is active */
	pm_runtime_set_active(&udev->dev);
	pm_runtime_get_noresume(&udev->dev);
	pm_runtime_use_autosuspend(&udev->dev);
	pm_runtime_enable(&udev->dev);

	/* By default, forbid autosuspend for all devices.  It will be
	 * allowed for hubs during binding.
	 */
	usb_disable_autosuspend(udev);

	err = usb_enumerate_device(udev);	/* Read descriptors */
	if (err < 0)
		goto fail;
	dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
			udev->devnum, udev->bus->busnum,
			(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
	/* export the usbdev device-node for libusb */
	udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
			(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));

	/* Tell the world! */
	announce_device(udev);

	/* Feed the cached descriptor strings into the entropy pool. */
	if (udev->serial)
		add_device_randomness(udev->serial, strlen(udev->serial));
	if (udev->product)
		add_device_randomness(udev->product, strlen(udev->product));
	if (udev->manufacturer)
		add_device_randomness(udev->manufacturer,
				      strlen(udev->manufacturer));

	device_enable_async_suspend(&udev->dev);

	/*
	 * check whether the hub marks this port as non-removable.  Do it
	 * now so that platform-specific data can override it in
	 * device_add()
	 */
	if (udev->parent)
		set_usb_port_removable(udev);

	/* Register the device.  The device driver is responsible
	 * for configuring the device and invoking the add-device
	 * notifier chain (used by usbfs and possibly others).
	 */
	err = device_add(&udev->dev);
	if (err) {
		dev_err(&udev->dev, "can't device_add, error %d\n", err);
		goto fail;
	}

	/* Create link files between child device and usb port device. */
	if (udev->parent) {
		struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
		struct usb_port *port_dev = hub->ports[udev->portnum - 1];

		err = sysfs_create_link(&udev->dev.kobj,
				&port_dev->dev.kobj, "port");
		if (err)
			goto fail;

		err = sysfs_create_link(&port_dev->dev.kobj,
				&udev->dev.kobj, "device");
		if (err) {
			sysfs_remove_link(&udev->dev.kobj, "port");
			goto fail;
		}

		/* Keep the port active while it has a connected child. */
		pm_runtime_get_sync(&port_dev->dev);
	}

	(void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
	usb_mark_last_busy(udev);
	pm_runtime_put_sync_autosuspend(&udev->dev);
	return err;

fail:
	usb_set_device_state(udev, USB_STATE_NOTATTACHED);
	pm_runtime_disable(&udev->dev);
	pm_runtime_set_suspended(&udev->dev);
	return err;
}


/**
 * usb_deauthorize_device - deauthorize a device (usbcore-internal)
 * @usb_dev: USB device
 *
 * Move the USB device to a very basic state where interfaces are disabled
 * and the device is in fact unconfigured and unusable.
 *
 * We share a lock (that we have) with device_del(), so we need to
 * defer its call.
*/
int usb_deauthorize_device(struct usb_device *usb_dev)
{
	usb_lock_device(usb_dev);
	if (usb_dev->authorized == 0)
		goto out_unauthorized;

	usb_dev->authorized = 0;
	/* Dropping authorization unconfigures the device. */
	usb_set_configuration(usb_dev, -1);

out_unauthorized:
	usb_unlock_device(usb_dev);
	return 0;
}


int usb_authorize_device(struct usb_device *usb_dev)
{
	int result = 0, c;

	usb_lock_device(usb_dev);
	if (usb_dev->authorized == 1)
		goto out_authorized;

	/* Wake the device so we can talk to it. */
	result = usb_autoresume_device(usb_dev);
	if (result < 0) {
		dev_err(&usb_dev->dev,
			"can't autoresume for authorization: %d\n", result);
		goto error_autoresume;
	}

	result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor));
	if (result < 0) {
		dev_err(&usb_dev->dev, "can't re-read device descriptor for "
			"authorization: %d\n", result);
		goto error_device_descriptor;
	}

	usb_dev->authorized = 1;
	/* Choose and set the configuration.  This registers the interfaces
	 * with the driver core and lets interface drivers bind to them.
	 */
	c = usb_choose_configuration(usb_dev);
	if (c >= 0) {
		result = usb_set_configuration(usb_dev, c);
		if (result) {
			dev_err(&usb_dev->dev,
				"can't set config #%d, error %d\n", c, result);
			/* This need not be fatal.  The user can try to
			 * set other configurations. */
		}
	}
	dev_info(&usb_dev->dev, "authorized to connect\n");

error_device_descriptor:
	usb_autosuspend_device(usb_dev);
error_autoresume:
out_authorized:
	usb_unlock_device(usb_dev);	// complements locktree
	return result;
}


/* Returns 1 if @hub is a WUSB root hub, 0 otherwise */
static unsigned hub_is_wusb(struct usb_hub *hub)
{
	struct usb_hcd *hcd;
	if (hub->hdev->parent != NULL)  /* not a root hub? */
		return 0;
	hcd = container_of(hub->hdev->bus, struct usb_hcd, self);
	return hcd->wireless;
}


#define PORT_RESET_TRIES	5
#define SET_ADDRESS_TRIES	2
#define GET_DESCRIPTOR_TRIES	2
#define SET_CONFIG_TRIES	(2 * (use_both_schemes + 1))
#define USE_NEW_SCHEME(i)	((i) / 2 == (int)old_scheme_first)

#define HUB_ROOT_RESET_TIME	50	/* times are in msec */
#define HUB_SHORT_RESET_TIME	10
#define HUB_BH_RESET_TIME	50
#define HUB_LONG_RESET_TIME	200
#define HUB_RESET_TIMEOUT	800

static int hub_port_reset(struct usb_hub *hub, int port1,
			struct usb_device *udev, unsigned int delay, bool warm);

/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
 * Port warm reset is required to recover
 */
static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
{
	return hub_is_superspeed(hub->hdev) &&
		(((portstatus & USB_PORT_STAT_LINK_STATE) ==
		  USB_SS_PORT_LS_SS_INACTIVE) ||
		 ((portstatus & USB_PORT_STAT_LINK_STATE) ==
		  USB_SS_PORT_LS_COMP_MOD)) ;
}

/* Poll a port until its reset completes, then record the negotiated
 * speed in @udev (when one is given).  Returns 0 on success, -EBUSY if
 * the reset never completed, or -ENOTCONN if the device went away.
 */
static int hub_port_wait_reset(struct usb_hub *hub, int port1,
			struct usb_device *udev, unsigned int delay, bool warm)
{
	int delay_time, ret;
	u16 portstatus;
	u16 portchange;

	for (delay_time = 0;
			delay_time < HUB_RESET_TIMEOUT;
			delay_time += delay) {
		/* wait to give the device a chance to reset */
		msleep(delay);

		/* read and decode port status */
		MYDBG("");
		ret = hub_port_status(hub, port1, &portstatus, &portchange);
		MYDBG("");
		if (ret < 0)
			return ret;

		/* The port state is unknown until the reset completes. */
		if (!(portstatus & USB_PORT_STAT_RESET))
			break;

		/* switch to the long delay after two short delay failures */
		if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
			delay = HUB_LONG_RESET_TIME;

		dev_dbg (hub->intfdev,
			"port %d not %sreset yet, waiting %dms\n",
			port1, warm ? "warm " : "", delay);
	}

	if ((portstatus & USB_PORT_STAT_RESET))
		return -EBUSY;

	/* Inactive/Compliance Mode after reset means the reset failed. */
	if (hub_port_warm_reset_required(hub, portstatus))
		return -ENOTCONN;

	/* Device went away? */
	if (!(portstatus & USB_PORT_STAT_CONNECTION))
		return -ENOTCONN;

	/* bomb out completely if the connection bounced.  A USB 3.0
	 * connection may bounce if multiple warm resets were issued,
	 * but the device may have successfully re-connected.  Ignore it.
	 */
	if (!hub_is_superspeed(hub->hdev) &&
			(portchange & USB_PORT_STAT_C_CONNECTION))
		return -ENOTCONN;

	if (!(portstatus & USB_PORT_STAT_ENABLE))
		return -EBUSY;

	if (!udev)
		return 0;

	/* Record the speed the port negotiated during the reset. */
	if (hub_is_wusb(hub))
		udev->speed = USB_SPEED_WIRELESS;
	else if (hub_is_superspeed(hub->hdev))
		udev->speed = USB_SPEED_SUPER;
	else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
		udev->speed = USB_SPEED_HIGH;
	else if (portstatus & USB_PORT_STAT_LOW_SPEED)
		udev->speed = USB_SPEED_LOW;
	else
		udev->speed = USB_SPEED_FULL;
	return 0;
}

/* After a reset attempt finishes with result *status, clear the port's
 * change bits and move @udev to the corresponding device state.
 */
static void hub_port_finish_reset(struct usb_hub *hub, int port1,
			struct usb_device *udev, int *status)
{
	switch (*status) {
	case 0:
		/* TRSTRCY = 10 ms; plus some extra */
		msleep(10 + 40);
		if (udev) {
			struct usb_hcd *hcd = bus_to_hcd(udev->bus);

			update_devnum(udev, 0);
			/* The xHC may think the device is already reset,
			 * so ignore the status.
			 */
			if (hcd->driver->reset_device)
				hcd->driver->reset_device(hcd, udev);
		}
		/* FALL THROUGH */
	case -ENOTCONN:
	case -ENODEV:
		usb_clear_port_feature(hub->hdev, port1,
				USB_PORT_FEAT_C_RESET);
		if (hub_is_superspeed(hub->hdev)) {
			usb_clear_port_feature(hub->hdev, port1,
					USB_PORT_FEAT_C_BH_PORT_RESET);
			usb_clear_port_feature(hub->hdev, port1,
					USB_PORT_FEAT_C_PORT_LINK_STATE);
			usb_clear_port_feature(hub->hdev, port1,
					USB_PORT_FEAT_C_CONNECTION);
		}
		if (udev)
			usb_set_device_state(udev, *status
					? USB_STATE_NOTATTACHED
					: USB_STATE_DEFAULT);
		break;
	}
}

/* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
static int hub_port_reset(struct usb_hub *hub, int port1,
			struct usb_device *udev, unsigned int delay, bool warm)
{
	int i, status;
	u16 portchange, portstatus;

	if (!hub_is_superspeed(hub->hdev)) {
		if (warm) {
			dev_err(hub->intfdev, "only USB3 hub support "
						"warm reset\n");
			return -EINVAL;
		}
		/* Block EHCI CF initialization during the port reset.
		 * Some companion controllers don't like it when they mix.
		 */
		down_read(&ehci_cf_port_reset_rwsem);
	} else if (!warm) {
		/*
		 * If the caller hasn't explicitly requested a warm reset,
		 * double check and see if one is needed.
		 */
		status = hub_port_status(hub, port1,
					&portstatus, &portchange);
		if (status < 0)
			goto done;

		if (hub_port_warm_reset_required(hub, portstatus))
			warm = true;
	}

	/* Reset the port */
	for (i = 0; i < PORT_RESET_TRIES; i++) {
		MYDBG("");
		status = set_port_feature(hub->hdev, port1, (warm ?
					USB_PORT_FEAT_BH_PORT_RESET :
					USB_PORT_FEAT_RESET));
		MYDBG("");
		if (status == -ENODEV) {
			MYDBG("");
			;	/* The hub is gone */
		} else if (status) {
			MYDBG("");
			dev_err(hub->intfdev,
					"cannot %sreset port %d (err = %d)\n",
					warm ? "warm " : "", port1, status);
		} else {
			MYDBG("");
			status = hub_port_wait_reset(hub, port1, udev, delay,
								warm);
			if (status && status != -ENOTCONN) {
				MYDBG("");
				dev_dbg(hub->intfdev,
						"port_wait_reset: err = %d\n",
						status);
			}
		}

		MYDBG("");
		/* Check for disconnect or reset */
		if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
			MYDBG("");
			hub_port_finish_reset(hub, port1, udev, &status);
			MYDBG("");

			if (!hub_is_superspeed(hub->hdev))
				goto done;

			/*
			 * If a USB 3.0 device migrates from reset to an error
			 * state, re-issue the warm reset.
			 */
			if (hub_port_status(hub, port1,
					&portstatus, &portchange) < 0)
				goto done;

			if (!hub_port_warm_reset_required(hub, portstatus))
				goto done;

			/*
			 * If the port is in SS.Inactive or Compliance Mode, the
			 * hot or warm reset failed.  Try another warm reset.
			 */
			if (!warm) {
				dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n",
						port1);
				warm = true;
			}
		}

		MYDBG("");
		dev_dbg (hub->intfdev,
			"port %d not enabled, trying %sreset again...\n",
			port1, warm ? "warm " : "");
		delay = HUB_LONG_RESET_TIME;
	}

	MYDBG("");
	dev_err (hub->intfdev,
		"Cannot enable port %i. Maybe the USB cable is bad?\n",
		port1);

done:
	if (!hub_is_superspeed(hub->hdev))
	{
		MYDBG("");
		up_read(&ehci_cf_port_reset_rwsem);
	}
	MYDBG("");

	return status;
}

/* Check if a port is power on */
static int port_is_power_on(struct usb_hub *hub, unsigned portstatus)
{
	int ret = 0;

	if (hub_is_superspeed(hub->hdev)) {
		if (portstatus & USB_SS_PORT_STAT_POWER)
			ret = 1;
	} else {
		if (portstatus & USB_PORT_STAT_POWER)
			ret = 1;
	}

	return ret;
}

#ifdef	CONFIG_PM

/* Check if a port is suspended(USB2.0 port) or in U3 state(USB3.0 port) */
static int port_is_suspended(struct usb_hub *hub, unsigned portstatus)
{
	int ret = 0;

	if (hub_is_superspeed(hub->hdev)) {
		if ((portstatus & USB_PORT_STAT_LINK_STATE)
				== USB_SS_PORT_LS_U3)
			ret = 1;
	} else {
		if (portstatus & USB_PORT_STAT_SUSPEND)
			ret = 1;
	}

	return ret;
}

/* Determine whether the device on a port is ready for a normal resume,
 * is ready for a reset-resume, or should be disconnected.
 */
static int check_port_resume_type(struct usb_device *udev,
		struct usb_hub *hub, int port1,
		int status, unsigned portchange, unsigned portstatus)
{
	/* Is the device still present? */
	if (status || port_is_suspended(hub, portstatus) ||
			!port_is_power_on(hub, portstatus) ||
			!(portstatus & USB_PORT_STAT_CONNECTION)) {
		if (status >= 0)
			status = -ENODEV;
	}

	/* Can't do a normal resume if the port isn't enabled,
	 * so try a reset-resume instead.
	 */
	else if (!(portstatus & USB_PORT_STAT_ENABLE) && !udev->reset_resume) {
		if (udev->persist_enabled)
			udev->reset_resume = 1;
		else
			status = -ENODEV;
	}

	if (status) {
		dev_dbg(hub->intfdev,
				"port %d status %04x.%04x after resume, %d\n",
				port1, portchange, portstatus, status);
	} else if (udev->reset_resume) {

		/* Late port handoff can set status-change bits */
		if (portchange & USB_PORT_STAT_C_CONNECTION)
			usb_clear_port_feature(hub->hdev, port1,
					USB_PORT_FEAT_C_CONNECTION);
		if (portchange & USB_PORT_STAT_C_ENABLE)
			usb_clear_port_feature(hub->hdev, port1,
					USB_PORT_FEAT_C_ENABLE);
	}

	return status;
}

int usb_disable_ltm(struct usb_device *udev)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);

	/* Check if the roothub and device supports LTM. */
	if (!usb_device_supports_ltm(hcd->self.root_hub) ||
			!usb_device_supports_ltm(udev))
		return 0;

	/* Clear Feature LTM Enable can only be sent if the device is
	 * configured.
	 */
	if (!udev->actconfig)
		return 0;

	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
			USB_DEVICE_LTM_ENABLE, 0, NULL, 0,
			USB_CTRL_SET_TIMEOUT);
}
EXPORT_SYMBOL_GPL(usb_disable_ltm);

void usb_enable_ltm(struct usb_device *udev)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);

	/* Check if the roothub and device supports LTM. */
	if (!usb_device_supports_ltm(hcd->self.root_hub) ||
			!usb_device_supports_ltm(udev))
		return;

	/* Set Feature LTM Enable can only be sent if the device is
	 * configured.
	 */
	if (!udev->actconfig)
		return;

	usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
			USB_DEVICE_LTM_ENABLE, 0, NULL, 0,
			USB_CTRL_SET_TIMEOUT);
}
EXPORT_SYMBOL_GPL(usb_enable_ltm);

#ifdef	CONFIG_PM
/*
 * usb_disable_function_remotewakeup - disable usb3.0
 * device's function remote wakeup
 * @udev: target device
 *
 * Assume there's only one function on the USB 3.0
 * device and disable remote wake for the first
 * interface.  FIXME if the interface association
 * descriptor shows there's more than one function.
*/ static int usb_disable_function_remotewakeup(struct usb_device *udev) { return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } /* Count of wakeup-enabled devices at or below udev */ static unsigned wakeup_enabled_descendants(struct usb_device *udev) { struct usb_hub *hub = usb_hub_to_struct_hub(udev); return udev->do_remote_wakeup + (hub ? hub->wakeup_enabled_descendants : 0); } /* * usb_port_suspend - suspend a usb device's upstream port * @udev: device that's no longer in active use, not a root hub * Context: must be able to sleep; device not locked; pm locks held * * Suspends a USB device that isn't in active use, conserving power. * Devices may wake out of a suspend, if anything important happens, * using the remote wakeup mechanism. They may also be taken out of * suspend by the host, using usb_port_resume(). It's also routine * to disconnect devices while they are suspended. * * This only affects the USB hardware for a device; its interfaces * (and, for hubs, child devices) must already have been suspended. * * Selective port suspend reduces power; most suspended devices draw * less than 500 uA. It's also used in OTG, along with remote wakeup. * All devices below the suspended port are also suspended. * * Devices leave suspend state when the host wakes them up. Some devices * also support "remote wakeup", where the device can activate the USB * tree above them to deliver data, such as a keypress or packet. In * some cases, this wakes the USB host. * * Suspending OTG devices may trigger HNP, if that's been enabled * between a pair of dual-role devices. That will change roles, such * as from A-Host to A-Peripheral or from B-Host back to B-Peripheral. * * Devices on USB hub ports have only one "suspend" state, corresponding * to ACPI D2, "may cause the device to lose some context". * State transitions include: * * - suspend, resume ... 
when the VBUS power link stays live * - suspend, disconnect ... VBUS lost * * Once VBUS drop breaks the circuit, the port it's using has to go through * normal re-enumeration procedures, starting with enabling VBUS power. * Other than re-initializing the hub (plug/unplug, except for root hubs), * Linux (2.6) currently has NO mechanisms to initiate that: no khubd * timer, no SRP, no requests through sysfs. * * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get * suspended until their bus goes into global suspend (i.e., the root * hub is suspended). Nevertheless, we change @udev->state to * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual * upstream port setting is stored in @udev->port_is_suspended. * * Returns 0 on success, else negative errno. */ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) { struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); struct usb_port *port_dev = hub->ports[udev->portnum - 1]; int port1 = udev->portnum; int status; bool really_suspend = true; #ifdef CONFIG_MTK_ICUSB_SUPPORT if(!is_usb11_enabled()) { MYDBG("usb11 is not enabled"); return 0; } MYDBG(""); #endif /* enable remote wakeup when appropriate; this lets the device * wake up the upstream hub (including maybe the root hub). * * NOTE: OTG devices may issue remote wakeup (or SRP) even when * we don't explicitly enable it here. */ if (udev->do_remote_wakeup) { if (!hub_is_superspeed(hub->hdev)) { status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } else { /* Assume there's only one function on the USB 3.0 * device and enable remote wake for the first * interface. FIXME if the interface association * descriptor shows there's more than one function. 
*/ status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE, USB_INTRF_FUNC_SUSPEND, USB_INTRF_FUNC_SUSPEND_RW | USB_INTRF_FUNC_SUSPEND_LP, NULL, 0, USB_CTRL_SET_TIMEOUT); } if (status) { dev_dbg(&udev->dev, "won't remote wakeup, status %d\n", status); /* bail if autosuspend is requested */ if (PMSG_IS_AUTO(msg)) { MYDBG(""); goto err_wakeup; } } } /* disable USB2 hardware LPM */ if (udev->usb2_hw_lpm_enabled == 1) usb_set_usb2_hardware_lpm(udev, 0); if (usb_disable_ltm(udev)) { dev_err(&udev->dev, "Failed to disable LTM before suspend\n."); status = -ENOMEM; MYDBG(""); if (PMSG_IS_AUTO(msg)) goto err_ltm; } if (usb_unlocked_disable_lpm(udev)) { dev_err(&udev->dev, "Failed to disable LPM before suspend\n."); status = -ENOMEM; MYDBG(""); if (PMSG_IS_AUTO(msg)) goto err_lpm3; } /* see 7.1.7.6 */ if (hub_is_superspeed(hub->hdev)) { MYDBG(""); status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3); /* * For system suspend, we do not need to enable the suspend feature * on individual USB-2 ports. The devices will automatically go * into suspend a few ms after the root hub stops sending packets. * The USB 2.0 spec calls this "global suspend". * * However, many USB hubs have a bug: They don't relay wakeup requests * from a downstream port if the port's suspend feature isn't on. * Therefore we will turn on the suspend feature if udev or any of its * descendants is enabled for remote wakeup. 
*/ } else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) { MYDBG(""); status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND); } else { really_suspend = false; status = 0; } if (status) { dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", port1, status); MYDBG(""); /* Try to enable USB3 LPM and LTM again */ usb_unlocked_enable_lpm(udev); err_lpm3: usb_enable_ltm(udev); err_ltm: /* Try to enable USB2 hardware LPM again */ if (udev->usb2_hw_lpm_capable == 1) usb_set_usb2_hardware_lpm(udev, 1); if (udev->do_remote_wakeup) { if (udev->speed < USB_SPEED_SUPER) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); else usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } err_wakeup: /* System sleep transitions should never fail */ if (!PMSG_IS_AUTO(msg)) status = 0; } else { dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", (PMSG_IS_AUTO(msg) ? "auto-" : ""), udev->do_remote_wakeup); if (really_suspend) { udev->port_is_suspended = 1; /* device has up to 10 msec to fully suspend */ msleep(10); } usb_set_device_state(udev, USB_STATE_SUSPENDED); } if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) { pm_runtime_put_sync(&port_dev->dev); port_dev->did_runtime_put = true; } usb_mark_last_busy(hub->hdev); return status; } /* * If the USB "suspend" state is in use (rather than "global suspend"), * many devices will be individually taken out of suspend state using * special "resume" signaling. This routine kicks in shortly after * hardware resume signaling is finished, either because of selective * resume (by host) or remote wakeup (by device) ... now see what changed * in the tree that's rooted at this device. * * If @udev->reset_resume is set then the device is reset before the * status check is done. 
*/
static int finish_port_resume(struct usb_device *udev)
{
	int	status = 0;
	u16	devstatus = 0;

	/* caller owns the udev device lock */
	dev_dbg(&udev->dev, "%s\n",
		udev->reset_resume ? "finish reset-resume" : "finish resume");

	/* usb ch9 identifies four variants of SUSPENDED, based on what
	 * state the device resumes to.  Linux currently won't see the
	 * first two on the host side; they'd be inside hub_port_init()
	 * during many timeouts, but khubd can't suspend until later.
	 */
	usb_set_device_state(udev, udev->actconfig
			? USB_STATE_CONFIGURED
			: USB_STATE_ADDRESS);

	/* 10.5.4.5 says not to reset a suspended port if the attached
	 * device is enabled for remote wakeup.  Hence the reset
	 * operation is carried out here, after the port has been
	 * resumed.
	 */
	if (udev->reset_resume)
 retry_reset_resume:
		status = usb_reset_and_verify_device(udev);

	/* 10.5.4.5 says be sure devices in the tree are still there.
	 * For now let's assume the device didn't go crazy on resume,
	 * and device drivers will know about any resume quirks.
	 */
	if (status == 0) {
		devstatus = 0;
		status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
		if (status >= 0)
			status = (status > 0 ? 0 : -ENODEV);

		/* If a normal resume failed, try doing a reset-resume */
		if (status && !udev->reset_resume && udev->persist_enabled) {
			dev_dbg(&udev->dev, "retry with reset-resume\n");
			udev->reset_resume = 1;
			goto retry_reset_resume;
		}
	}

	if (status) {
		dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
				status);
	/*
	 * There are a few quirky devices which violate the standard
	 * by claiming to have remote wakeup enabled after a reset,
	 * which crash if the feature is cleared, hence check for
	 * udev->reset_resume
	 */
	} else if (udev->actconfig && !udev->reset_resume) {
		if (!hub_is_superspeed(udev->parent)) {
			le16_to_cpus(&devstatus);
			if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
				status = usb_control_msg(udev,
						usb_sndctrlpipe(udev, 0),
						USB_REQ_CLEAR_FEATURE,
							USB_RECIP_DEVICE,
						USB_DEVICE_REMOTE_WAKEUP, 0,
						NULL, 0,
						USB_CTRL_SET_TIMEOUT);
		} else {
			status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
					&devstatus);
			le16_to_cpus(&devstatus);
			if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
					| USB_INTRF_STAT_FUNC_RW))
				status =
					usb_disable_function_remotewakeup(udev);
		}

		if (status)
			dev_dbg(&udev->dev,
				"disable remote wakeup, status %d\n",
				status);
		status = 0;
	}
	return status;
}

/*
 * There are some SS USB devices which take longer time for link training.
 * XHCI specs 4.19.4 says that when Link training is successful, port
 * sets CSC bit to 1.  So if SW reads port status before successful link
 * training, then it will not find device to be present.
 * USB Analyzer log with such buggy devices show that in some cases
 * device switch on the RX termination after long delay of host enabling
 * the VBUS.  In few other cases it has been seen that device fails to
 * negotiate link training in first attempt.  It has been
 * reported till now that few devices take as long as 2000 ms to train
 * the link after host enabling its VBUS and termination.  Following
 * routine implements a 2000 ms timeout for link training.  If in a case
 * link trains before timeout, loop will exit earlier.
 *
 * FIXME: If a device was connected before suspend, but was removed
 * while system was asleep, then the loop in the following routine will
 * only exit at timeout.
 *
 * This routine should only be called when persist is enabled for a SS
 * device.
*/ static int wait_for_ss_port_enable(struct usb_device *udev, struct usb_hub *hub, int *port1, u16 *portchange, u16 *portstatus) { int status = 0, delay_ms = 0; while (delay_ms < 2000) { if (status || *portstatus & USB_PORT_STAT_CONNECTION) break; msleep(20); delay_ms += 20; status = hub_port_status(hub, *port1, portstatus, portchange); } return status; } /* * usb_port_resume - re-activate a suspended usb device's upstream port * @udev: device to re-activate, not a root hub * Context: must be able to sleep; device not locked; pm locks held * * This will re-activate the suspended device, increasing power usage * while letting drivers communicate again with its endpoints. * USB resume explicitly guarantees that the power session between * the host and the device is the same as it was when the device * suspended. * * If @udev->reset_resume is set then this routine won't check that the * port is still enabled. Furthermore, finish_port_resume() above will * reset @udev. The end result is that a broken power session can be * recovered and @udev will appear to persist across a loss of VBUS power. * * For example, if a host controller doesn't maintain VBUS suspend current * during a system sleep or is reset when the system wakes up, all the USB * power sessions below it will be broken. This is especially troublesome * for mass-storage devices containing mounted filesystems, since the * device will appear to have disconnected and all the memory mappings * to it will be lost. Using the USB_PERSIST facility, the device can be * made to appear as if it had not disconnected. * * This facility can be dangerous. Although usb_reset_and_verify_device() makes * every effort to insure that the same device is present after the * reset as before, it cannot provide a 100% guarantee. Furthermore it's * quite possible for a device to remain unaltered but its media to be * changed. 
If the user replaces a flash memory card while the system is
 * asleep, he will have only himself to blame when the filesystem on the
 * new card is corrupted and the system crashes.
 *
 * Returns 0 on success, else negative errno.
 */
int usb_port_resume(struct usb_device *udev, pm_message_t msg)
{
	struct usb_hub	*hub = usb_hub_to_struct_hub(udev->parent);
	struct usb_port *port_dev = hub->ports[udev->portnum - 1];
	int		port1 = udev->portnum;
	int		status;
	u16		portchange, portstatus;

#ifdef CONFIG_MTK_ICUSB_SUPPORT
	/* MTK IC-USB: nothing to do while the USB1.1 path is disabled */
	if (!is_usb11_enabled()) {
		MYDBG("usb11 is not enabled");
		return 0;
	}
	MYDBG("");
#endif

	/* re-take the port's runtime-PM reference if it was dropped */
	if (port_dev->did_runtime_put) {
		status = pm_runtime_get_sync(&port_dev->dev);
		port_dev->did_runtime_put = false;
		if (status < 0) {
			dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
					status);
			return status;
		}
	}

	/* Skip the initial Clear-Suspend step for a remote wakeup */
	status = hub_port_status(hub, port1, &portstatus, &portchange);
	if (status == 0 && !port_is_suspended(hub, portstatus))
		goto SuspendCleared;

	// dev_dbg(hub->intfdev, "resume port %d\n", port1);

	set_bit(port1, hub->busy_bits);

	/* see 7.1.7.7; affects power usage, but not budgeting */
	if (hub_is_superspeed(hub->hdev))
		status = hub_set_port_link_state(hub, port1,
				USB_SS_PORT_LS_U0);
	else
		status = usb_clear_port_feature(hub->hdev,
				port1, USB_PORT_FEAT_SUSPEND);
	if (status) {
		dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
				port1, status);
	} else {
		/* drive resume for USB_RESUME_TIMEOUT msec */
		dev_dbg(&udev->dev, "usb %sresume\n",
				(PMSG_IS_AUTO(msg) ? "auto-" : ""));
		msleep(USB_RESUME_TIMEOUT);

		/* Virtual root hubs can trigger on GET_PORT_STATUS to
		 * stop resume signaling.  Then finish the resume
		 * sequence.
		 */
		status = hub_port_status(hub, port1, &portstatus, &portchange);

		/* TRSMRCY = 10 msec */
		msleep(10);
	}

 SuspendCleared:
	if (status == 0) {
		udev->port_is_suspended = 0;
		/* acknowledge the change bit the resume produced */
		if (hub_is_superspeed(hub->hdev)) {
			if (portchange & USB_PORT_STAT_C_LINK_STATE)
				usb_clear_port_feature(hub->hdev, port1,
					USB_PORT_FEAT_C_PORT_LINK_STATE);
		} else {
			if (portchange & USB_PORT_STAT_C_SUSPEND)
				usb_clear_port_feature(hub->hdev, port1,
					USB_PORT_FEAT_C_SUSPEND);
		}
	}

	clear_bit(port1, hub->busy_bits);

	/* SuperSpeed link training may take up to 2000 ms; when persist is
	 * enabled, wait for the connect status before probing further */
	if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
		status = wait_for_ss_port_enable(udev, hub, &port1, &portchange,
				&portstatus);

	status = check_port_resume_type(udev,
			hub, port1, status, portchange, portstatus);
	if (status == 0)
		status = finish_port_resume(udev);
	if (status < 0) {
		dev_dbg(&udev->dev, "can't resume, status %d\n", status);
		hub_port_logical_disconnect(hub, port1);
	} else  {
		/* Try to enable USB2 hardware LPM */
		if (udev->usb2_hw_lpm_capable == 1)
			usb_set_usb2_hardware_lpm(udev, 1);

		/* Try to enable USB3 LTM and LPM */
		usb_enable_ltm(udev);
		usb_unlocked_enable_lpm(udev);
	}

	return status;
}

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

/* caller has locked udev */
int usb_remote_wakeup(struct usb_device *udev)
{
	int	status = 0;

	if (udev->state == USB_STATE_SUSPENDED) {
		dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
		status = usb_autoresume_device(udev);
		if (status == 0) {
			/* Let the drivers do their thing, then...
*/ usb_autosuspend_device(udev); } } return status; } #endif static int check_ports_changed(struct usb_hub *hub) { int port1; for (port1 = 1; port1 <= hub->hdev->maxchild; ++port1) { u16 portstatus, portchange; int status; status = hub_port_status(hub, port1, &portstatus, &portchange); if (!status && portchange) return 1; } return 0; } static int hub_suspend(struct usb_interface *intf, pm_message_t msg) { struct usb_hub *hub = usb_get_intfdata (intf); struct usb_device *hdev = hub->hdev; unsigned port1; int status; /* * Warn if children aren't already suspended. * Also, add up the number of wakeup-enabled descendants. */ hub->wakeup_enabled_descendants = 0; for (port1 = 1; port1 <= hdev->maxchild; port1++) { struct usb_device *udev; udev = hub->ports[port1 - 1]->child; if (udev && udev->can_submit) { dev_warn(&intf->dev, "port %d nyet suspended\n", port1); if (PMSG_IS_AUTO(msg)) return -EBUSY; } if (udev) hub->wakeup_enabled_descendants += wakeup_enabled_descendants(udev); } if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) { /* check if there are changes pending on hub ports */ if (check_ports_changed(hub)) { if (PMSG_IS_AUTO(msg)) return -EBUSY; pm_wakeup_event(&hdev->dev, 2000); } } if (hub_is_superspeed(hdev) && hdev->do_remote_wakeup) { /* Enable hub to send remote wakeup for all ports. 
*/ for (port1 = 1; port1 <= hdev->maxchild; port1++) { status = set_port_feature(hdev, port1 | USB_PORT_FEAT_REMOTE_WAKE_CONNECT | USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT | USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT, USB_PORT_FEAT_REMOTE_WAKE_MASK); } } dev_dbg(&intf->dev, "%s\n", __func__); /* stop khubd and related activity */ hub_quiesce(hub, HUB_SUSPEND); return 0; } static int hub_resume(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); dev_dbg(&intf->dev, "%s\n", __func__); hub_activate(hub, HUB_RESUME); return 0; } static int hub_reset_resume(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); dev_dbg(&intf->dev, "%s\n", __func__); hub_activate(hub, HUB_RESET_RESUME); return 0; } /** * usb_root_hub_lost_power - called by HCD if the root hub lost Vbus power * @rhdev: struct usb_device for the root hub * * The USB host controller driver calls this function when its root hub * is resumed and Vbus power has been interrupted or the controller * has been reset. The routine marks @rhdev as having lost power. * When the hub driver is resumed it will take notice and carry out * power-session recovery for all the "USB-PERSIST"-enabled child devices; * the others will be disconnected. */ void usb_root_hub_lost_power(struct usb_device *rhdev) { dev_warn(&rhdev->dev, "root hub lost power or was reset\n"); rhdev->reset_resume = 1; } EXPORT_SYMBOL_GPL(usb_root_hub_lost_power); static const char * const usb3_lpm_names[] = { "U0", "U1", "U2", "U3", }; /* * Send a Set SEL control transfer to the device, prior to enabling * device-initiated U1 or U2. This lets the device know the exit latencies from * the time the device initiates a U1 or U2 exit, to the time it will receive a * packet from the host. * * This function will fail if the SEL or PEL values for udev are greater than * the maximum allowed values for the link state to be enabled. 
 */
static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
{
	struct usb_set_sel_req *sel_values;
	unsigned long long u1_sel;
	unsigned long long u1_pel;
	unsigned long long u2_sel;
	unsigned long long u2_pel;
	int ret;

	/* Convert SEL and PEL stored in ns to us */
	u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
	u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
	u2_sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
	u2_pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);

	/*
	 * Make sure that the calculated SEL and PEL values for the link
	 * state we're enabling aren't bigger than the max SEL/PEL
	 * value that will fit in the SET SEL control transfer.
	 * Otherwise the device would get an incorrect idea of the exit
	 * latency for the link state, and could start a device-initiated
	 * U1/U2 when the exit latencies are too high.
	 */
	if ((state == USB3_LPM_U1 &&
				(u1_sel > USB3_LPM_MAX_U1_SEL_PEL ||
				 u1_pel > USB3_LPM_MAX_U1_SEL_PEL)) ||
			(state == USB3_LPM_U2 &&
			 (u2_sel > USB3_LPM_MAX_U2_SEL_PEL ||
			  u2_pel > USB3_LPM_MAX_U2_SEL_PEL))) {
		dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us or PEL %llu us\n",
				usb3_lpm_names[state], u1_sel, u1_pel);
		return -EINVAL;
	}

	/*
	 * If we're enabling device-initiated LPM for one link state,
	 * but the other link state has a too high SEL or PEL value,
	 * just set those values to the max in the Set SEL request.
	 */
	if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL)
		u1_sel = USB3_LPM_MAX_U1_SEL_PEL;
	if (u1_pel > USB3_LPM_MAX_U1_SEL_PEL)
		u1_pel = USB3_LPM_MAX_U1_SEL_PEL;
	if (u2_sel > USB3_LPM_MAX_U2_SEL_PEL)
		u2_sel = USB3_LPM_MAX_U2_SEL_PEL;
	if (u2_pel > USB3_LPM_MAX_U2_SEL_PEL)
		u2_pel = USB3_LPM_MAX_U2_SEL_PEL;

	/*
	 * usb_enable_lpm() can be called as part of a failed device reset,
	 * which may be initiated by an error path of a mass storage driver.
	 * Therefore, use GFP_NOIO.
	 */
	sel_values = kmalloc(sizeof *(sel_values), GFP_NOIO);
	if (!sel_values)
		return -ENOMEM;

	/* only the u2 fields are byte-swapped here; presumably u1_sel/u1_pel
	 * are single-byte fields in struct usb_set_sel_req — confirm against
	 * the struct definition */
	sel_values->u1_sel = u1_sel;
	sel_values->u1_pel = u1_pel;
	sel_values->u2_sel = cpu_to_le16(u2_sel);
	sel_values->u2_pel = cpu_to_le16(u2_pel);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			USB_REQ_SET_SEL,
			USB_RECIP_DEVICE,
			0, 0,
			sel_values, sizeof *(sel_values),
			USB_CTRL_SET_TIMEOUT);
	kfree(sel_values);
	return ret;
}

/*
 * Enable or disable device-initiated U1 or U2 transitions.
 */
static int usb_set_device_initiated_lpm(struct usb_device *udev,
		enum usb3_link_state state, bool enable)
{
	int ret;
	int feature;

	switch (state) {
	case USB3_LPM_U1:
		feature = USB_DEVICE_U1_ENABLE;
		break;
	case USB3_LPM_U2:
		feature = USB_DEVICE_U2_ENABLE;
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't %s non-U1 or U2 state.\n",
				__func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	if (udev->state != USB_STATE_CONFIGURED) {
		/* not an error: an unconfigured device simply can't accept
		 * the feature request yet */
		dev_dbg(&udev->dev, "%s: Can't %s %s state "
				"for unconfigured device.\n",
				__func__, enable ? "enable" : "disable",
				usb3_lpm_names[state]);
		return 0;
	}

	if (enable) {
		/*
		 * Now send the control transfer to enable device-initiated LPM
		 * for either U1 or U2.
		 */
		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				USB_REQ_SET_FEATURE,
				USB_RECIP_DEVICE,
				feature,
				0, NULL, 0,
				USB_CTRL_SET_TIMEOUT);
	} else {
		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				USB_REQ_CLEAR_FEATURE,
				USB_RECIP_DEVICE,
				feature,
				0, NULL, 0,
				USB_CTRL_SET_TIMEOUT);
	}
	if (ret < 0) {
		dev_warn(&udev->dev, "%s of device-initiated %s failed.\n",
				enable ?
"Enable" : "Disable", usb3_lpm_names[state]); return -EBUSY; } return 0; } static int usb_set_lpm_timeout(struct usb_device *udev, enum usb3_link_state state, int timeout) { int ret; int feature; switch (state) { case USB3_LPM_U1: feature = USB_PORT_FEAT_U1_TIMEOUT; break; case USB3_LPM_U2: feature = USB_PORT_FEAT_U2_TIMEOUT; break; default: dev_warn(&udev->dev, "%s: Can't set timeout for non-U1 or U2 state.\n", __func__); return -EINVAL; } if (state == USB3_LPM_U1 && timeout > USB3_LPM_U1_MAX_TIMEOUT && timeout != USB3_LPM_DEVICE_INITIATED) { dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x, " "which is a reserved value.\n", usb3_lpm_names[state], timeout); return -EINVAL; } ret = set_port_feature(udev->parent, USB_PORT_LPM_TIMEOUT(timeout) | udev->portnum, feature); if (ret < 0) { dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x," "error code %i\n", usb3_lpm_names[state], timeout, ret); return -EBUSY; } if (state == USB3_LPM_U1) udev->u1_params.timeout = timeout; else udev->u2_params.timeout = timeout; return 0; } /* * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated * U1/U2 entry. * * We will attempt to enable U1 or U2, but there are no guarantees that the * control transfers to set the hub timeout or enable device-initiated U1/U2 * will be successful. * * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI * driver know about it. If that call fails, it should be harmless, and just * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency. */ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { int timeout, ret; __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat; __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat; /* If the device says it doesn't have *any* exit latency to come out of * U1 or U2, it's probably lying. Assume it doesn't implement that link * state. 
*/ if ((state == USB3_LPM_U1 && u1_mel == 0) || (state == USB3_LPM_U2 && u2_mel == 0)) return; /* * First, let the device know about the exit latencies * associated with the link state we're about to enable. */ ret = usb_req_set_sel(udev, state); if (ret < 0) { dev_warn(&udev->dev, "Set SEL for device-initiated %s failed.\n", usb3_lpm_names[state]); return; } /* We allow the host controller to set the U1/U2 timeout internally * first, so that it can change its schedule to account for the * additional latency to send data to a device in a lower power * link state. */ timeout = hcd->driver->enable_usb3_lpm_timeout(hcd, udev, state); /* xHCI host controller doesn't want to enable this LPM state. */ if (timeout == 0) return; if (timeout < 0) { dev_warn(&udev->dev, "Could not enable %s link state, " "xHCI error %i.\n", usb3_lpm_names[state], timeout); return; } if (usb_set_lpm_timeout(udev, state, timeout)) /* If we can't set the parent hub U1/U2 timeout, * device-initiated LPM won't be allowed either, so let the xHCI * host know that this link state won't be enabled. */ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); /* Only a configured device will accept the Set Feature U1/U2_ENABLE */ else if (udev->actconfig) usb_set_device_initiated_lpm(udev, state, true); } /* * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated * U1/U2 entry. * * If this function returns -EBUSY, the parent hub will still allow U1/U2 entry. * If zero is returned, the parent will not allow the link to go into U1/U2. * * If zero is returned, device-initiated U1/U2 entry may still be enabled, but * it won't have an effect on the bus link state because the parent hub will * still disallow device-initiated U1/U2 entry. * * If zero is returned, the xHCI host controller may still think U1/U2 entry is * possible. The result will be slightly more bus bandwidth will be taken up * (to account for U1/U2 exit latency), but it should be harmless. 
*/ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { int feature; switch (state) { case USB3_LPM_U1: feature = USB_PORT_FEAT_U1_TIMEOUT; break; case USB3_LPM_U2: feature = USB_PORT_FEAT_U2_TIMEOUT; break; default: dev_warn(&udev->dev, "%s: Can't disable non-U1 or U2 state.\n", __func__); return -EINVAL; } if (usb_set_lpm_timeout(udev, state, 0)) return -EBUSY; usb_set_device_initiated_lpm(udev, state, false); if (hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state)) dev_warn(&udev->dev, "Could not disable xHCI %s timeout, " "bus schedule bandwidth may be impacted.\n", usb3_lpm_names[state]); return 0; } /* * Disable hub-initiated and device-initiated U1 and U2 entry. * Caller must own the bandwidth_mutex. * * This will call usb_enable_lpm() on failure, which will decrement * lpm_disable_count, and will re-enable LPM if lpm_disable_count reaches zero. */ int usb_disable_lpm(struct usb_device *udev) { struct usb_hcd *hcd; if (!udev || !udev->parent || udev->speed != USB_SPEED_SUPER || !udev->lpm_capable) return 0; hcd = bus_to_hcd(udev->bus); if (!hcd || !hcd->driver->disable_usb3_lpm_timeout) return 0; udev->lpm_disable_count++; if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0)) return 0; /* If LPM is enabled, attempt to disable it. 
*/ if (usb_disable_link_state(hcd, udev, USB3_LPM_U1)) goto enable_lpm; if (usb_disable_link_state(hcd, udev, USB3_LPM_U2)) goto enable_lpm; return 0; enable_lpm: usb_enable_lpm(udev); return -EBUSY; } EXPORT_SYMBOL_GPL(usb_disable_lpm); /* Grab the bandwidth_mutex before calling usb_disable_lpm() */ int usb_unlocked_disable_lpm(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); int ret; if (!hcd) return -EINVAL; mutex_lock(hcd->bandwidth_mutex); ret = usb_disable_lpm(udev); mutex_unlock(hcd->bandwidth_mutex); return ret; } EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm); /* * Attempt to enable device-initiated and hub-initiated U1 and U2 entry. The * xHCI host policy may prevent U1 or U2 from being enabled. * * Other callers may have disabled link PM, so U1 and U2 entry will be disabled * until the lpm_disable_count drops to zero. Caller must own the * bandwidth_mutex. */ void usb_enable_lpm(struct usb_device *udev) { struct usb_hcd *hcd; if (!udev || !udev->parent || udev->speed != USB_SPEED_SUPER || !udev->lpm_capable) return; udev->lpm_disable_count--; hcd = bus_to_hcd(udev->bus); /* Double check that we can both enable and disable LPM. * Device must be configured to accept set feature U1/U2 timeout. 
*/ if (!hcd || !hcd->driver->enable_usb3_lpm_timeout || !hcd->driver->disable_usb3_lpm_timeout) return; if (udev->lpm_disable_count > 0) return; usb_enable_link_state(hcd, udev, USB3_LPM_U1); usb_enable_link_state(hcd, udev, USB3_LPM_U2); } EXPORT_SYMBOL_GPL(usb_enable_lpm); /* Grab the bandwidth_mutex before calling usb_enable_lpm() */ void usb_unlocked_enable_lpm(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); if (!hcd) return; mutex_lock(hcd->bandwidth_mutex); usb_enable_lpm(udev); mutex_unlock(hcd->bandwidth_mutex); } EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); #else /* CONFIG_PM */ #define hub_suspend NULL #define hub_resume NULL #define hub_reset_resume NULL int usb_disable_lpm(struct usb_device *udev) { return 0; } EXPORT_SYMBOL_GPL(usb_disable_lpm); void usb_enable_lpm(struct usb_device *udev) { } EXPORT_SYMBOL_GPL(usb_enable_lpm); int usb_unlocked_disable_lpm(struct usb_device *udev) { return 0; } EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm); void usb_unlocked_enable_lpm(struct usb_device *udev) { } EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); int usb_disable_ltm(struct usb_device *udev) { return 0; } EXPORT_SYMBOL_GPL(usb_disable_ltm); void usb_enable_ltm(struct usb_device *udev) { } EXPORT_SYMBOL_GPL(usb_enable_ltm); #endif /* USB 2.0 spec, 7.1.7.3 / fig 7-29: * * Between connect detection and reset signaling there must be a delay * of 100ms at least for debounce and power-settling. The corresponding * timer shall restart whenever the downstream port detects a disconnect. * * Apparently there are some bluetooth and irda-dongles and a number of * low-speed devices for which this debounce period may last over a second. * Not covered by the spec - but easy to deal with. * * This implementation uses a 1500ms total debounce timeout; if the * connection isn't stable by then it returns -ETIMEDOUT. It checks * every 25ms for transient disconnects. When the port status has been * unchanged for 100ms it returns the port status. 
*/ int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected) { int ret; int total_time, stable_time = 0; u16 portchange, portstatus; unsigned connection = 0xffff; for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { ret = hub_port_status(hub, port1, &portstatus, &portchange); if (ret < 0) return ret; if (!(portchange & USB_PORT_STAT_C_CONNECTION) && (portstatus & USB_PORT_STAT_CONNECTION) == connection) { if (!must_be_connected || (connection == USB_PORT_STAT_CONNECTION)) stable_time += HUB_DEBOUNCE_STEP; if (stable_time >= HUB_DEBOUNCE_STABLE) break; } else { stable_time = 0; connection = portstatus & USB_PORT_STAT_CONNECTION; } if (portchange & USB_PORT_STAT_C_CONNECTION) { usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_CONNECTION); } if (total_time >= HUB_DEBOUNCE_TIMEOUT) break; msleep(HUB_DEBOUNCE_STEP); } dev_dbg (hub->intfdev, "debounce: port %d: total %dms stable %dms status 0x%x\n", port1, total_time, stable_time, portstatus); if (stable_time < HUB_DEBOUNCE_STABLE) return -ETIMEDOUT; return portstatus; } void usb_ep0_reinit(struct usb_device *udev) { usb_disable_endpoint(udev, 0 + USB_DIR_IN, true); usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true); usb_enable_endpoint(udev, &udev->ep0, true); } EXPORT_SYMBOL_GPL(usb_ep0_reinit); static int hub_set_address(struct usb_device *udev, int devnum) { int retval; struct usb_hcd *hcd = bus_to_hcd(udev->bus); /* * The host controller will choose the device address, * instead of the core having chosen it earlier */ if (!hcd->driver->address_device && devnum <= 1) return -EINVAL; if (udev->state == USB_STATE_ADDRESS) return 0; if (udev->state != USB_STATE_DEFAULT) return -EINVAL; if (hcd->driver->address_device) retval = hcd->driver->address_device(hcd, udev); else retval = usb_control_msg(udev, usb_sndaddr0pipe(), USB_REQ_SET_ADDRESS, 0, devnum, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval == 0) { update_devnum(udev, devnum); /* Device now using proper address. 
 */
		usb_set_device_state(udev, USB_STATE_ADDRESS);
		usb_ep0_reinit(udev);
	}
	return retval;
}

/* Reset device, (re)assign address, get device descriptor.
 * Device connection must be stable, no more debouncing needed.
 * Returns device in USB_STATE_ADDRESS, except on error.
 *
 * If this is called for an already-existing device (as part of
 * usb_reset_and_verify_device), the caller must own the device lock.  For a
 * newly detected device that is not accessible through any global
 * pointers, it's not necessary to lock the device.
 */
static int
hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
		int retry_counter)
{
	static DEFINE_MUTEX(usb_address0_mutex);

	struct usb_device	*hdev = hub->hdev;
	struct usb_hcd		*hcd = bus_to_hcd(hdev->bus);
	int			i, j, retval;
	unsigned		delay = HUB_SHORT_RESET_TIME;
	enum usb_device_speed	oldspeed = udev->speed;
	const char		*speed;
	int			devnum = udev->devnum;

	/* NOTE(review): dump_stack() and the MYDBG() calls below look like
	 * vendor debug leftovers; they fire on every enumeration attempt. */
	dump_stack();

	/* root hub ports have a slightly longer reset period
	 * (from USB 2.0 spec, section 7.1.7.5)
	 */
	if (!hdev->parent) {
		delay = HUB_ROOT_RESET_TIME;
		if (port1 == hdev->bus->otg_port)
			hdev->bus->b_hnp_enable = 0;
	}

	/* Some low speed devices have problems with the quick delay, so */
	/*  be a bit pessimistic with those devices. RHbug #23670 */
	if (oldspeed == USB_SPEED_LOW)
		delay = HUB_LONG_RESET_TIME;

	/* only one device may be enumerating at address 0 at a time */
	mutex_lock(&usb_address0_mutex);

	/* Reset the device; full speed may morph to high speed */
	/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
	MYDBG("");
	retval = hub_port_reset(hub, port1, udev, delay, false);
	MYDBG("");
	if (retval < 0)		/* error or disconnect */
		goto fail;
	/* success, speed is known */

	retval = -ENODEV;

	if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
		dev_dbg(&udev->dev, "device reset changed speed!\n");
		goto fail;
	}
	oldspeed = udev->speed;

	/* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
	 * it's fixed size except for full speed devices.
	 * For Wireless USB devices, ep0 max packet is always 512 (tho
	 * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
	case USB_SPEED_WIRELESS:	/* fixed at 512 */
		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
		break;
	case USB_SPEED_HIGH:		/* fixed at 64 */
		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
		break;
	case USB_SPEED_FULL:		/* 8, 16, 32, or 64 */
		/* to determine the ep0 maxpacket size, try to read
		 * the device descriptor to get bMaxPacketSize0 and
		 * then correct our initial guess.
		 */
		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
		break;
	case USB_SPEED_LOW:		/* fixed at 8 */
		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
		break;
	default:
		goto fail;
	}

	MYDBG("");
	if (udev->speed == USB_SPEED_WIRELESS)
		speed = "variable speed Wireless";
	else
		speed = usb_speed_string(udev->speed);

	/* SuperSpeed devices are announced later, once the final devnum
	 * chosen by the xHC is known */
	if (udev->speed != USB_SPEED_SUPER)
		dev_info(&udev->dev,
				"%s %s USB device number %d using %s\n",
				(udev->config) ? "reset" : "new", speed,
				devnum, udev->bus->controller->driver->name);

	/* Set up TT records, if needed  */
	if (hdev->tt) {
		udev->tt = hdev->tt;
		udev->ttport = hdev->ttport;
	} else if (udev->speed != USB_SPEED_HIGH
			&& hdev->speed == USB_SPEED_HIGH) {
		if (!hub->tt.hub) {
			dev_err(&udev->dev, "parent hub has no TT\n");
			retval = -EINVAL;
			goto fail;
		}
		udev->tt = &hub->tt;
		udev->ttport = port1;
	}

	/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
	 * Because device hardware and firmware is sometimes buggy in
	 * this area, and this is how Linux has done it for ages.
	 * Change it cautiously.
	 *
	 * NOTE: If USE_NEW_SCHEME() is true we will start by issuing
	 * a 64-byte GET_DESCRIPTOR request.  This is what Windows does,
	 * so it may help with some non-standards-compliant devices.
	 * Otherwise we start with SET_ADDRESS and then try to read the
	 * first 8 bytes of the device descriptor to get the ep0 maxpacket
	 * value.
	 */
	for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
		MYDBG("");
		if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags &
				HCD_USB3)) {
			struct usb_device_descriptor *buf;
			int r = 0;

#define GET_DESCRIPTOR_BUFSIZE	64
			buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
			if (!buf) {
				retval = -ENOMEM;
				continue;
			}

			/* Retry on all errors; some devices are flakey.
			 * 255 is for WUSB devices, we actually need to use
			 * 512 (WUSB1.0[4.8.1]).
			 */
			for (j = 0; j < 3; ++j) {
				buf->bMaxPacketSize0 = 0;
				r = usb_control_msg(udev, usb_rcvaddr0pipe(),
					USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
					USB_DT_DEVICE << 8, 0,
					buf, GET_DESCRIPTOR_BUFSIZE,
					initial_descriptor_timeout);
				switch (buf->bMaxPacketSize0) {
				case 8: case 16: case 32: case 64: case 255:
					if (buf->bDescriptorType ==
							USB_DT_DEVICE) {
						r = 0;
						break;
					}
					/* FALL THROUGH */
				default:
					if (r == 0)
						r = -EPROTO;
					break;
				}
				if (r == 0)
					break;
			}
			udev->descriptor.bMaxPacketSize0 =
					buf->bMaxPacketSize0;
			kfree(buf);

			/* the 64-byte read leaves some devices confused;
			 * reset again before addressing */
			retval = hub_port_reset(hub, port1, udev, delay, false);
			if (retval < 0)		/* error or disconnect */
				goto fail;
			if (oldspeed != udev->speed) {
				dev_dbg(&udev->dev,
					"device reset changed speed!\n");
				retval = -ENODEV;
				goto fail;
			}
			if (r) {
				if (r != -ENODEV)
					dev_err(&udev->dev, "device descriptor read/64, error %d\n",
							r);
				retval = -EMSGSIZE;
				continue;
			}
#undef GET_DESCRIPTOR_BUFSIZE
		}

		/*
		 * If device is WUSB, we already assigned an
		 * unauthorized address in the Connect Ack sequence;
		 * authorization will assign the final address.
		 */
		if (udev->wusb == 0) {
			for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
				retval = hub_set_address(udev, devnum);
				if (retval >= 0)
					break;
				msleep(200);
			}
			if (retval < 0) {
				if (retval != -ENODEV)
					dev_err(&udev->dev, "device not accepting address %d, error %d\n",
							devnum, retval);
				goto fail;
			}
			if (udev->speed == USB_SPEED_SUPER) {
				devnum = udev->devnum;
				dev_info(&udev->dev,
						"%s SuperSpeed USB device number %d using %s\n",
						(udev->config) ? "reset" : "new",
						devnum, udev->bus->controller->driver->name);
			}

			/* cope with hardware quirkiness:
			 *  - let SET_ADDRESS settle, some device hardware wants it
			 *  - read ep0 maxpacket even for high and low speed,
			 */
			msleep(10);
			if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags &
					HCD_USB3))
				break;
		}

		retval = usb_get_device_descriptor(udev, 8);
		if (retval < 8) {
			if (retval != -ENODEV)
				dev_err(&udev->dev,
					"device descriptor read/8, error %d\n",
					retval);
			if (retval >= 0)
				retval = -EMSGSIZE;
		} else {
			retval = 0;
			break;
		}
	}
	if (retval)
		goto fail;

	if (hcd->phy && !hdev->parent)
		usb_phy_notify_connect(hcd->phy, udev->speed);

	/*
	 * Some superspeed devices have finished the link training process
	 * and attached to a superspeed hub port, but the device descriptor
	 * got from those devices show they aren't superspeed devices. Warm
	 * reset the port attached by the devices can fix them.
	 */
	if ((udev->speed == USB_SPEED_SUPER) &&
			(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
		dev_err(&udev->dev, "got a wrong device descriptor, "
				"warm reset device\n");
		hub_port_reset(hub, port1, udev,
				HUB_BH_RESET_TIME, true);
		retval = -EINVAL;
		goto fail;
	}

	/* now that bMaxPacketSize0 is known, fix up our ep0 guess */
	if (udev->descriptor.bMaxPacketSize0 == 0xff ||
			udev->speed == USB_SPEED_SUPER)
		i = 512;
	else
		i = udev->descriptor.bMaxPacketSize0;
	if (usb_endpoint_maxp(&udev->ep0.desc) != i) {
		if (udev->speed == USB_SPEED_LOW ||
				!(i == 8 || i == 16 || i == 32 || i == 64)) {
			dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
			retval = -EMSGSIZE;
			goto fail;
		}
		if (udev->speed == USB_SPEED_FULL)
			dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
		else
			dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
		usb_ep0_reinit(udev);
	}

	retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
	if (retval < (signed)sizeof(udev->descriptor)) {
		if (retval != -ENODEV)
			dev_err(&udev->dev, "device descriptor read/all, error %d\n",
					retval);
		if (retval >= 0)
			retval = -ENOMSG;
		goto fail;
	}

	if (udev->wusb == 0 &&
le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { retval = usb_get_bos_descriptor(udev); if (!retval) { udev->lpm_capable = usb_device_supports_lpm(udev); usb_set_lpm_parameters(udev); } } retval = 0; /* notify HCD that we have a device connected and addressed */ if (hcd->driver->update_device) hcd->driver->update_device(hcd, udev); fail: if (retval) { hub_port_disable(hub, port1, 0); update_devnum(udev, devnum); /* for disconnect processing */ } mutex_unlock(&usb_address0_mutex); return retval; } static void check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) { struct usb_qualifier_descriptor *qual; int status; qual = kmalloc (sizeof *qual, GFP_KERNEL); if (qual == NULL) return; status = usb_get_descriptor (udev, USB_DT_DEVICE_QUALIFIER, 0, qual, sizeof *qual); if (status == sizeof *qual) { dev_info(&udev->dev, "not running at top speed; " "connect to a high speed hub\n"); /* hub LEDs are probably harder to miss than syslog */ if (hub->has_indicators) { hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; schedule_delayed_work (&hub->leds, 0); } } kfree(qual); } static unsigned hub_power_remaining (struct usb_hub *hub) { struct usb_device *hdev = hub->hdev; int remaining; int port1; if (!hub->limited_power) return 0; remaining = hdev->bus_mA - hub->descriptor->bHubContrCurrent; for (port1 = 1; port1 <= hdev->maxchild; ++port1) { struct usb_device *udev = hub->ports[port1 - 1]->child; int delta; unsigned unit_load; if (!udev) continue; if (hub_is_superspeed(udev)) unit_load = 150; else unit_load = 100; /* * Unconfigured devices may not use more than one unit load, * or 8mA for OTG ports */ if (udev->actconfig) delta = usb_get_max_power(udev, udev->actconfig); else if (port1 != udev->bus->otg_port || hdev->parent) delta = unit_load; else delta = 8; if (delta > hub->mA_per_port) dev_warn(&udev->dev, "%dmA is over %umA budget for port %d!\n", delta, hub->mA_per_port, port1); remaining -= delta; } if (remaining < 0) { dev_warn(hub->intfdev, "%dmA over 
power budget!\n", - remaining); remaining = 0; } return remaining; } /* Handle physical or logical connection change events. * This routine is called when: * a port connection-change occurs; * a port enable-change occurs (often caused by EMI); * usb_reset_and_verify_device() encounters changed descriptors (as from * a firmware download) * caller already locked the hub */ static void hub_port_connect_change(struct usb_hub *hub, int port1, u16 portstatus, u16 portchange) { struct usb_device *hdev = hub->hdev; struct device *hub_dev = hub->intfdev; struct usb_hcd *hcd = bus_to_hcd(hdev->bus); unsigned wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); struct usb_device *udev; int status, i; unsigned unit_load; MYDBG(""); dev_dbg (hub_dev, "port %d, status %04x, change %04x, %s\n", port1, portstatus, portchange, portspeed(hub, portstatus)); if (hub->has_indicators) { set_port_led(hub, port1, HUB_LED_AUTO); hub->indicator[port1-1] = INDICATOR_AUTO; } #ifdef CONFIG_USB_OTG /* during HNP, don't repeat the debounce */ if (hdev->bus->is_b_host) portchange &= ~(USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE); #endif /* Try to resuscitate an existing device */ udev = hub->ports[port1 - 1]->child; if ((portstatus & USB_PORT_STAT_CONNECTION) && udev && udev->state != USB_STATE_NOTATTACHED) { usb_lock_device(udev); if (portstatus & USB_PORT_STAT_ENABLE) { status = 0; /* Nothing to do */ #ifdef CONFIG_PM_RUNTIME } else if (udev->state == USB_STATE_SUSPENDED && udev->persist_enabled) { /* For a suspended device, treat this as a * remote wakeup event. 
*/ status = usb_remote_wakeup(udev); #endif } else { status = -ENODEV; /* Don't resuscitate */ } usb_unlock_device(udev); if (status == 0) { clear_bit(port1, hub->change_bits); return; } } /* Disconnect any existing devices under this port */ if (udev) { if (hcd->phy && !hdev->parent && !(portstatus & USB_PORT_STAT_CONNECTION)) usb_phy_notify_disconnect(hcd->phy, udev->speed); usb_disconnect(&hub->ports[port1 - 1]->child); } clear_bit(port1, hub->change_bits); /* We can forget about a "removed" device when there's a physical * disconnect or the connect status changes. */ if (!(portstatus & USB_PORT_STAT_CONNECTION) || (portchange & USB_PORT_STAT_C_CONNECTION)) clear_bit(port1, hub->removed_bits); if (portchange & (USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE)) { status = hub_port_debounce_be_stable(hub, port1); if (status < 0) { if (status != -ENODEV && printk_ratelimit()) dev_err(hub_dev, "connect-debounce failed, " "port %d disabled\n", port1); portstatus &= ~USB_PORT_STAT_CONNECTION; } else { portstatus = status; } } /* Return now if debouncing failed or nothing is connected or * the device was "removed". */ if (!(portstatus & USB_PORT_STAT_CONNECTION) || test_bit(port1, hub->removed_bits)) { /* maybe switch power back on (e.g. 
root hub was reset) */ if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2 && !port_is_power_on(hub, portstatus)) set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); if (portstatus & USB_PORT_STAT_ENABLE) goto done; return; } if (hub_is_superspeed(hub->hdev)) unit_load = 150; else unit_load = 100; status = 0; for (i = 0; i < SET_CONFIG_TRIES; i++) { /* reallocate for each attempt, since references * to the previous one can escape in various ways */ udev = usb_alloc_dev(hdev, hdev->bus, port1); if (!udev) { dev_err (hub_dev, "couldn't allocate port %d usb_device\n", port1); goto done; } usb_set_device_state(udev, USB_STATE_POWERED); udev->bus_mA = hub->mA_per_port; udev->level = hdev->level + 1; udev->wusb = hub_is_wusb(hub); /* Only USB 3.0 devices are connected to SuperSpeed hubs. */ if (hub_is_superspeed(hub->hdev)) udev->speed = USB_SPEED_SUPER; else udev->speed = USB_SPEED_UNKNOWN; choose_devnum(udev); if (udev->devnum <= 0) { status = -ENOTCONN; /* Don't retry */ goto loop; } /* reset (non-USB 3.0 devices) and get descriptor */ MYDBG(""); status = hub_port_init(hub, udev, port1, i); if (status < 0) { MYDBG(""); goto loop; } MYDBG(""); usb_detect_quirks(udev); if (udev->quirks & USB_QUIRK_DELAY_INIT) msleep(1000); /* consecutive bus-powered hubs aren't reliable; they can * violate the voltage drop budget. if the new child has * a "powered" LED, users should notice we didn't enable it * (without reading syslog), even without per-port LEDs * on the parent. 
*/ if (udev->descriptor.bDeviceClass == USB_CLASS_HUB && udev->bus_mA <= unit_load) { u16 devstat; status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstat); if (status < 2) { dev_dbg(&udev->dev, "get status %d ?\n", status); goto loop_disable; } le16_to_cpus(&devstat); if ((devstat & (1 << USB_DEVICE_SELF_POWERED)) == 0) { dev_err(&udev->dev, "can't connect bus-powered hub " "to this port\n"); if (hub->has_indicators) { hub->indicator[port1-1] = INDICATOR_AMBER_BLINK; schedule_delayed_work (&hub->leds, 0); } status = -ENOTCONN; /* Don't retry */ goto loop_disable; } } /* check for devices running slower than they could */ if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200 && udev->speed == USB_SPEED_FULL && highspeed_hubs != 0) check_highspeed (hub, udev, port1); /* Store the parent's children[] pointer. At this point * udev becomes globally accessible, although presumably * no one will look at it until hdev is unlocked. */ status = 0; /* We mustn't add new devices if the parent hub has * been disconnected; we would race with the * recursively_mark_NOTATTACHED() routine. 
*/ spin_lock_irq(&device_state_lock); if (hdev->state == USB_STATE_NOTATTACHED) status = -ENOTCONN; else hub->ports[port1 - 1]->child = udev; spin_unlock_irq(&device_state_lock); /* Run it through the hoops (find a driver, etc) */ if (!status) { status = usb_new_device(udev); if (status) { spin_lock_irq(&device_state_lock); hub->ports[port1 - 1]->child = NULL; spin_unlock_irq(&device_state_lock); } #ifdef CONFIG_MTK_ICUSB_SUPPORT g_sim_dev = udev; MYDBG("get new device !!!, BUILD TIME : %s, g_sim_dev : %x\n", __TIME__, g_sim_dev); #endif #ifdef ORG_SUSPEND_RESUME_TEST g_sim_dev = udev; MYDBG("get new device !!!, BUILD TIME : %s, g_sim_dev : %x\n", __TIME__, g_sim_dev); #endif } if (status) goto loop_disable; status = hub_power_remaining(hub); if (status) dev_dbg(hub_dev, "%dmA power budget left\n", status); return; loop_disable: hub_port_disable(hub, port1, 1); loop: usb_ep0_reinit(udev); release_devnum(udev); hub_free_dev(udev); usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; } if (hub->hdev->parent || !hcd->driver->port_handed_over || !(hcd->driver->port_handed_over)(hcd, port1)) { if (status != -ENOTCONN && status != -ENODEV) dev_err(hub_dev, "unable to enumerate USB device on port %d\n", port1); } done: hub_port_disable(hub, port1, 1); if (hcd->driver->relinquish_port && !hub->hdev->parent) hcd->driver->relinquish_port(hcd, port1); } /* Returns 1 if there was a remote wakeup and a connect status change. 
*/ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, u16 portstatus, u16 portchange) { struct usb_device *hdev; struct usb_device *udev; int connect_change = 0; int ret; hdev = hub->hdev; udev = hub->ports[port - 1]->child; if (!hub_is_superspeed(hdev)) { if (!(portchange & USB_PORT_STAT_C_SUSPEND)) return 0; usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND); } else { if (!udev || udev->state != USB_STATE_SUSPENDED || (portstatus & USB_PORT_STAT_LINK_STATE) != USB_SS_PORT_LS_U0) return 0; } if (udev) { /* TRSMRCY = 10 msec */ msleep(10); usb_lock_device(udev); ret = usb_remote_wakeup(udev); usb_unlock_device(udev); if (ret < 0) connect_change = 1; } else { ret = -ENODEV; hub_port_disable(hub, port, 1); } dev_dbg(hub->intfdev, "resume on port %d, status %d\n", port, ret); return connect_change; } static void hub_events(void) { struct list_head *tmp; struct usb_device *hdev; struct usb_interface *intf; struct usb_hub *hub; struct device *hub_dev; u16 hubstatus; u16 hubchange; u16 portstatus; u16 portchange; int i, ret; int connect_change, wakeup_change; /* * We restart the list every time to avoid a deadlock with * deleting hubs downstream from this one. This should be * safe since we delete the hub from the event list. * Not the most efficient, but avoids deadlocks. */ while (1) { /* Grab the first entry at the beginning of the list */ spin_lock_irq(&hub_event_lock); if (list_empty(&hub_event_list)) { spin_unlock_irq(&hub_event_lock); break; } tmp = hub_event_list.next; list_del_init(tmp); hub = list_entry(tmp, struct usb_hub, event_list); kref_get(&hub->kref); hdev = hub->hdev; usb_get_dev(hdev); spin_unlock_irq(&hub_event_lock); hub_dev = hub->intfdev; intf = to_usb_interface(hub_dev); dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n", hdev->state, hub->descriptor ? hub->descriptor->bNbrPorts : 0, /* NOTE: expects max 15 ports... 
*/ (u16) hub->change_bits[0], (u16) hub->event_bits[0]); /* Lock the device, then check to see if we were * disconnected while waiting for the lock to succeed. */ usb_lock_device(hdev); if (unlikely(hub->disconnected)) goto loop_disconnected; /* If the hub has died, clean up after it */ if (hdev->state == USB_STATE_NOTATTACHED) { hub->error = -ENODEV; hub_quiesce(hub, HUB_DISCONNECT); goto loop; } /* Autoresume */ ret = usb_autopm_get_interface(intf); if (ret) { dev_dbg(hub_dev, "Can't autoresume: %d\n", ret); goto loop; } /* If this is an inactive hub, do nothing */ if (hub->quiescing) goto loop_autopm; if (hub->error) { dev_dbg (hub_dev, "resetting for error %d\n", hub->error); MYDBG(""); ret = usb_reset_device(hdev); if (ret) { dev_dbg (hub_dev, "error resetting hub: %d\n", ret); goto loop_autopm; } hub->nerrors = 0; hub->error = 0; } /* deal with port status changes */ for (i = 1; i <= hub->descriptor->bNbrPorts; i++) { if (test_bit(i, hub->busy_bits)) continue; connect_change = test_bit(i, hub->change_bits); wakeup_change = test_and_clear_bit(i, hub->wakeup_bits); if (!test_and_clear_bit(i, hub->event_bits) && !connect_change && !wakeup_change) continue; ret = hub_port_status(hub, i, &portstatus, &portchange); if (ret < 0) continue; if (portchange & USB_PORT_STAT_C_CONNECTION) { usb_clear_port_feature(hdev, i, USB_PORT_FEAT_C_CONNECTION); connect_change = 1; } if (portchange & USB_PORT_STAT_C_ENABLE) { if (!connect_change) dev_dbg (hub_dev, "port %d enable change, " "status %08x\n", i, portstatus); usb_clear_port_feature(hdev, i, USB_PORT_FEAT_C_ENABLE); /* * EM interference sometimes causes badly * shielded USB devices to be shutdown by * the hub, this hack enables them again. * Works at least with mouse driver. 
*/ if (!(portstatus & USB_PORT_STAT_ENABLE) && !connect_change && hub->ports[i - 1]->child) { dev_err (hub_dev, "port %i " "disabled by hub (EMI?), " "re-enabling...\n", i); connect_change = 1; } } if (hub_handle_remote_wakeup(hub, i, portstatus, portchange)) connect_change = 1; if (portchange & USB_PORT_STAT_C_OVERCURRENT) { u16 status = 0; u16 unused; dev_dbg(hub_dev, "over-current change on port " "%d\n", i); usb_clear_port_feature(hdev, i, USB_PORT_FEAT_C_OVER_CURRENT); msleep(100); /* Cool down */ hub_power_on(hub, true); hub_port_status(hub, i, &status, &unused); if (status & USB_PORT_STAT_OVERCURRENT) dev_err(hub_dev, "over-current " "condition on port %d\n", i); } if (portchange & USB_PORT_STAT_C_RESET) { dev_dbg (hub_dev, "reset change on port %d\n", i); usb_clear_port_feature(hdev, i, USB_PORT_FEAT_C_RESET); } if ((portchange & USB_PORT_STAT_C_BH_RESET) && hub_is_superspeed(hub->hdev)) { dev_dbg(hub_dev, "warm reset change on port %d\n", i); usb_clear_port_feature(hdev, i, USB_PORT_FEAT_C_BH_PORT_RESET); } if (portchange & USB_PORT_STAT_C_LINK_STATE) { usb_clear_port_feature(hub->hdev, i, USB_PORT_FEAT_C_PORT_LINK_STATE); } if (portchange & USB_PORT_STAT_C_CONFIG_ERROR) { dev_warn(hub_dev, "config error on port %d\n", i); usb_clear_port_feature(hub->hdev, i, USB_PORT_FEAT_C_PORT_CONFIG_ERROR); } /* Warm reset a USB3 protocol port if it's in * SS.Inactive state. 
*/ if (hub_port_warm_reset_required(hub, portstatus)) { int status; struct usb_device *udev = hub->ports[i - 1]->child; dev_dbg(hub_dev, "warm reset port %d\n", i); if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION) || udev->state == USB_STATE_NOTATTACHED) { status = hub_port_reset(hub, i, NULL, HUB_BH_RESET_TIME, true); if (status < 0) hub_port_disable(hub, i, 1); } else { usb_lock_device(udev); status = usb_reset_device(udev); usb_unlock_device(udev); connect_change = 0; } } if (connect_change) hub_port_connect_change(hub, i, portstatus, portchange); } /* end for i */ /* deal with hub status changes */ if (test_and_clear_bit(0, hub->event_bits) == 0) ; /* do nothing */ else if (hub_hub_status(hub, &hubstatus, &hubchange) < 0) dev_err (hub_dev, "get_hub_status failed\n"); else { if (hubchange & HUB_CHANGE_LOCAL_POWER) { dev_dbg (hub_dev, "power change\n"); clear_hub_feature(hdev, C_HUB_LOCAL_POWER); if (hubstatus & HUB_STATUS_LOCAL_POWER) /* FIXME: Is this always true? */ hub->limited_power = 1; else hub->limited_power = 0; } if (hubchange & HUB_CHANGE_OVERCURRENT) { u16 status = 0; u16 unused; dev_dbg(hub_dev, "over-current change\n"); clear_hub_feature(hdev, C_HUB_OVER_CURRENT); msleep(500); /* Cool down */ hub_power_on(hub, true); hub_hub_status(hub, &status, &unused); if (status & HUB_STATUS_OVERCURRENT) dev_err(hub_dev, "over-current " "condition\n"); } } loop_autopm: /* Balance the usb_autopm_get_interface() above */ usb_autopm_put_interface_no_suspend(intf); loop: /* Balance the usb_autopm_get_interface_no_resume() in * kick_khubd() and allow autosuspend. */ usb_autopm_put_interface(intf); loop_disconnected: usb_unlock_device(hdev); usb_put_dev(hdev); kref_put(&hub->kref, hub_release); } /* end while (1) */ } static int hub_thread(void *__unused) { /* khubd needs to be freezable to avoid intefering with USB-PERSIST * port handover. 
Otherwise it might see that a full-speed device * was gone before the EHCI controller had handed its port over to * the companion full-speed controller. */ set_freezable(); do { hub_events(); wait_event_freezable(khubd_wait, !list_empty(&hub_event_list) || kthread_should_stop()); } while (!kthread_should_stop() || !list_empty(&hub_event_list)); pr_debug("%s: khubd exiting\n", usbcore_name); return 0; } static const struct usb_device_id hub_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = USB_VENDOR_GENESYS_LOGIC, .bInterfaceClass = USB_CLASS_HUB, .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND}, { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS, .bDeviceClass = USB_CLASS_HUB}, { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, .bInterfaceClass = USB_CLASS_HUB}, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, hub_id_table); static struct usb_driver hub_driver = { .name = "hub", .probe = hub_probe, .disconnect = hub_disconnect, .suspend = hub_suspend, .resume = hub_resume, .reset_resume = hub_reset_resume, .pre_reset = hub_pre_reset, .post_reset = hub_post_reset, .unlocked_ioctl = hub_ioctl, .id_table = hub_id_table, .supports_autosuspend = 1, }; int usb_hub_init(void) { if (usb_register(&hub_driver) < 0) { printk(KERN_ERR "%s: can't register hub driver\n", usbcore_name); return -1; } khubd_task = kthread_run(hub_thread, NULL, "khubd"); if (!IS_ERR(khubd_task)) return 0; /* Fall through if kernel_thread failed */ usb_deregister(&hub_driver); printk(KERN_ERR "%s: can't start khubd\n", usbcore_name); return -1; } void usb_hub_cleanup(void) { kthread_stop(khubd_task); /* * Hub resources are freed for us by usb_deregister. It calls * usb_driver_purge on every device which in turn calls that * devices disconnect function if it is using this driver. * The hub_disconnect function takes care of releasing the * individual hub resources. 
-greg */ usb_deregister(&hub_driver); } /* usb_hub_cleanup() */ static int descriptors_changed(struct usb_device *udev, struct usb_device_descriptor *old_device_descriptor) { int changed = 0; unsigned index; unsigned serial_len = 0; unsigned len; unsigned old_length; int length; char *buf; if (memcmp(&udev->descriptor, old_device_descriptor, sizeof(*old_device_descriptor)) != 0) return 1; /* Since the idVendor, idProduct, and bcdDevice values in the * device descriptor haven't changed, we will assume the * Manufacturer and Product strings haven't changed either. * But the SerialNumber string could be different (e.g., a * different flash card of the same brand). */ if (udev->serial) serial_len = strlen(udev->serial) + 1; len = serial_len; for (index = 0; index < udev->descriptor.bNumConfigurations; index++) { old_length = le16_to_cpu(udev->config[index].desc.wTotalLength); len = max(len, old_length); } buf = kmalloc(len, GFP_NOIO); if (buf == NULL) { dev_err(&udev->dev, "no mem to re-read configs after reset\n"); /* assume the worst */ return 1; } for (index = 0; index < udev->descriptor.bNumConfigurations; index++) { old_length = le16_to_cpu(udev->config[index].desc.wTotalLength); length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf, old_length); if (length != old_length) { dev_dbg(&udev->dev, "config index %d, error %d\n", index, length); changed = 1; break; } if (memcmp (buf, udev->rawdescriptors[index], old_length) != 0) { dev_dbg(&udev->dev, "config index %d changed (#%d)\n", index, ((struct usb_config_descriptor *) buf)-> bConfigurationValue); changed = 1; break; } } if (!changed && serial_len) { length = usb_string(udev, udev->descriptor.iSerialNumber, buf, serial_len); if (length + 1 != serial_len) { dev_dbg(&udev->dev, "serial string error %d\n", length); changed = 1; } else if (memcmp(buf, udev->serial, length) != 0) { dev_dbg(&udev->dev, "serial string changed\n"); changed = 1; } } kfree(buf); return changed; } /** * usb_reset_and_verify_device - 
perform a USB port reset to reinitialize a device * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) * * WARNING - don't use this routine to reset a composite device * (one with multiple interfaces owned by separate drivers)! * Use usb_reset_device() instead. * * Do a port reset, reassign the device's address, and establish its * former operating configuration. If the reset fails, or the device's * descriptors change from their values before the reset, or the original * configuration and altsettings cannot be restored, a flag will be set * telling khubd to pretend the device has been disconnected and then * re-connected. All drivers will be unbound, and the device will be * re-enumerated and probed all over again. * * Returns 0 if the reset succeeded, -ENODEV if the device has been * flagged for logical disconnection, or some other negative error code * if the reset wasn't even attempted. * * The caller must own the device lock. For example, it's safe to use * this from a driver probe() routine after downloading new firmware. * For calls that might not occur during probe(), drivers should lock * the device using usb_lock_device_for_reset(). * * Locking exception: This routine may also be called from within an * autoresume handler. Such usage won't conflict with other tasks * holding the device lock because these tasks should always call * usb_autopm_resume_device(), thereby preventing any unwanted autoresume. 
*/
static int usb_reset_and_verify_device(struct usb_device *udev)
{
        struct usb_device *parent_hdev = udev->parent;
        struct usb_hub *parent_hub;
        struct usb_hcd *hcd = bus_to_hcd(udev->bus);
        struct usb_device_descriptor descriptor = udev->descriptor;
        int i, ret = 0;
        int port1 = udev->portnum;

        MYDBG("");

        /* Resetting is only legal from a fully attached, awake state. */
        if (udev->state == USB_STATE_NOTATTACHED ||
                        udev->state == USB_STATE_SUSPENDED) {
                dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
                                udev->state);
                return -EINVAL;
        }

        if (!parent_hdev) {
                /* this requires hcd-specific logic; see ohci_restart() */
                dev_dbg(&udev->dev, "%s for root hub!\n", __func__);
                return -EISDIR;
        }
        parent_hub = usb_hub_to_struct_hub(parent_hdev);

        /* Disable LPM and LTM while we reset the device and reinstall the
         * alt settings.  Device-initiated LPM settings, and system exit
         * latency settings are cleared when the device is reset, so we have
         * to set them up again.
         */
        ret = usb_unlocked_disable_lpm(udev);
        if (ret) {
                dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__);
                goto re_enumerate;
        }
        ret = usb_disable_ltm(udev);
        if (ret) {
                dev_err(&udev->dev, "%s Failed to disable LTM\n.", __func__);
                goto re_enumerate;
        }

        /* Mark the port busy while the reset attempts run. */
        set_bit(port1, parent_hub->busy_bits);
        for (i = 0; i < SET_CONFIG_TRIES; ++i) {

                /* ep0 maxpacket size may change; let the HCD know about it.
                 * Other endpoints will be handled by re-enumeration. */
                usb_ep0_reinit(udev);
                ret = hub_port_init(parent_hub, udev, port1, i);
                if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
                        break;
        }
        clear_bit(port1, parent_hub->busy_bits);

        if (ret < 0)
                goto re_enumerate;

        /* Device might have changed firmware (DFU or similar) */
        if (descriptors_changed(udev, &descriptor)) {
                dev_info(&udev->dev, "device firmware changed\n");
                udev->descriptor = descriptor;  /* for disconnect() calls */
                goto re_enumerate;
        }

        /* Restore the device's previous configuration */
        if (!udev->actconfig)
                goto done;

        mutex_lock(hcd->bandwidth_mutex);
        ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
        if (ret < 0) {
                dev_warn(&udev->dev,
                                "Busted HC? Not enough HCD resources for "
                                "old configuration.\n");
                mutex_unlock(hcd->bandwidth_mutex);
                goto re_enumerate;
        }
        ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                        USB_REQ_SET_CONFIGURATION, 0,
                        udev->actconfig->desc.bConfigurationValue, 0,
                        NULL, 0, USB_CTRL_SET_TIMEOUT);
        if (ret < 0) {
                dev_err(&udev->dev,
                        "can't restore configuration #%d (error=%d)\n",
                        udev->actconfig->desc.bConfigurationValue, ret);
                mutex_unlock(hcd->bandwidth_mutex);
                goto re_enumerate;
        }
        mutex_unlock(hcd->bandwidth_mutex);
        usb_set_device_state(udev, USB_STATE_CONFIGURED);

        /* Put interfaces back into the same altsettings as before.
         * Don't bother to send the Set-Interface request for interfaces
         * that were already in altsetting 0; besides being unnecessary,
         * many devices can't handle it.  Instead just reset the host-side
         * endpoint state.
         */
        for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
                struct usb_host_config *config = udev->actconfig;
                struct usb_interface *intf = config->interface[i];
                struct usb_interface_descriptor *desc;

                desc = &intf->cur_altsetting->desc;
                if (desc->bAlternateSetting == 0) {
                        usb_disable_interface(udev, intf, true);
                        usb_enable_interface(udev, intf, true);
                        ret = 0;
                } else {
                        /* Let the bandwidth allocation function know that
                         * this device has been reset, and it will have to
                         * use alternate setting 0 as the current alternate
                         * setting.
                         */
                        intf->resetting_device = 1;
                        ret = usb_set_interface(udev, desc->bInterfaceNumber,
                                        desc->bAlternateSetting);
                        intf->resetting_device = 0;
                }
                if (ret < 0) {
                        dev_err(&udev->dev, "failed to restore interface %d "
                                "altsetting %d (error=%d)\n",
                                desc->bInterfaceNumber,
                                desc->bAlternateSetting,
                                ret);
                        goto re_enumerate;
                }
        }

done:
        /* Now that the alt settings are re-installed, enable LTM and LPM. */
        usb_unlocked_enable_lpm(udev);
        usb_enable_ltm(udev);
        return 0;

re_enumerate:
        /* LPM state doesn't matter when we're about to destroy the device. */
        hub_port_logical_disconnect(parent_hub, port1);
        return -ENODEV;
}

/**
 * usb_reset_device - warn interface drivers and perform a USB port reset
 * @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
 *
 * Warns all drivers bound to registered interfaces (using their pre_reset
 * method), performs the port reset, and then lets the drivers know that
 * the reset is over (using their post_reset method).
 *
 * Return value is the same as for usb_reset_and_verify_device().
 *
 * The caller must own the device lock.  For example, it's safe to use
 * this from a driver probe() routine after downloading new firmware.
 * For calls that might not occur during probe(), drivers should lock
 * the device using usb_lock_device_for_reset().
 *
 * If an interface is currently being probed or disconnected, we assume
 * its driver knows how to handle resets.
For all other interfaces, * if the driver doesn't have pre_reset and post_reset methods then * we attempt to unbind it and rebind afterward. */ int usb_reset_device(struct usb_device *udev) { int ret; int i; unsigned int noio_flag; struct usb_host_config *config = udev->actconfig; MYDBG(""); if (udev->state == USB_STATE_NOTATTACHED || udev->state == USB_STATE_SUSPENDED) { dev_dbg(&udev->dev, "device reset not allowed in state %d\n", udev->state); return -EINVAL; } /* * Don't allocate memory with GFP_KERNEL in current * context to avoid possible deadlock if usb mass * storage interface or usbnet interface(iSCSI case) * is included in current configuration. The easist * approach is to do it for every device reset, * because the device 'memalloc_noio' flag may have * not been set before reseting the usb device. */ noio_flag = memalloc_noio_save(); /* Prevent autosuspend during the reset */ usb_autoresume_device(udev); if (config) { for (i = 0; i < config->desc.bNumInterfaces; ++i) { struct usb_interface *cintf = config->interface[i]; struct usb_driver *drv; int unbind = 0; if (cintf->dev.driver) { drv = to_usb_driver(cintf->dev.driver); if (drv->pre_reset && drv->post_reset) unbind = (drv->pre_reset)(cintf); else if (cintf->condition == USB_INTERFACE_BOUND) unbind = 1; if (unbind) usb_forced_unbind_intf(cintf); } } } ret = usb_reset_and_verify_device(udev); if (config) { for (i = config->desc.bNumInterfaces - 1; i >= 0; --i) { struct usb_interface *cintf = config->interface[i]; struct usb_driver *drv; int rebind = cintf->needs_binding; if (!rebind && cintf->dev.driver) { drv = to_usb_driver(cintf->dev.driver); if (drv->post_reset) rebind = (drv->post_reset)(cintf); else if (cintf->condition == USB_INTERFACE_BOUND) rebind = 1; if (rebind) cintf->needs_binding = 1; } } usb_unbind_and_rebind_marked_interfaces(udev); } usb_autosuspend_device(udev); memalloc_noio_restore(noio_flag); return ret; } EXPORT_SYMBOL_GPL(usb_reset_device); /** * usb_queue_reset_device - Reset a 
USB device from an atomic context * @iface: USB interface belonging to the device to reset * * This function can be used to reset a USB device from an atomic * context, where usb_reset_device() won't work (as it blocks). * * Doing a reset via this method is functionally equivalent to calling * usb_reset_device(), except for the fact that it is delayed to a * workqueue. This means that any drivers bound to other interfaces * might be unbound, as well as users from usbfs in user space. * * Corner cases: * * - Scheduling two resets at the same time from two different drivers * attached to two different interfaces of the same device is * possible; depending on how the driver attached to each interface * handles ->pre_reset(), the second reset might happen or not. * * - If a driver is unbound and it had a pending reset, the reset will * be cancelled. * * - This function can be called during .probe() or .disconnect() * times. On return from .disconnect(), any pending resets will be * cancelled. * * There is no no need to lock/unlock the @reset_ws as schedule_work() * does its own. * * NOTE: We don't do any reference count tracking because it is not * needed. The lifecycle of the work_struct is tied to the * usb_interface. Before destroying the interface we cancel the * work_struct, so the fact that work_struct is queued and or * running means the interface (and thus, the device) exist and * are referenced. */ void usb_queue_reset_device(struct usb_interface *iface) { schedule_work(&iface->reset_ws); } EXPORT_SYMBOL_GPL(usb_queue_reset_device); /** * usb_hub_find_child - Get the pointer of child device * attached to the port which is specified by @port1. * @hdev: USB device belonging to the usb hub * @port1: port num to indicate which port the child device * is attached to. * * USB drivers call this function to get hub's child device * pointer. * * Return NULL if input param is invalid and * child's usb_device pointer if non-NULL. 
*/ struct usb_device *usb_hub_find_child(struct usb_device *hdev, int port1) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); if (port1 < 1 || port1 > hdev->maxchild) return NULL; return hub->ports[port1 - 1]->child; } EXPORT_SYMBOL_GPL(usb_hub_find_child); /** * usb_set_hub_port_connect_type - set hub port connect type. * @hdev: USB device belonging to the usb hub * @port1: port num of the port * @type: connect type of the port */ void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1, enum usb_port_connect_type type) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); hub->ports[port1 - 1]->connect_type = type; } /** * usb_get_hub_port_connect_type - Get the port's connect type * @hdev: USB device belonging to the usb hub * @port1: port num of the port * * Return connect type of the port and if input params are * invalid, return USB_PORT_CONNECT_TYPE_UNKNOWN. */ enum usb_port_connect_type usb_get_hub_port_connect_type(struct usb_device *hdev, int port1) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); return hub->ports[port1 - 1]->connect_type; } void usb_hub_adjust_deviceremovable(struct usb_device *hdev, struct usb_hub_descriptor *desc) { enum usb_port_connect_type connect_type; int i; if (!hub_is_superspeed(hdev)) { for (i = 1; i <= hdev->maxchild; i++) { connect_type = usb_get_hub_port_connect_type(hdev, i); if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { u8 mask = 1 << (i%8); if (!(desc->u.hs.DeviceRemovable[i/8] & mask)) { dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to platform information.\n", i); desc->u.hs.DeviceRemovable[i/8] |= mask; } } } } else { u16 port_removable = le16_to_cpu(desc->u.ss.DeviceRemovable); for (i = 1; i <= hdev->maxchild; i++) { connect_type = usb_get_hub_port_connect_type(hdev, i); if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { u16 mask = 1 << i; if (!(port_removable & mask)) { dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to 
platform information.\n", i); port_removable |= mask; } } } desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable); } } #ifdef CONFIG_ACPI /** * usb_get_hub_port_acpi_handle - Get the usb port's acpi handle * @hdev: USB device belonging to the usb hub * @port1: port num of the port * * Return port's acpi handle if successful, NULL if params are * invaild. */ acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev, int port1) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev); } #endif
gpl-2.0
svn2github/vbox
src/VBox/Runtime/common/checksum/manifest-file.cpp
10
2759
/* $Id$ */
/** @file
 * IPRT - Manifest, the bits with file dependencies
 */

/*
 * Copyright (C) 2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "internal/iprt.h"
#include <iprt/manifest.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/file.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#include <iprt/vfs.h>

#include "internal/magics.h"


/**
 * Reads a standard-format manifest from the given file into @a hManifest.
 *
 * Opens the file read-only, wraps it in a VFS I/O stream (leaving the raw
 * handle open so we can close it ourselves), and hands the stream to
 * RTManifestReadStandard.
 */
RTDECL(int) RTManifestReadStandardFromFile(RTMANIFEST hManifest, const char *pszFilename)
{
    uint32_t const fOpen = RTFILE_O_READ | RTFILE_O_DENY_WRITE | RTFILE_O_OPEN;
    RTFILE         hFile;
    int rc = RTFileOpen(&hFile, pszFilename, fOpen);
    if (RT_SUCCESS(rc))
    {
        RTVFSIOSTREAM hVfsIos;
        rc = RTVfsIoStrmFromRTFile(hFile, fOpen, true /*fLeaveOpen*/, &hVfsIos);
        if (RT_SUCCESS(rc))
        {
            rc = RTManifestReadStandard(hManifest, hVfsIos);
            RTVfsIoStrmRelease(hVfsIos);
        }
        RTFileClose(hFile);
    }
    return rc;
}


/**
 * Writes @a hManifest to the given file in the standard format.
 *
 * Creates (or replaces) the file, wraps it in a VFS I/O stream (leaving the
 * raw handle open so we can close it ourselves), and hands the stream to
 * RTManifestWriteStandard.
 */
RTDECL(int) RTManifestWriteStandardToFile(RTMANIFEST hManifest, const char *pszFilename)
{
    uint32_t const fOpen = RTFILE_O_WRITE | RTFILE_O_DENY_WRITE | RTFILE_O_CREATE_REPLACE;
    RTFILE         hFile;
    int rc = RTFileOpen(&hFile, pszFilename, fOpen);
    if (RT_SUCCESS(rc))
    {
        RTVFSIOSTREAM hVfsIos;
        rc = RTVfsIoStrmFromRTFile(hFile, fOpen, true /*fLeaveOpen*/, &hVfsIos);
        if (RT_SUCCESS(rc))
        {
            rc = RTManifestWriteStandard(hManifest, hVfsIos);
            RTVfsIoStrmRelease(hVfsIos);
        }
        RTFileClose(hFile);
    }
    return rc;
}
gpl-2.0
delfinof/libfftw-win
dft/simd/common/n1fv_15.c
10
13164
/* * Copyright (c) 2003, 2007-11 Matteo Frigo * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * */ /* This file was automatically generated --- DO NOT EDIT */ /* Generated on Sun Nov 25 07:36:52 EST 2012 */ #include "codelet-dft.h" #ifdef HAVE_FMA /* Generated by: ../../../genfft/gen_notw_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 15 -name n1fv_15 -include n1f.h */ /* * This function contains 78 FP additions, 49 FP multiplications, * (or, 36 additions, 7 multiplications, 42 fused multiply/add), * 78 stack variables, 8 constants, and 30 memory accesses */ #include "n1f.h" static void n1fv_15(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs) { DVK(KP823639103, +0.823639103546331925877420039278190003029660514); DVK(KP910592997, +0.910592997310029334643087372129977886038870291); DVK(KP559016994, +0.559016994374947424102293417182819058860154590); DVK(KP951056516, +0.951056516295153572116439333379382143405698634); DVK(KP866025403, +0.866025403784438646763723170752936183471402627); DVK(KP250000000, +0.250000000000000000000000000000000000000000000); DVK(KP618033988, +0.618033988749894848204586834365638117720309180); DVK(KP500000000, 
+0.500000000000000000000000000000000000000000000); { INT i; const R *xi; R *xo; xi = ri; xo = ro; for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(30, is), MAKE_VOLATILE_STRIDE(30, os)) { V Tb, TX, TM, TQ, Th, TB, T5, Ti, Ta, TC, TN, Te, TG, Tq, Tj; V T1, T2, T3; T1 = LD(&(xi[0]), ivs, &(xi[0])); T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)])); T3 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0])); { V T6, T7, T8, Tm, Tn, To; T6 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)])); T7 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0])); T8 = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)])); Tm = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)])); Tn = LD(&(xi[WS(is, 14)]), ivs, &(xi[0])); To = LD(&(xi[WS(is, 4)]), ivs, &(xi[0])); { V T4, Tc, T9, Td, Tp; Tb = LD(&(xi[WS(is, 12)]), ivs, &(xi[0])); T4 = VADD(T2, T3); TX = VSUB(T3, T2); Tc = LD(&(xi[WS(is, 2)]), ivs, &(xi[0])); TM = VSUB(T8, T7); T9 = VADD(T7, T8); Td = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)])); Tp = VADD(Tn, To); TQ = VSUB(To, Tn); Th = LD(&(xi[WS(is, 6)]), ivs, &(xi[0])); TB = VFNMS(LDK(KP500000000), T4, T1); T5 = VADD(T1, T4); Ti = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)])); Ta = VADD(T6, T9); TC = VFNMS(LDK(KP500000000), T9, T6); TN = VSUB(Td, Tc); Te = VADD(Tc, Td); TG = VFNMS(LDK(KP500000000), Tp, Tm); Tq = VADD(Tm, Tp); Tj = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)])); } } { V TY, TO, Tf, TD, TP, Tk; TY = VADD(TM, TN); TO = VSUB(TM, TN); Tf = VADD(Tb, Te); TD = VFNMS(LDK(KP500000000), Te, Tb); TP = VSUB(Tj, Ti); Tk = VADD(Ti, Tj); { V Tx, Tg, TE, TU, TZ, TR, Tl, TF; Tx = VSUB(Ta, Tf); Tg = VADD(Ta, Tf); TE = VADD(TC, TD); TU = VSUB(TC, TD); TZ = VADD(TP, TQ); TR = VSUB(TP, TQ); Tl = VADD(Th, Tk); TF = VFNMS(LDK(KP500000000), Tk, Th); { V T12, T10, T18, TS, Tw, Tr, TH, TV, T11, T1g; T12 = VSUB(TY, TZ); T10 = VADD(TY, TZ); T18 = VFNMS(LDK(KP618033988), TO, TR); TS = VFMA(LDK(KP618033988), TR, TO); Tw = VSUB(Tl, Tq); Tr = VADD(Tl, Tq); TH = VADD(TF, TG); TV = VSUB(TF, TG); T11 = 
VFNMS(LDK(KP250000000), T10, TX); T1g = VMUL(LDK(KP866025403), VADD(TX, T10)); { V TA, Ty, Tu, TK, TI, T1a, TW, T1b, T13, Tt, Ts, TJ, T1f; TA = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), Tw, Tx)); Ty = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), Tx, Tw)); Ts = VADD(Tg, Tr); Tu = VSUB(Tg, Tr); TK = VSUB(TE, TH); TI = VADD(TE, TH); T1a = VFNMS(LDK(KP618033988), TU, TV); TW = VFMA(LDK(KP618033988), TV, TU); T1b = VFNMS(LDK(KP559016994), T12, T11); T13 = VFMA(LDK(KP559016994), T12, T11); ST(&(xo[0]), VADD(T5, Ts), ovs, &(xo[0])); Tt = VFNMS(LDK(KP250000000), Ts, T5); TJ = VFNMS(LDK(KP250000000), TI, TB); T1f = VADD(TB, TI); { V T1c, T1e, T16, T14, Tv, Tz, T17, TL; T1c = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), T1b, T1a)); T1e = VMUL(LDK(KP951056516), VFMA(LDK(KP910592997), T1b, T1a)); T16 = VMUL(LDK(KP951056516), VFMA(LDK(KP910592997), T13, TW)); T14 = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), T13, TW)); Tv = VFNMS(LDK(KP559016994), Tu, Tt); Tz = VFMA(LDK(KP559016994), Tu, Tt); T17 = VFNMS(LDK(KP559016994), TK, TJ); TL = VFMA(LDK(KP559016994), TK, TJ); ST(&(xo[WS(os, 10)]), VFMAI(T1g, T1f), ovs, &(xo[0])); ST(&(xo[WS(os, 5)]), VFNMSI(T1g, T1f), ovs, &(xo[WS(os, 1)])); { V T19, T1d, T15, TT; ST(&(xo[WS(os, 12)]), VFMAI(Ty, Tv), ovs, &(xo[0])); ST(&(xo[WS(os, 3)]), VFNMSI(Ty, Tv), ovs, &(xo[WS(os, 1)])); ST(&(xo[WS(os, 9)]), VFMAI(TA, Tz), ovs, &(xo[WS(os, 1)])); ST(&(xo[WS(os, 6)]), VFNMSI(TA, Tz), ovs, &(xo[0])); T19 = VFMA(LDK(KP823639103), T18, T17); T1d = VFNMS(LDK(KP823639103), T18, T17); T15 = VFNMS(LDK(KP823639103), TS, TL); TT = VFMA(LDK(KP823639103), TS, TL); ST(&(xo[WS(os, 2)]), VFMAI(T1c, T19), ovs, &(xo[0])); ST(&(xo[WS(os, 13)]), VFNMSI(T1c, T19), ovs, &(xo[WS(os, 1)])); ST(&(xo[WS(os, 7)]), VFMAI(T1e, T1d), ovs, &(xo[WS(os, 1)])); ST(&(xo[WS(os, 8)]), VFNMSI(T1e, T1d), ovs, &(xo[0])); ST(&(xo[WS(os, 4)]), VFMAI(T16, T15), ovs, &(xo[0])); ST(&(xo[WS(os, 11)]), VFNMSI(T16, T15), ovs, &(xo[WS(os, 1)])); ST(&(xo[WS(os, 14)]), VFMAI(T14, TT), 
ovs, &(xo[0])); ST(&(xo[WS(os, 1)]), VFNMSI(T14, TT), ovs, &(xo[WS(os, 1)])); } } } } } } } } VLEAVE(); } static const kdft_desc desc = { 15, XSIMD_STRING("n1fv_15"), {36, 7, 42, 0}, &GENUS, 0, 0, 0, 0 }; void XSIMD(codelet_n1fv_15) (planner *p) { X(kdft_register) (p, n1fv_15, &desc); } #else /* HAVE_FMA */ /* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 15 -name n1fv_15 -include n1f.h */ /* * This function contains 78 FP additions, 25 FP multiplications, * (or, 64 additions, 11 multiplications, 14 fused multiply/add), * 55 stack variables, 10 constants, and 30 memory accesses */ #include "n1f.h" static void n1fv_15(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs) { DVK(KP216506350, +0.216506350946109661690930792688234045867850657); DVK(KP509036960, +0.509036960455127183450980863393907648510733164); DVK(KP823639103, +0.823639103546331925877420039278190003029660514); DVK(KP587785252, +0.587785252292473129168705954639072768597652438); DVK(KP951056516, +0.951056516295153572116439333379382143405698634); DVK(KP250000000, +0.250000000000000000000000000000000000000000000); DVK(KP559016994, +0.559016994374947424102293417182819058860154590); DVK(KP866025403, +0.866025403784438646763723170752936183471402627); DVK(KP484122918, +0.484122918275927110647408174972799951354115213); DVK(KP500000000, +0.500000000000000000000000000000000000000000000); { INT i; const R *xi; R *xo; xi = ri; xo = ro; for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(30, is), MAKE_VOLATILE_STRIDE(30, os)) { V T5, T10, TB, TO, TU, TV, TR, Ta, Tf, Tg, Tl, Tq, Tr, TE, TH; V TI, TZ, T11, T1f, T1g; { V T1, T2, T3, T4; T1 = LD(&(xi[0]), ivs, &(xi[0])); T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)])); T3 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0])); T4 = VADD(T2, T3); T5 = VADD(T1, T4); T10 = VSUB(T3, T2); TB = VFNMS(LDK(KP500000000), T4, T1); } { V T6, T9, TC, TP, Tm, Tp, TG, TN, 
Tb, Te, TD, TQ, Th, Tk, TF; V TM, TX, TY; { V T7, T8, Tn, To; T6 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)])); T7 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0])); T8 = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)])); T9 = VADD(T7, T8); TC = VFNMS(LDK(KP500000000), T9, T6); TP = VSUB(T8, T7); Tm = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)])); Tn = LD(&(xi[WS(is, 14)]), ivs, &(xi[0])); To = LD(&(xi[WS(is, 4)]), ivs, &(xi[0])); Tp = VADD(Tn, To); TG = VFNMS(LDK(KP500000000), Tp, Tm); TN = VSUB(To, Tn); } { V Tc, Td, Ti, Tj; Tb = LD(&(xi[WS(is, 12)]), ivs, &(xi[0])); Tc = LD(&(xi[WS(is, 2)]), ivs, &(xi[0])); Td = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)])); Te = VADD(Tc, Td); TD = VFNMS(LDK(KP500000000), Te, Tb); TQ = VSUB(Td, Tc); Th = LD(&(xi[WS(is, 6)]), ivs, &(xi[0])); Ti = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)])); Tj = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)])); Tk = VADD(Ti, Tj); TF = VFNMS(LDK(KP500000000), Tk, Th); TM = VSUB(Tj, Ti); } TO = VSUB(TM, TN); TU = VSUB(TF, TG); TV = VSUB(TC, TD); TR = VSUB(TP, TQ); Ta = VADD(T6, T9); Tf = VADD(Tb, Te); Tg = VADD(Ta, Tf); Tl = VADD(Th, Tk); Tq = VADD(Tm, Tp); Tr = VADD(Tl, Tq); TE = VADD(TC, TD); TH = VADD(TF, TG); TI = VADD(TE, TH); TX = VADD(TP, TQ); TY = VADD(TM, TN); TZ = VMUL(LDK(KP484122918), VSUB(TX, TY)); T11 = VADD(TX, TY); } T1f = VADD(TB, TI); T1g = VBYI(VMUL(LDK(KP866025403), VADD(T10, T11))); ST(&(xo[WS(os, 5)]), VSUB(T1f, T1g), ovs, &(xo[WS(os, 1)])); ST(&(xo[WS(os, 10)]), VADD(T1f, T1g), ovs, &(xo[0])); { V Tu, Ts, Tt, Ty, TA, Tw, Tx, Tz, Tv; Tu = VMUL(LDK(KP559016994), VSUB(Tg, Tr)); Ts = VADD(Tg, Tr); Tt = VFNMS(LDK(KP250000000), Ts, T5); Tw = VSUB(Tl, Tq); Tx = VSUB(Ta, Tf); Ty = VBYI(VFNMS(LDK(KP587785252), Tx, VMUL(LDK(KP951056516), Tw))); TA = VBYI(VFMA(LDK(KP951056516), Tx, VMUL(LDK(KP587785252), Tw))); ST(&(xo[0]), VADD(T5, Ts), ovs, &(xo[0])); Tz = VADD(Tu, Tt); ST(&(xo[WS(os, 6)]), VSUB(Tz, TA), ovs, &(xo[0])); ST(&(xo[WS(os, 9)]), VADD(TA, Tz), ovs, &(xo[WS(os, 1)])); Tv = VSUB(Tt, Tu); 
ST(&(xo[WS(os, 3)]), VSUB(Tv, Ty), ovs, &(xo[WS(os, 1)])); ST(&(xo[WS(os, 12)]), VADD(Ty, Tv), ovs, &(xo[0])); } { V TS, TW, T1b, T18, T13, T1a, TL, T17, T12, TJ, TK; TS = VFNMS(LDK(KP509036960), TR, VMUL(LDK(KP823639103), TO)); TW = VFNMS(LDK(KP587785252), TV, VMUL(LDK(KP951056516), TU)); T1b = VFMA(LDK(KP951056516), TV, VMUL(LDK(KP587785252), TU)); T18 = VFMA(LDK(KP823639103), TR, VMUL(LDK(KP509036960), TO)); T12 = VFNMS(LDK(KP216506350), T11, VMUL(LDK(KP866025403), T10)); T13 = VSUB(TZ, T12); T1a = VADD(TZ, T12); TJ = VFNMS(LDK(KP250000000), TI, TB); TK = VMUL(LDK(KP559016994), VSUB(TE, TH)); TL = VSUB(TJ, TK); T17 = VADD(TK, TJ); { V TT, T14, T1d, T1e; TT = VSUB(TL, TS); T14 = VBYI(VSUB(TW, T13)); ST(&(xo[WS(os, 8)]), VSUB(TT, T14), ovs, &(xo[0])); ST(&(xo[WS(os, 7)]), VADD(TT, T14), ovs, &(xo[WS(os, 1)])); T1d = VSUB(T17, T18); T1e = VBYI(VADD(T1b, T1a)); ST(&(xo[WS(os, 11)]), VSUB(T1d, T1e), ovs, &(xo[WS(os, 1)])); ST(&(xo[WS(os, 4)]), VADD(T1d, T1e), ovs, &(xo[0])); } { V T15, T16, T19, T1c; T15 = VADD(TL, TS); T16 = VBYI(VADD(TW, T13)); ST(&(xo[WS(os, 13)]), VSUB(T15, T16), ovs, &(xo[WS(os, 1)])); ST(&(xo[WS(os, 2)]), VADD(T15, T16), ovs, &(xo[0])); T19 = VADD(T17, T18); T1c = VBYI(VSUB(T1a, T1b)); ST(&(xo[WS(os, 14)]), VSUB(T19, T1c), ovs, &(xo[0])); ST(&(xo[WS(os, 1)]), VADD(T19, T1c), ovs, &(xo[WS(os, 1)])); } } } } VLEAVE(); } static const kdft_desc desc = { 15, XSIMD_STRING("n1fv_15"), {64, 11, 14, 0}, &GENUS, 0, 0, 0, 0 }; void XSIMD(codelet_n1fv_15) (planner *p) { X(kdft_register) (p, n1fv_15, &desc); } #endif /* HAVE_FMA */
gpl-2.0
russfiedler/mom
src/atmos_spectral/init/topog_regularization.F90
10
11410
module topog_regularization_mod ! produces regularized topography according to Lindberg and Broccoli, ! J. of Climate vol 9, no 11 pg. 2641-2659 (1996) ! Originally coded by Charles Jackson ! Modified for FMS by Peter Phillipps use fms_mod, only: mpp_pe, mpp_root_pe, error_mesg, FATAL, & write_version_number use mpp_mod, only: mpp_chksum use constants_mod, only: pi use transforms_mod, only: compute_gaussian, compute_legendre, & trans_grid_to_spherical, trans_spherical_to_grid, & get_sin_lat, get_wts_lat, transforms_are_initialized, & get_lon_max, get_lat_max, get_num_fourier, get_fourier_inc,& get_num_spherical, get_grid_domain, get_spec_domain, & grid_domain, spectral_domain, area_weighted_global_mean use mpp_domains_mod, only: mpp_global_field implicit none private character(len=128), parameter :: version = & '$Id: topog_regularization.F90,v 13.0 2006/03/28 21:17:37 fms Exp $' character(len=128), parameter :: tagname = & '$Name: tikal $' public :: compute_lambda, regularize integer, parameter :: itmax=1000 real, parameter :: tolerance = 1.e-5 integer :: is, ie, js, je, ms, me, ns, ne integer :: lon_max, lat_max, num_fourier, num_spherical, fourier_inc, nmax real, allocatable, dimension(:,: ) :: smoothed_field_tmp, rough, cost_field real, allocatable, dimension(: ) :: wts_lat_global, sin_lat_global, facm, sin_facm complex, allocatable, dimension(:,: ) :: Dnm, anm, bnm, Hnm, DR2, DelAnm, DelBnm logical :: module_is_initialized=.false. 
character(len=8) :: chtmp1, chtmp2 contains !============================================================================================ subroutine compute_lambda(ocean_topog_smoothing, ocean_mask, unsmoothed_field, lambda, actual_fraction_smoothed) real, intent(in) :: ocean_topog_smoothing logical, intent(in), dimension(:,:) :: ocean_mask real, intent(in), dimension(:,:) :: unsmoothed_field real, intent(out) :: lambda, actual_fraction_smoothed real :: lambda_1, lambda_2, fraction_smoothed_1, fraction_smoothed_2 real :: tol_lambda = .001 integer :: it_lambda, itmax_lambda=20 if(.not.module_is_initialized) then call topog_regularization_init(ocean_mask) endif if(any(shape(unsmoothed_field) /= (/ie-is+1,je-js+1/))) then write(chtmp1,'(2i4)') shape(unsmoothed_field) write(chtmp2,'(2i4)') (/ie-is+1,je-js+1/) call error_mesg('compute_lambda', & 'Input argument unsmoothed_field has incorrect dimensions. shape(unsmoothed_field)='//chtmp1//' Should be '//chtmp2,FATAL) endif lambda_1 = 1.e-7 lambda_2 = 2.e-7 call regularize(lambda_1, ocean_mask, unsmoothed_field, smoothed_field_tmp, fraction_smoothed_1) if(abs(ocean_topog_smoothing-fraction_smoothed_1) < tol_lambda) then lambda = lambda_1 actual_fraction_smoothed = fraction_smoothed_1 goto 20 endif call regularize(lambda_2, ocean_mask, unsmoothed_field, smoothed_field_tmp, fraction_smoothed_2) if(abs(ocean_topog_smoothing-fraction_smoothed_2) < tol_lambda) then lambda = lambda_2 actual_fraction_smoothed = fraction_smoothed_2 goto 20 endif if(fraction_smoothed_1 > ocean_topog_smoothing .or. fraction_smoothed_2 > ocean_topog_smoothing) then call error_mesg('compute_lambda', & 'Iterative scheme for computing lambda may not work unless initial values of lambda_1 and lambda_2 are reduced.', FATAL) endif lambda_1 = ((fraction_smoothed_2-ocean_topog_smoothing)*lambda_1 + & (ocean_topog_smoothing-fraction_smoothed_1)*lambda_2)/(fraction_smoothed_2-fraction_smoothed_1) if(lambda_1 < 0.) 
then call error_mesg('compute_lambda', & 'Iterative scheme for finding lambda will not work unless initial values of lambda_1 and lambda_2 are reduced.', FATAL) endif call regularize(lambda_1, ocean_mask, unsmoothed_field, smoothed_field_tmp, fraction_smoothed_1) do it_lambda=1,itmax_lambda if(abs(ocean_topog_smoothing-fraction_smoothed_1) < tol_lambda) then lambda = lambda_1 actual_fraction_smoothed = fraction_smoothed_1 goto 20 endif lambda_2 = ((fraction_smoothed_2-ocean_topog_smoothing)*lambda_1 + & (ocean_topog_smoothing-fraction_smoothed_1)*lambda_2)/(fraction_smoothed_2-fraction_smoothed_1) if(lambda_2 < 0.) then write(chtmp1,'(i8)') it_lambda call error_mesg('compute_lambda', & 'Iterative scheme for finding lambda failed. lambda went negative on iteration number'//chtmp1, FATAL) endif call regularize(lambda_2, ocean_mask, unsmoothed_field, smoothed_field_tmp, fraction_smoothed_2) if(abs(ocean_topog_smoothing-fraction_smoothed_2) < tol_lambda) then lambda = lambda_2 actual_fraction_smoothed = fraction_smoothed_2 goto 20 endif lambda_1 = ((fraction_smoothed_2-ocean_topog_smoothing)*lambda_1 + & (ocean_topog_smoothing-fraction_smoothed_1)*lambda_2)/(fraction_smoothed_2-fraction_smoothed_1) call regularize(lambda_1, ocean_mask, unsmoothed_field, smoothed_field_tmp, fraction_smoothed_1) enddo call error_mesg('compute_lambda','Cannot converge on a value of lambda. 
Perhaps more interations are needed.', FATAL) 20 continue return end subroutine compute_lambda !============================================================================================ subroutine regularize(lambda, ocean_mask, unsmoothed_field, smoothed_field, fraction_smoothed) real, intent(in) :: lambda logical, intent(in), dimension(:,:) :: ocean_mask real, intent(in), dimension(:,:) :: unsmoothed_field real, intent(out), dimension(size(ocean_mask,1), size(ocean_mask,2)) :: smoothed_field real, intent(out) :: fraction_smoothed real :: converg, cost, oldcost, lamcost, lamcosti integer :: m, n, it if(.not.module_is_initialized) then call topog_regularization_init(ocean_mask) endif if(any(shape(unsmoothed_field) /= (/ie-is+1,je-js+1/))) then write(chtmp1,'(2i4)') shape(unsmoothed_field) write(chtmp2,'(2i4)') (/ie-is+1,je-js+1/) call error_mesg('regularize', & 'Input argument unsmoothed_field has incorrect dimensions. shape(unsmoothed_field)='//chtmp1//' Should be '//chtmp2,FATAL) endif if(is /= 1 .or. ie /= lon_max) then call error_mesg('regularize', & 'subroutine regularize is not yet coded for 2-d decomposition. It was assumed that it will never be needed.',FATAL) endif Hnm=cmplx(0.,0.) do n=ns,nmax do m=ms,me Hnm(m,n)=1./(1.+lambda*Dnm(m,n)*((n+m)*(n+m+1))**2) enddo enddo bnm=cmplx(0.,0.) call trans_grid_to_spherical(unsmoothed_field,bnm) ! anm (equation 6.3) anm = cmplx(0.,0.) do n=ns,nmax do m=ms,me anm(m,n)=bnm(m,n)/(1.+lambda*((n+m)*(n+m+1))**2) enddo enddo DelAnm = cmplx(0.,0.) do n=ns,nmax do m=ms,me DelAnm(m,n)=(n+m)*(n+m+1)*anm(m,n) enddo enddo call trans_spherical_to_grid(DelAnm,rough) converg=1. cost=0. DelBnm = cmplx(0.,0.) do it=1,itmax if (abs(converg) < tolerance) goto 60 ! rough is zeroed out over land where(.not.ocean_mask) rough = 0. end where ! 
Do an iteration of smoothing call trans_grid_to_spherical(rough,DR2) do n=ns,nmax do m=ms,me DR2(m,n)=(n+m)*(n+m+1)*DR2(m,n) enddo enddo do n=ns,nmax do m=max(ms,1),me anm(m,n)=(anm(m,n)+Hnm(m,n)*(bnm(m,n)-anm(m,n))-lambda*Hnm(m,n)*DR2(m,n))*sin_facm(m)/facm(m) enddo enddo if(ms == 0) then do n=ns,nmax anm(0,n)=anm(0,n)+Hnm(0,n)*(bnm(0,n)-anm(0,n))-lambda*Hnm(0,n)*DR2(0,n) enddo endif ! transform the smoothed field to grid call trans_spherical_to_grid(anm,smoothed_field) ! compute cost function (eq. 6.4) do n=ns,nmax do m=ms,me DelAnm(m,n)=(n+m)*(n+m+1)*anm(m,n) enddo enddo call trans_spherical_to_grid(DelAnm,rough) where(ocean_mask) cost_field = (unsmoothed_field-smoothed_field)**2 + lambda*rough**2 else where cost_field = 0. end where oldcost=cost cost = area_weighted_global_mean(cost_field) if (it > 1) then converg=(oldcost-cost)/oldcost endif enddo call error_mesg('regularize','Failure to converge',FATAL) 60 continue do n=ns,nmax do m=ms,me DelBnm(m,n)=(n+m)*(n+m+1)*bnm(m,n) enddo enddo call trans_spherical_to_grid(DelBnm,rough) where(ocean_mask) cost_field = rough**2 else where cost_field = 0. end where lamcosti = area_weighted_global_mean(cost_field) call trans_spherical_to_grid(DelAnm,rough) where(ocean_mask) cost_field = rough**2 else where cost_field = 0. end where lamcost = area_weighted_global_mean(cost_field) fraction_smoothed = 1. 
- lamcost/lamcosti if(mpp_pe() == mpp_root_pe()) then print '("Message from subroutine regularize: lambda=",1pe16.8," fraction_smoothed=",1pe16.8)',lambda,fraction_smoothed endif return end subroutine regularize !=================================================================================== subroutine topog_regularization_init(ocean_mask) logical, intent(in), dimension(:,:) :: ocean_mask integer :: m, i, j real, allocatable, dimension(:,:,:) :: legendre_global, legendre logical, allocatable, dimension(:,:) :: ocean_mask_global call write_version_number(version, tagname) if(.not.transforms_are_initialized()) then call error_mesg('topog_regularization_init','Transforms are not initialized',FATAL) endif call get_grid_domain(is, ie, js, je) call get_spec_domain(ms, me, ns, ne) if(any(shape(ocean_mask) /= (/ie-is+1,je-js+1/))) then write(chtmp1,'(2i4)') shape(ocean_mask) write(chtmp2,'(2i4)') (/ie-is+1,je-js+1/) call error_mesg('topog_regularization_init', & 'Input argument ocean_mask has incorrect dimensions. 
shape(ocean_mask)='//chtmp1//' Should be '//chtmp2,FATAL) endif call get_lon_max(lon_max) call get_lat_max(lat_max) call get_num_fourier(num_fourier) call get_num_spherical(num_spherical) call get_fourier_inc(fourier_inc) nmax = min(num_fourier,ne) allocate(sin_lat_global(lat_max)) allocate(wts_lat_global(lat_max)) call get_sin_lat(sin_lat_global) call get_wts_lat(wts_lat_global) allocate(facm (ms:me)) allocate(sin_facm(ms:me)) do m=ms,me facm(m) = pi*m/(2.*num_fourier) sin_facm(m) = sin(facm(m)) enddo allocate(Dnm(ms:me,ns:ne), Hnm(ms:me,ns:ne), bnm(ms:me,ns:ne), anm(ms:me,ns:ne)) allocate(DR2(ms:me,ns:ne), DelAnm(ms:me,ns:ne), DelBnm(ms:me,ns:ne)) allocate(smoothed_field_tmp(is:ie,js:je), rough(is:ie,js:je), cost_field(is:ie,js:je)) allocate(legendre_global(0:num_fourier,0:num_spherical,lat_max/2)) allocate(legendre(ms:me, ns:ne, lat_max/2)) call compute_legendre(legendre_global, num_fourier, fourier_inc, num_spherical, -sin_lat_global(1:lat_max/2), lat_max/2) legendre = legendre_global(ms:me,ns:ne,:) allocate(ocean_mask_global(lon_max,lat_max)) call mpp_global_field(grid_domain, ocean_mask, ocean_mask_global) Dnm=cmplx(0.,0.) do i=1,lon_max do j=1,lat_max/2 if(ocean_mask_global(i,j)) then Dnm = Dnm + wts_lat_global(j)*legendre(:,:,j)**2 endif enddo do j=lat_max/2+1,lat_max if(ocean_mask_global(i,j)) then Dnm = Dnm + wts_lat_global(j)*legendre(:,:,lat_max+1-j)**2 endif enddo enddo deallocate(legendre_global, legendre, ocean_mask_global) Dnm = Dnm/lon_max module_is_initialized = .true. return end subroutine topog_regularization_init !=================================================================================== end module topog_regularization_mod
gpl-2.0
GoldenCheetah/GoldenCheetah
src/Cloud/CloudDBVersion.cpp
10
8582
/* * Copyright (c) 2015 Joern Rischmueller (joern.rm@gmail.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "CloudDBVersion.h" #include "CloudDBCommon.h" #include "Secrets.h" #include "GcUpgrade.h" #include "Colors.h" #include <QJsonParseError> #include <QJsonObject> #include <QJsonArray> #include <QUrl> #include <QUrlQuery> #include <QMessageBox> #include <QEventLoop> #include <QLabel> // the Version code must be in sync with CloudDB GAE Version codes int CloudDBVersionClient::CloudDBVersion_Release = 10; int CloudDBVersionClient::CloudDBVersion_ReleaseCandidate = 20; int CloudDBVersionClient::CloudDBVersion_DevelopmentBuild = 30; // minimum number of days between each check int CloudDBVersionClient::CloudDBVersion_Days_Delay = 10; CloudDBVersionClient::CloudDBVersionClient() { } CloudDBVersionClient::~CloudDBVersionClient() { } void CloudDBVersionClient::informUserAboutLatestVersions() { // update check is done only once every xx days - to save DB read quota QDate lastUpdateCheck = (appsettings->value(NULL, GC_LAST_VERSION_CHECK_DATE, QDate(1990, 1, 1)).toDate()); if (lastUpdateCheck.addDays(CloudDBVersionClient::CloudDBVersion_Days_Delay) > QDate::currentDate()) return; // consider any updates done since the last start and consider only newer versions int lastVersion = appsettings->value(NULL, 
GC_LAST_VERSION_CHECKED, 0).toInt(); if (VERSION_LATEST > lastVersion) { appsettings->setValue(GC_LAST_VERSION_CHECKED, VERSION_LATEST); lastVersion = VERSION_LATEST; } QNetworkAccessManager *l_nam = new QNetworkAccessManager(this); QNetworkRequest request; QUrlQuery query; query.addQueryItem("version", QString::number(lastVersion)); CloudDBCommon::prepareRequest(request, CloudDBCommon::cloudDBBaseURL+"version", &query); l_nam->get(request); connect(l_nam, SIGNAL(finished(QNetworkReply*)), this, SLOT(showVersionPopup(QNetworkReply*))); // check is done, update the date for next check period appsettings->setValue(GC_LAST_VERSION_CHECK_DATE, QDate::currentDate()); } void CloudDBVersionClient::showVersionPopup(QNetworkReply * l_reply) { QList<VersionAPIGetV1> retrieved; if (l_reply->error() == QNetworkReply::NoError) { QByteArray result = l_reply->readAll(); unmarshallAPIGetV1(result, &retrieved); } if (retrieved.count() > 0) { CloudDBUpdateAvailableDialog updateAvailableDialog(retrieved); updateAvailableDialog.setModal(true); // we are not interested in the result - update check status is updated as part of the dialog box updateAvailableDialog.exec(); } } bool CloudDBVersionClient::unmarshallAPIGetV1(QByteArray json, QList<VersionAPIGetV1> *versionList) { QJsonParseError parseError; QJsonDocument document = QJsonDocument::fromJson(json, &parseError); // all these things should not happen and we have not valid object to return if (parseError.error != QJsonParseError::NoError || document.isEmpty() || document.isNull()) { return false; } // we only get single objects here if (document.isObject()) { VersionAPIGetV1 version; QJsonObject object = document.object(); version.Id = object.value("id").toDouble(); version.Version = object.value("version").toInt(); version.VersionText = object.value("versionText").toString(); version.Type = object.value("releaseType").toInt(); version.URL = object.value("downloadURL").toString(); version.Text = object.value("text").toString(); 
versionList->append(version); } else if (document.isArray()) { QJsonArray array(document.array()); for (int i = 0; i< array.size(); i++) { QJsonValue value = array.at(i); if (value.isObject()) { VersionAPIGetV1 version; QJsonObject object = value.toObject(); version.Id = object.value("id").toDouble(); version.Version = object.value("version").toInt(); version.VersionText = object.value("versionText").toString(); version.Type = object.value("releaseType").toInt(); version.URL = object.value("downloadURL").toString(); version.Text = object.value("text").toString(); versionList->append(version); } } } return true; } CloudDBUpdateAvailableDialog::CloudDBUpdateAvailableDialog(QList<VersionAPIGetV1> versions) : versions(versions) { setWindowTitle(QString(tr("GoldenCheetah - Check for new versions"))); setMinimumWidth(750*dpiXFactor); QVBoxLayout *layout = new QVBoxLayout(this); QPushButton *important = new QPushButton(style()->standardIcon(QStyle::SP_MessageBoxInformation), "", this); important->setFixedSize(80*dpiXFactor,80*dpiYFactor); important->setFlat(true); important->setIconSize(QSize(80*dpiXFactor,80*dpiYFactor)); important->setAutoFillBackground(false); important->setFocusPolicy(Qt::NoFocus); QLabel *header = new QLabel(this); header->setWordWrap(true); header->setTextFormat(Qt::RichText); if (versions.count()>1) { header->setText(QString(tr("<b><big>New versions of GoldenCheetah are available</big></b>"))); } else { header->setText(QString(tr("<b><big>A new version of GoldenCheetah is available</big></b>"))); } QHBoxLayout *toprow = new QHBoxLayout; toprow->addWidget(important); toprow->addWidget(header); layout->addLayout(toprow); QLabel *text = new QLabel(this); text->setWordWrap(true); text->setTextFormat(Qt::RichText); text->setOpenExternalLinks(true); // build text QString messageText; VersionAPIGetV1 version; foreach (version, versions) { if (version.Type == CloudDBVersionClient::CloudDBVersion_Release) { messageText.append("<h3>" + tr("Release: %1 
").arg(version.VersionText)); } else if (version.Type == CloudDBVersionClient::CloudDBVersion_ReleaseCandidate) { messageText.append("<h3>" + tr("Release Candidate: %1 ").arg(version.VersionText)); } else if (version.Type == CloudDBVersionClient::CloudDBVersion_DevelopmentBuild) { messageText.append("<h3>" + tr("Development Release: %1 ").arg(version.VersionText)); }; messageText.append( QString("</h3><b><a href=\"%1\">%2</a></b><br><br>").arg(version.URL).arg(version.URL) + version.Text + "<br>"); if (versions.count()>1) { messageText.append("<hr>"); } }; text->setText(messageText); scrollText = new QScrollArea(); scrollText->setWidget(text); scrollText->setWidgetResizable(true); layout->addWidget(scrollText); QHBoxLayout *lastRow = new QHBoxLayout; doNotAskAgainButton = new QPushButton(tr("Do not show again"), this); connect(doNotAskAgainButton, SIGNAL(clicked()), this, SLOT(doNotAskAgain())); askAgainNextStartButton = new QPushButton(tr("Show again in %1 days").arg(CloudDBVersionClient::CloudDBVersion_Days_Delay), this); askAgainNextStartButton->setDefault(true); connect(askAgainNextStartButton, SIGNAL(clicked()), this, SLOT(askAgainOnNextStart())); lastRow->addStretch(); lastRow->addWidget(doNotAskAgainButton); lastRow->addWidget(askAgainNextStartButton); layout->addLayout(lastRow); } void CloudDBUpdateAvailableDialog::doNotAskAgain() { // update the version too which update check was done, too not show it again if (versions.count()>0) { appsettings->setValue(GC_LAST_VERSION_CHECKED, versions.at(0).Version); } accept(); } void CloudDBUpdateAvailableDialog::askAgainOnNextStart() { // no update to the version to be checked against - same procedure next start reject(); }
gpl-2.0
rudischilder/gr10_2
sw/airborne/subsystems/gps/gps_sim.c
10
1397
/* * Copyright (C) 2008-2011 The Paparazzi Team * * This file is part of paparazzi. * * paparazzi is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * paparazzi is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with paparazzi; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ #include "subsystems/gps.h" bool_t gps_available; #if 0 void gps_feed_values(double utm_north, double utm_east, double utm_alt, double gspeed, double course, double climb) { gps.utm_pos.north = CM_OF_M(utm_north); gps.utm_pos.east = CM_OF_M(utm_east); //TODO set height above ellipsoid properly gps.hmsl = utm_alt * 1000.; gps.gspeed = CM_OF_M(gspeed); gps.course = EM7RAD_OF_RAD(RadOfDeg(course / 10.)); gps.ned_vel.z = -climb * 100.; gps.fix = GPS_FIX_3D; gps_available = TRUE; } #endif void gps_impl_init(void) { gps.fix = GPS_FIX_NONE; gps_available = FALSE; }
gpl-2.0
RaspberryPi-CM/android_kernel_raspberry_pi2
sound/soc/codecs/tas2552.c
10
20596
/* * tas2552.c - ALSA SoC Texas Instruments TAS2552 Mono Audio Amplifier * * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com * * Author: Dan Murphy <dmurphy@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/gpio/consumer.h> #include <linux/regulator/consumer.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/tlv.h> #include <sound/tas2552-plat.h> #include <dt-bindings/sound/tas2552.h> #include "tas2552.h" static struct reg_default tas2552_reg_defs[] = { {TAS2552_CFG_1, 0x22}, {TAS2552_CFG_3, 0x80}, {TAS2552_DOUT, 0x00}, {TAS2552_OUTPUT_DATA, 0xc0}, {TAS2552_PDM_CFG, 0x01}, {TAS2552_PGA_GAIN, 0x00}, {TAS2552_BOOST_APT_CTRL, 0x0f}, {TAS2552_RESERVED_0D, 0xbe}, {TAS2552_LIMIT_RATE_HYS, 0x08}, {TAS2552_CFG_2, 0xef}, {TAS2552_SER_CTRL_1, 0x00}, {TAS2552_SER_CTRL_2, 0x00}, {TAS2552_PLL_CTRL_1, 0x10}, {TAS2552_PLL_CTRL_2, 0x00}, {TAS2552_PLL_CTRL_3, 0x00}, {TAS2552_BTIP, 0x8f}, {TAS2552_BTS_CTRL, 0x80}, {TAS2552_LIMIT_RELEASE, 0x04}, {TAS2552_LIMIT_INT_COUNT, 0x00}, {TAS2552_EDGE_RATE_CTRL, 0x40}, {TAS2552_VBAT_DATA, 0x00}, }; #define TAS2552_NUM_SUPPLIES 3 static const char *tas2552_supply_names[TAS2552_NUM_SUPPLIES] = { "vbat", /* vbat voltage */ "iovdd", /* I/O Voltage */ "avdd", /* Analog DAC Voltage */ }; struct tas2552_data { struct snd_soc_codec 
*codec; struct regmap *regmap; struct i2c_client *tas2552_client; struct regulator_bulk_data supplies[TAS2552_NUM_SUPPLIES]; struct gpio_desc *enable_gpio; unsigned char regs[TAS2552_VBAT_DATA]; unsigned int pll_clkin; int pll_clk_id; unsigned int pdm_clk; int pdm_clk_id; unsigned int dai_fmt; unsigned int tdm_delay; }; static int tas2552_post_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); switch (event) { case SND_SOC_DAPM_POST_PMU: snd_soc_write(codec, TAS2552_RESERVED_0D, 0xc0); snd_soc_update_bits(codec, TAS2552_LIMIT_RATE_HYS, (1 << 5), (1 << 5)); snd_soc_update_bits(codec, TAS2552_CFG_2, 1, 0); snd_soc_update_bits(codec, TAS2552_CFG_1, TAS2552_SWS, 0); break; case SND_SOC_DAPM_POST_PMD: snd_soc_update_bits(codec, TAS2552_CFG_1, TAS2552_SWS, TAS2552_SWS); snd_soc_update_bits(codec, TAS2552_CFG_2, 1, 1); snd_soc_update_bits(codec, TAS2552_LIMIT_RATE_HYS, (1 << 5), 0); snd_soc_write(codec, TAS2552_RESERVED_0D, 0xbe); break; } return 0; } /* Input mux controls */ static const char * const tas2552_input_texts[] = { "Digital", "Analog" }; static SOC_ENUM_SINGLE_DECL(tas2552_input_mux_enum, TAS2552_CFG_3, 7, tas2552_input_texts); static const struct snd_kcontrol_new tas2552_input_mux_control = SOC_DAPM_ENUM("Route", tas2552_input_mux_enum); static const struct snd_soc_dapm_widget tas2552_dapm_widgets[] = { SND_SOC_DAPM_INPUT("IN"), /* MUX Controls */ SND_SOC_DAPM_MUX("Input selection", SND_SOC_NOPM, 0, 0, &tas2552_input_mux_control), SND_SOC_DAPM_AIF_IN("DAC IN", "DAC Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_OUT_DRV("ClassD", TAS2552_CFG_2, 7, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("PLL", TAS2552_CFG_2, 3, 0, NULL, 0), SND_SOC_DAPM_POST("Post Event", tas2552_post_event), SND_SOC_DAPM_OUTPUT("OUT") }; static const struct snd_soc_dapm_route tas2552_audio_map[] = { {"DAC", NULL, "DAC IN"}, {"Input selection", "Digital", 
"DAC"}, {"Input selection", "Analog", "IN"}, {"ClassD", NULL, "Input selection"}, {"OUT", NULL, "ClassD"}, {"ClassD", NULL, "PLL"}, }; #ifdef CONFIG_PM static void tas2552_sw_shutdown(struct tas2552_data *tas2552, int sw_shutdown) { u8 cfg1_reg = 0; if (!tas2552->codec) return; if (sw_shutdown) cfg1_reg = TAS2552_SWS; snd_soc_update_bits(tas2552->codec, TAS2552_CFG_1, TAS2552_SWS, cfg1_reg); } #endif static int tas2552_setup_pll(struct snd_soc_codec *codec, struct snd_pcm_hw_params *params) { struct tas2552_data *tas2552 = dev_get_drvdata(codec->dev); bool bypass_pll = false; unsigned int pll_clk = params_rate(params) * 512; unsigned int pll_clkin = tas2552->pll_clkin; u8 pll_enable; if (!pll_clkin) { if (tas2552->pll_clk_id != TAS2552_PLL_CLKIN_BCLK) return -EINVAL; pll_clkin = snd_soc_params_to_bclk(params); pll_clkin += tas2552->tdm_delay; } pll_enable = snd_soc_read(codec, TAS2552_CFG_2) & TAS2552_PLL_ENABLE; snd_soc_update_bits(codec, TAS2552_CFG_2, TAS2552_PLL_ENABLE, 0); if (pll_clkin == pll_clk) bypass_pll = true; if (bypass_pll) { /* By pass the PLL configuration */ snd_soc_update_bits(codec, TAS2552_PLL_CTRL_2, TAS2552_PLL_BYPASS, TAS2552_PLL_BYPASS); } else { /* Fill in the PLL control registers for J & D * pll_clk = (.5 * pll_clkin * J.D) / 2^p * Need to fill in J and D here based on incoming freq */ unsigned int d; u8 j; u8 pll_sel = (tas2552->pll_clk_id << 3) & TAS2552_PLL_SRC_MASK; u8 p = snd_soc_read(codec, TAS2552_PLL_CTRL_1); p = (p >> 7); recalc: j = (pll_clk * 2 * (1 << p)) / pll_clkin; d = (pll_clk * 2 * (1 << p)) % pll_clkin; d /= (pll_clkin / 10000); if (d && (pll_clkin < 512000 || pll_clkin > 9200000)) { if (tas2552->pll_clk_id == TAS2552_PLL_CLKIN_BCLK) { pll_clkin = 1800000; pll_sel = (TAS2552_PLL_CLKIN_1_8_FIXED << 3) & TAS2552_PLL_SRC_MASK; } else { pll_clkin = snd_soc_params_to_bclk(params); pll_clkin += tas2552->tdm_delay; pll_sel = (TAS2552_PLL_CLKIN_BCLK << 3) & TAS2552_PLL_SRC_MASK; } goto recalc; } snd_soc_update_bits(codec, 
TAS2552_CFG_1, TAS2552_PLL_SRC_MASK, pll_sel); snd_soc_update_bits(codec, TAS2552_PLL_CTRL_1, TAS2552_PLL_J_MASK, j); /* Will clear the PLL_BYPASS bit */ snd_soc_write(codec, TAS2552_PLL_CTRL_2, TAS2552_PLL_D_UPPER(d)); snd_soc_write(codec, TAS2552_PLL_CTRL_3, TAS2552_PLL_D_LOWER(d)); } /* Restore PLL status */ snd_soc_update_bits(codec, TAS2552_CFG_2, TAS2552_PLL_ENABLE, pll_enable); return 0; } static int tas2552_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct tas2552_data *tas2552 = dev_get_drvdata(codec->dev); int cpf; u8 ser_ctrl1_reg, wclk_rate; switch (params_width(params)) { case 16: ser_ctrl1_reg = TAS2552_WORDLENGTH_16BIT; cpf = 32 + tas2552->tdm_delay; break; case 20: ser_ctrl1_reg = TAS2552_WORDLENGTH_20BIT; cpf = 64 + tas2552->tdm_delay; break; case 24: ser_ctrl1_reg = TAS2552_WORDLENGTH_24BIT; cpf = 64 + tas2552->tdm_delay; break; case 32: ser_ctrl1_reg = TAS2552_WORDLENGTH_32BIT; cpf = 64 + tas2552->tdm_delay; break; default: dev_err(codec->dev, "Not supported sample size: %d\n", params_width(params)); return -EINVAL; } if (cpf <= 32) ser_ctrl1_reg |= TAS2552_CLKSPERFRAME_32; else if (cpf <= 64) ser_ctrl1_reg |= TAS2552_CLKSPERFRAME_64; else if (cpf <= 128) ser_ctrl1_reg |= TAS2552_CLKSPERFRAME_128; else ser_ctrl1_reg |= TAS2552_CLKSPERFRAME_256; snd_soc_update_bits(codec, TAS2552_SER_CTRL_1, TAS2552_WORDLENGTH_MASK | TAS2552_CLKSPERFRAME_MASK, ser_ctrl1_reg); switch (params_rate(params)) { case 8000: wclk_rate = TAS2552_WCLK_FREQ_8KHZ; break; case 11025: case 12000: wclk_rate = TAS2552_WCLK_FREQ_11_12KHZ; break; case 16000: wclk_rate = TAS2552_WCLK_FREQ_16KHZ; break; case 22050: case 24000: wclk_rate = TAS2552_WCLK_FREQ_22_24KHZ; break; case 32000: wclk_rate = TAS2552_WCLK_FREQ_32KHZ; break; case 44100: case 48000: wclk_rate = TAS2552_WCLK_FREQ_44_48KHZ; break; case 88200: case 96000: wclk_rate = TAS2552_WCLK_FREQ_88_96KHZ; break; case 
176400: case 192000: wclk_rate = TAS2552_WCLK_FREQ_176_192KHZ; break; default: dev_err(codec->dev, "Not supported sample rate: %d\n", params_rate(params)); return -EINVAL; } snd_soc_update_bits(codec, TAS2552_CFG_3, TAS2552_WCLK_FREQ_MASK, wclk_rate); return tas2552_setup_pll(codec, params); } #define TAS2552_DAI_FMT_MASK (TAS2552_BCLKDIR | \ TAS2552_WCLKDIR | \ TAS2552_DATAFORMAT_MASK) static int tas2552_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec); int delay = 0; /* TDM slot selection only valid in DSP_A/_B mode */ if (tas2552->dai_fmt == SND_SOC_DAIFMT_DSP_A) delay += (tas2552->tdm_delay + 1); else if (tas2552->dai_fmt == SND_SOC_DAIFMT_DSP_B) delay += tas2552->tdm_delay; /* Configure data delay */ snd_soc_write(codec, TAS2552_SER_CTRL_2, delay); return 0; } static int tas2552_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_codec *codec = dai->codec; struct tas2552_data *tas2552 = dev_get_drvdata(codec->dev); u8 serial_format; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: serial_format = 0x00; break; case SND_SOC_DAIFMT_CBS_CFM: serial_format = TAS2552_WCLKDIR; break; case SND_SOC_DAIFMT_CBM_CFS: serial_format = TAS2552_BCLKDIR; break; case SND_SOC_DAIFMT_CBM_CFM: serial_format = (TAS2552_BCLKDIR | TAS2552_WCLKDIR); break; default: dev_vdbg(codec->dev, "DAI Format master is not found\n"); return -EINVAL; } switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_INV_MASK)) { case (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF): break; case (SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF): case (SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF): serial_format |= TAS2552_DATAFORMAT_DSP; break; case (SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_NB_NF): serial_format |= TAS2552_DATAFORMAT_RIGHT_J; break; case (SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF): serial_format |= TAS2552_DATAFORMAT_LEFT_J; 
break; default: dev_vdbg(codec->dev, "DAI Format is not found\n"); return -EINVAL; } tas2552->dai_fmt = fmt & SND_SOC_DAIFMT_FORMAT_MASK; snd_soc_update_bits(codec, TAS2552_SER_CTRL_1, TAS2552_DAI_FMT_MASK, serial_format); return 0; } static int tas2552_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = dai->codec; struct tas2552_data *tas2552 = dev_get_drvdata(codec->dev); u8 reg, mask, val; switch (clk_id) { case TAS2552_PLL_CLKIN_MCLK: case TAS2552_PLL_CLKIN_IVCLKIN: if (freq < 512000 || freq > 24576000) { /* out of range PLL_CLKIN, fall back to use BCLK */ dev_warn(codec->dev, "Out of range PLL_CLKIN: %u\n", freq); clk_id = TAS2552_PLL_CLKIN_BCLK; freq = 0; } /* fall through */ case TAS2552_PLL_CLKIN_BCLK: case TAS2552_PLL_CLKIN_1_8_FIXED: mask = TAS2552_PLL_SRC_MASK; val = (clk_id << 3) & mask; /* bit 4:5 in the register */ reg = TAS2552_CFG_1; tas2552->pll_clk_id = clk_id; tas2552->pll_clkin = freq; break; case TAS2552_PDM_CLK_PLL: case TAS2552_PDM_CLK_IVCLKIN: case TAS2552_PDM_CLK_BCLK: case TAS2552_PDM_CLK_MCLK: mask = TAS2552_PDM_CLK_SEL_MASK; val = (clk_id >> 1) & mask; /* bit 0:1 in the register */ reg = TAS2552_PDM_CFG; tas2552->pdm_clk_id = clk_id; tas2552->pdm_clk = freq; break; default: dev_err(codec->dev, "Invalid clk id: %d\n", clk_id); return -EINVAL; } snd_soc_update_bits(codec, reg, mask, val); return 0; } static int tas2552_set_dai_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct snd_soc_codec *codec = dai->codec; struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec); unsigned int lsb; if (unlikely(!tx_mask)) { dev_err(codec->dev, "tx masks need to be non 0\n"); return -EINVAL; } /* TDM based on DSP mode requires slots to be adjacent */ lsb = __ffs(tx_mask); if ((lsb + 1) != __fls(tx_mask)) { dev_err(codec->dev, "Invalid mask, slots must be adjacent\n"); return -EINVAL; } tas2552->tdm_delay = lsb * slot_width; 
/* DOUT in high-impedance on inactive bit clocks */ snd_soc_update_bits(codec, TAS2552_DOUT, TAS2552_SDOUT_TRISTATE, TAS2552_SDOUT_TRISTATE); return 0; } static int tas2552_mute(struct snd_soc_dai *dai, int mute) { u8 cfg1_reg = 0; struct snd_soc_codec *codec = dai->codec; if (mute) cfg1_reg |= TAS2552_MUTE; snd_soc_update_bits(codec, TAS2552_CFG_1, TAS2552_MUTE, cfg1_reg); return 0; } #ifdef CONFIG_PM static int tas2552_runtime_suspend(struct device *dev) { struct tas2552_data *tas2552 = dev_get_drvdata(dev); tas2552_sw_shutdown(tas2552, 1); regcache_cache_only(tas2552->regmap, true); regcache_mark_dirty(tas2552->regmap); if (tas2552->enable_gpio) gpiod_set_value(tas2552->enable_gpio, 0); return 0; } static int tas2552_runtime_resume(struct device *dev) { struct tas2552_data *tas2552 = dev_get_drvdata(dev); if (tas2552->enable_gpio) gpiod_set_value(tas2552->enable_gpio, 1); tas2552_sw_shutdown(tas2552, 0); regcache_cache_only(tas2552->regmap, false); regcache_sync(tas2552->regmap); return 0; } #endif static const struct dev_pm_ops tas2552_pm = { SET_RUNTIME_PM_OPS(tas2552_runtime_suspend, tas2552_runtime_resume, NULL) }; static struct snd_soc_dai_ops tas2552_speaker_dai_ops = { .hw_params = tas2552_hw_params, .prepare = tas2552_prepare, .set_sysclk = tas2552_set_dai_sysclk, .set_fmt = tas2552_set_dai_fmt, .set_tdm_slot = tas2552_set_dai_tdm_slot, .digital_mute = tas2552_mute, }; /* Formats supported by TAS2552 driver. */ #define TAS2552_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) /* TAS2552 dai structure. */ static struct snd_soc_dai_driver tas2552_dai[] = { { .name = "tas2552-amplifier", .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = TAS2552_FORMATS, }, .ops = &tas2552_speaker_dai_ops, }, }; /* * DAC digital volumes. 
From -7 to 24 dB in 1 dB steps */ static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0); static const char * const tas2552_din_source_select[] = { "Muted", "Left", "Right", "Left + Right average", }; static SOC_ENUM_SINGLE_DECL(tas2552_din_source_enum, TAS2552_CFG_3, 3, tas2552_din_source_select); static const struct snd_kcontrol_new tas2552_snd_controls[] = { SOC_SINGLE_TLV("Speaker Driver Playback Volume", TAS2552_PGA_GAIN, 0, 0x1f, 0, dac_tlv), SOC_ENUM("DIN source", tas2552_din_source_enum), }; static int tas2552_codec_probe(struct snd_soc_codec *codec) { struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec); int ret; tas2552->codec = codec; ret = regulator_bulk_enable(ARRAY_SIZE(tas2552->supplies), tas2552->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); return ret; } if (tas2552->enable_gpio) gpiod_set_value(tas2552->enable_gpio, 1); ret = pm_runtime_get_sync(codec->dev); if (ret < 0) { dev_err(codec->dev, "Enabling device failed: %d\n", ret); goto probe_fail; } snd_soc_update_bits(codec, TAS2552_CFG_1, TAS2552_MUTE, TAS2552_MUTE); snd_soc_write(codec, TAS2552_CFG_3, TAS2552_I2S_OUT_SEL | TAS2552_DIN_SRC_SEL_AVG_L_R); snd_soc_write(codec, TAS2552_OUTPUT_DATA, TAS2552_PDM_DATA_SEL_V_I | TAS2552_R_DATA_OUT(TAS2552_DATA_OUT_V_DATA)); snd_soc_write(codec, TAS2552_BOOST_APT_CTRL, TAS2552_APT_DELAY_200 | TAS2552_APT_THRESH_20_17); snd_soc_write(codec, TAS2552_CFG_2, TAS2552_BOOST_EN | TAS2552_APT_EN | TAS2552_LIM_EN); return 0; probe_fail: if (tas2552->enable_gpio) gpiod_set_value(tas2552->enable_gpio, 0); regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies), tas2552->supplies); return -EIO; } static int tas2552_codec_remove(struct snd_soc_codec *codec) { struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec); pm_runtime_put(codec->dev); if (tas2552->enable_gpio) gpiod_set_value(tas2552->enable_gpio, 0); return 0; }; #ifdef CONFIG_PM static int tas2552_suspend(struct snd_soc_codec *codec) { struct tas2552_data 
*tas2552 = snd_soc_codec_get_drvdata(codec); int ret; ret = regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies), tas2552->supplies); if (ret != 0) dev_err(codec->dev, "Failed to disable supplies: %d\n", ret); return 0; } static int tas2552_resume(struct snd_soc_codec *codec) { struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec); int ret; ret = regulator_bulk_enable(ARRAY_SIZE(tas2552->supplies), tas2552->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); } return 0; } #else #define tas2552_suspend NULL #define tas2552_resume NULL #endif static struct snd_soc_codec_driver soc_codec_dev_tas2552 = { .probe = tas2552_codec_probe, .remove = tas2552_codec_remove, .suspend = tas2552_suspend, .resume = tas2552_resume, .ignore_pmdown_time = true, .controls = tas2552_snd_controls, .num_controls = ARRAY_SIZE(tas2552_snd_controls), .dapm_widgets = tas2552_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(tas2552_dapm_widgets), .dapm_routes = tas2552_audio_map, .num_dapm_routes = ARRAY_SIZE(tas2552_audio_map), }; static const struct regmap_config tas2552_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = TAS2552_MAX_REG, .reg_defaults = tas2552_reg_defs, .num_reg_defaults = ARRAY_SIZE(tas2552_reg_defs), .cache_type = REGCACHE_RBTREE, }; static int tas2552_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev; struct tas2552_data *data; int ret; int i; dev = &client->dev; data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (data == NULL) return -ENOMEM; data->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(data->enable_gpio)) return PTR_ERR(data->enable_gpio); data->tas2552_client = client; data->regmap = devm_regmap_init_i2c(client, &tas2552_regmap_config); if (IS_ERR(data->regmap)) { ret = PTR_ERR(data->regmap); dev_err(&client->dev, "Failed to allocate register map: %d\n", ret); return ret; } for (i = 0; i < ARRAY_SIZE(data->supplies); i++) 
data->supplies[i].supply = tas2552_supply_names[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies), data->supplies); if (ret != 0) { dev_err(dev, "Failed to request supplies: %d\n", ret); return ret; } pm_runtime_set_active(&client->dev); pm_runtime_set_autosuspend_delay(&client->dev, 1000); pm_runtime_use_autosuspend(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_sync_autosuspend(&client->dev); dev_set_drvdata(&client->dev, data); ret = snd_soc_register_codec(&client->dev, &soc_codec_dev_tas2552, tas2552_dai, ARRAY_SIZE(tas2552_dai)); if (ret < 0) dev_err(&client->dev, "Failed to register codec: %d\n", ret); return ret; } static int tas2552_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); pm_runtime_disable(&client->dev); return 0; } static const struct i2c_device_id tas2552_id[] = { { "tas2552", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tas2552_id); #if IS_ENABLED(CONFIG_OF) static const struct of_device_id tas2552_of_match[] = { { .compatible = "ti,tas2552", }, {}, }; MODULE_DEVICE_TABLE(of, tas2552_of_match); #endif static struct i2c_driver tas2552_i2c_driver = { .driver = { .name = "tas2552", .owner = THIS_MODULE, .of_match_table = of_match_ptr(tas2552_of_match), .pm = &tas2552_pm, }, .probe = tas2552_probe, .remove = tas2552_i2c_remove, .id_table = tas2552_id, }; module_i2c_driver(tas2552_i2c_driver); MODULE_AUTHOR("Dan Muprhy <dmurphy@ti.com>"); MODULE_DESCRIPTION("TAS2552 Audio amplifier driver"); MODULE_LICENSE("GPL");
gpl-2.0
Evil-Green/Ptah-GT-I9300
drivers/usb/serial/ti_usb_3410_5052.c
522
47866
/* vi: ts=8 sw=8 * * TI 3410/5052 USB Serial Driver * * Copyright (C) 2004 Texas Instruments * * This driver is based on the Linux io_ti driver, which is * Copyright (C) 2000-2002 Inside Out Networks * Copyright (C) 2001-2002 Greg Kroah-Hartman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * For questions or problems with this driver, contact Texas Instruments * technical support, or Al Borchers <alborchers@steinerpoint.com>, or * Peter Berger <pberger@brimson.com>. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/ioctl.h> #include <linux/serial.h> #include <linux/kfifo.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "ti_usb_3410_5052.h" /* Defines */ #define TI_DRIVER_VERSION "v0.10" #define TI_DRIVER_AUTHOR "Al Borchers <alborchers@steinerpoint.com>" #define TI_DRIVER_DESC "TI USB 3410/5052 Serial Driver" #define TI_FIRMWARE_BUF_SIZE 16284 #define TI_WRITE_BUF_SIZE 1024 #define TI_TRANSFER_TIMEOUT 2 #define TI_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */ /* supported setserial flags */ #define TI_SET_SERIAL_FLAGS 0 /* read urb states */ #define TI_READ_URB_RUNNING 0 #define TI_READ_URB_STOPPING 1 #define TI_READ_URB_STOPPED 2 #define TI_EXTRA_VID_PID_COUNT 5 /* Structures */ struct ti_port { int tp_is_open; __u8 tp_msr; __u8 tp_lsr; __u8 tp_shadow_mcr; __u8 tp_uart_mode; /* 232 or 485 modes */ unsigned int tp_uart_base_addr; int tp_flags; int tp_closing_wait;/* in .01 secs */ struct async_icount tp_icount; wait_queue_head_t tp_msr_wait; /* wait for msr change */ 
wait_queue_head_t tp_write_wait; struct ti_device *tp_tdev; struct usb_serial_port *tp_port; spinlock_t tp_lock; int tp_read_urb_state; int tp_write_urb_in_use; struct kfifo write_fifo; }; struct ti_device { struct mutex td_open_close_lock; int td_open_port_count; struct usb_serial *td_serial; int td_is_3410; int td_urb_error; }; /* Function Declarations */ static int ti_startup(struct usb_serial *serial); static void ti_release(struct usb_serial *serial); static int ti_open(struct tty_struct *tty, struct usb_serial_port *port); static void ti_close(struct usb_serial_port *port); static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count); static int ti_write_room(struct tty_struct *tty); static int ti_chars_in_buffer(struct tty_struct *tty); static void ti_throttle(struct tty_struct *tty); static void ti_unthrottle(struct tty_struct *tty); static int ti_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); static int ti_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); static void ti_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios); static int ti_tiocmget(struct tty_struct *tty); static int ti_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static void ti_break(struct tty_struct *tty, int break_state); static void ti_interrupt_callback(struct urb *urb); static void ti_bulk_in_callback(struct urb *urb); static void ti_bulk_out_callback(struct urb *urb); static void ti_recv(struct device *dev, struct tty_struct *tty, unsigned char *data, int length); static void ti_send(struct ti_port *tport); static int ti_set_mcr(struct ti_port *tport, unsigned int mcr); static int ti_get_lsr(struct ti_port *tport); static int ti_get_serial_info(struct ti_port *tport, struct serial_struct __user *ret_arg); static int ti_set_serial_info(struct tty_struct *tty, struct ti_port *tport, struct serial_struct __user *new_arg); 
static void ti_handle_new_msr(struct ti_port *tport, __u8 msr); static void ti_drain(struct ti_port *tport, unsigned long timeout, int flush); static void ti_stop_read(struct ti_port *tport, struct tty_struct *tty); static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty); static int ti_command_out_sync(struct ti_device *tdev, __u8 command, __u16 moduleid, __u16 value, __u8 *data, int size); static int ti_command_in_sync(struct ti_device *tdev, __u8 command, __u16 moduleid, __u16 value, __u8 *data, int size); static int ti_write_byte(struct ti_device *tdev, unsigned long addr, __u8 mask, __u8 byte); static int ti_download_firmware(struct ti_device *tdev); /* Data */ /* module parameters */ static int debug; static int closing_wait = TI_DEFAULT_CLOSING_WAIT; static ushort vendor_3410[TI_EXTRA_VID_PID_COUNT]; static unsigned int vendor_3410_count; static ushort product_3410[TI_EXTRA_VID_PID_COUNT]; static unsigned int product_3410_count; static ushort vendor_5052[TI_EXTRA_VID_PID_COUNT]; static unsigned int vendor_5052_count; static ushort product_5052[TI_EXTRA_VID_PID_COUNT]; static unsigned int product_5052_count; /* supported devices */ /* the array dimension is the number of default entries plus */ /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ /* null entry */ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { 
USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, }; static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, }; static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, { } }; static struct usb_driver ti_usb_driver = { .name = "ti_usb_3410_5052", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = ti_id_table_combined, .no_dynamic_id = 1, }; static struct usb_serial_driver ti_1port_device = { .driver = { .owner = THIS_MODULE, .name = "ti_usb_3410_5052_1", }, 
.description = "TI USB 3410 1 port adapter", .usb_driver = &ti_usb_driver, .id_table = ti_id_table_3410, .num_ports = 1, .attach = ti_startup, .release = ti_release, .open = ti_open, .close = ti_close, .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .ioctl = ti_ioctl, .set_termios = ti_set_termios, .tiocmget = ti_tiocmget, .tiocmset = ti_tiocmset, .get_icount = ti_get_icount, .break_ctl = ti_break, .read_int_callback = ti_interrupt_callback, .read_bulk_callback = ti_bulk_in_callback, .write_bulk_callback = ti_bulk_out_callback, }; static struct usb_serial_driver ti_2port_device = { .driver = { .owner = THIS_MODULE, .name = "ti_usb_3410_5052_2", }, .description = "TI USB 5052 2 port adapter", .usb_driver = &ti_usb_driver, .id_table = ti_id_table_5052, .num_ports = 2, .attach = ti_startup, .release = ti_release, .open = ti_open, .close = ti_close, .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .ioctl = ti_ioctl, .set_termios = ti_set_termios, .tiocmget = ti_tiocmget, .tiocmset = ti_tiocmset, .get_icount = ti_get_icount, .break_ctl = ti_break, .read_int_callback = ti_interrupt_callback, .read_bulk_callback = ti_bulk_in_callback, .write_bulk_callback = ti_bulk_out_callback, }; /* Module */ MODULE_AUTHOR(TI_DRIVER_AUTHOR); MODULE_DESCRIPTION(TI_DRIVER_DESC); MODULE_VERSION(TI_DRIVER_VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("ti_3410.fw"); MODULE_FIRMWARE("ti_5052.fw"); MODULE_FIRMWARE("mts_cdma.fw"); MODULE_FIRMWARE("mts_gsm.fw"); MODULE_FIRMWARE("mts_edge.fw"); MODULE_FIRMWARE("mts_mt9234mu.fw"); MODULE_FIRMWARE("mts_mt9234zba.fw"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); module_param(closing_wait, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain in close, in .01 secs, default 
is 4000"); module_param_array(vendor_3410, ushort, &vendor_3410_count, S_IRUGO); MODULE_PARM_DESC(vendor_3410, "Vendor ids for 3410 based devices, 1-5 short integers"); module_param_array(product_3410, ushort, &product_3410_count, S_IRUGO); MODULE_PARM_DESC(product_3410, "Product ids for 3410 based devices, 1-5 short integers"); module_param_array(vendor_5052, ushort, &vendor_5052_count, S_IRUGO); MODULE_PARM_DESC(vendor_5052, "Vendor ids for 5052 based devices, 1-5 short integers"); module_param_array(product_5052, ushort, &product_5052_count, S_IRUGO); MODULE_PARM_DESC(product_5052, "Product ids for 5052 based devices, 1-5 short integers"); MODULE_DEVICE_TABLE(usb, ti_id_table_combined); /* Functions */ static int __init ti_init(void) { int i, j, c; int ret; /* insert extra vendor and product ids */ c = ARRAY_SIZE(ti_id_table_combined) - 2 * TI_EXTRA_VID_PID_COUNT - 1; j = ARRAY_SIZE(ti_id_table_3410) - TI_EXTRA_VID_PID_COUNT - 1; for (i = 0; i < min(vendor_3410_count, product_3410_count); i++, j++, c++) { ti_id_table_3410[j].idVendor = vendor_3410[i]; ti_id_table_3410[j].idProduct = product_3410[i]; ti_id_table_3410[j].match_flags = USB_DEVICE_ID_MATCH_DEVICE; ti_id_table_combined[c].idVendor = vendor_3410[i]; ti_id_table_combined[c].idProduct = product_3410[i]; ti_id_table_combined[c].match_flags = USB_DEVICE_ID_MATCH_DEVICE; } j = ARRAY_SIZE(ti_id_table_5052) - TI_EXTRA_VID_PID_COUNT - 1; for (i = 0; i < min(vendor_5052_count, product_5052_count); i++, j++, c++) { ti_id_table_5052[j].idVendor = vendor_5052[i]; ti_id_table_5052[j].idProduct = product_5052[i]; ti_id_table_5052[j].match_flags = USB_DEVICE_ID_MATCH_DEVICE; ti_id_table_combined[c].idVendor = vendor_5052[i]; ti_id_table_combined[c].idProduct = product_5052[i]; ti_id_table_combined[c].match_flags = USB_DEVICE_ID_MATCH_DEVICE; } ret = usb_serial_register(&ti_1port_device); if (ret) goto failed_1port; ret = usb_serial_register(&ti_2port_device); if (ret) goto failed_2port; ret = 
usb_register(&ti_usb_driver); if (ret) goto failed_usb; printk(KERN_INFO KBUILD_MODNAME ": " TI_DRIVER_VERSION ":" TI_DRIVER_DESC "\n"); return 0; failed_usb: usb_serial_deregister(&ti_2port_device); failed_2port: usb_serial_deregister(&ti_1port_device); failed_1port: return ret; } static void __exit ti_exit(void) { usb_deregister(&ti_usb_driver); usb_serial_deregister(&ti_1port_device); usb_serial_deregister(&ti_2port_device); } module_init(ti_init); module_exit(ti_exit); static int ti_startup(struct usb_serial *serial) { struct ti_device *tdev; struct ti_port *tport; struct usb_device *dev = serial->dev; int status; int i; dbg("%s - product 0x%4X, num configurations %d, configuration value %d", __func__, le16_to_cpu(dev->descriptor.idProduct), dev->descriptor.bNumConfigurations, dev->actconfig->desc.bConfigurationValue); /* create device structure */ tdev = kzalloc(sizeof(struct ti_device), GFP_KERNEL); if (tdev == NULL) { dev_err(&dev->dev, "%s - out of memory\n", __func__); return -ENOMEM; } mutex_init(&tdev->td_open_close_lock); tdev->td_serial = serial; usb_set_serial_data(serial, tdev); /* determine device type */ if (serial->type == &ti_1port_device) tdev->td_is_3410 = 1; dbg("%s - device type is %s", __func__, tdev->td_is_3410 ? "3410" : "5052"); /* if we have only 1 configuration, download firmware */ if (dev->descriptor.bNumConfigurations == 1) { if ((status = ti_download_firmware(tdev)) != 0) goto free_tdev; /* 3410 must be reset, 5052 resets itself */ if (tdev->td_is_3410) { msleep_interruptible(100); usb_reset_device(dev); } status = -ENODEV; goto free_tdev; } /* the second configuration must be set */ if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) { status = usb_driver_set_configuration(dev, TI_ACTIVE_CONFIG); status = status ? 
status : -ENODEV; goto free_tdev; } /* set up port structures */ for (i = 0; i < serial->num_ports; ++i) { tport = kzalloc(sizeof(struct ti_port), GFP_KERNEL); if (tport == NULL) { dev_err(&dev->dev, "%s - out of memory\n", __func__); status = -ENOMEM; goto free_tports; } spin_lock_init(&tport->tp_lock); tport->tp_uart_base_addr = (i == 0 ? TI_UART1_BASE_ADDR : TI_UART2_BASE_ADDR); tport->tp_closing_wait = closing_wait; init_waitqueue_head(&tport->tp_msr_wait); init_waitqueue_head(&tport->tp_write_wait); if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) { dev_err(&dev->dev, "%s - out of memory\n", __func__); kfree(tport); status = -ENOMEM; goto free_tports; } tport->tp_port = serial->port[i]; tport->tp_tdev = tdev; usb_set_serial_port_data(serial->port[i], tport); tport->tp_uart_mode = 0; /* default is RS232 */ } return 0; free_tports: for (--i; i >= 0; --i) { tport = usb_get_serial_port_data(serial->port[i]); kfifo_free(&tport->write_fifo); kfree(tport); usb_set_serial_port_data(serial->port[i], NULL); } free_tdev: kfree(tdev); usb_set_serial_data(serial, NULL); return status; } static void ti_release(struct usb_serial *serial) { int i; struct ti_device *tdev = usb_get_serial_data(serial); struct ti_port *tport; dbg("%s", __func__); for (i = 0; i < serial->num_ports; ++i) { tport = usb_get_serial_port_data(serial->port[i]); if (tport) { kfifo_free(&tport->write_fifo); kfree(tport); } } kfree(tdev); } static int ti_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ti_port *tport = usb_get_serial_port_data(port); struct ti_device *tdev; struct usb_device *dev; struct urb *urb; int port_number; int status; __u16 open_settings = (__u8)(TI_PIPE_MODE_CONTINOUS | TI_PIPE_TIMEOUT_ENABLE | (TI_TRANSFER_TIMEOUT << 2)); dbg("%s - port %d", __func__, port->number); if (tport == NULL) return -ENODEV; dev = port->serial->dev; tdev = tport->tp_tdev; /* only one open on any port on a device at a time */ if 
(mutex_lock_interruptible(&tdev->td_open_close_lock)) return -ERESTARTSYS; port_number = port->number - port->serial->minor; memset(&(tport->tp_icount), 0x00, sizeof(tport->tp_icount)); tport->tp_msr = 0; tport->tp_shadow_mcr |= (TI_MCR_RTS | TI_MCR_DTR); /* start interrupt urb the first time a port is opened on this device */ if (tdev->td_open_port_count == 0) { dbg("%s - start interrupt in urb", __func__); urb = tdev->td_serial->port[0]->interrupt_in_urb; if (!urb) { dev_err(&port->dev, "%s - no interrupt urb\n", __func__); status = -EINVAL; goto release_lock; } urb->complete = ti_interrupt_callback; urb->context = tdev; urb->dev = dev; status = usb_submit_urb(urb, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s - submit interrupt urb failed, %d\n", __func__, status); goto release_lock; } } if (tty) ti_set_termios(tty, port, tty->termios); dbg("%s - sending TI_OPEN_PORT", __func__); status = ti_command_out_sync(tdev, TI_OPEN_PORT, (__u8)(TI_UART1_PORT + port_number), open_settings, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send open command, %d\n", __func__, status); goto unlink_int_urb; } dbg("%s - sending TI_START_PORT", __func__); status = ti_command_out_sync(tdev, TI_START_PORT, (__u8)(TI_UART1_PORT + port_number), 0, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send start command, %d\n", __func__, status); goto unlink_int_urb; } dbg("%s - sending TI_PURGE_PORT", __func__); status = ti_command_out_sync(tdev, TI_PURGE_PORT, (__u8)(TI_UART1_PORT + port_number), TI_PURGE_INPUT, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot clear input buffers, %d\n", __func__, status); goto unlink_int_urb; } status = ti_command_out_sync(tdev, TI_PURGE_PORT, (__u8)(TI_UART1_PORT + port_number), TI_PURGE_OUTPUT, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot clear output buffers, %d\n", __func__, status); goto unlink_int_urb; } /* reset the data toggle on the bulk endpoints to work around bug in * host controllers where things get 
out of sync some times */ usb_clear_halt(dev, port->write_urb->pipe); usb_clear_halt(dev, port->read_urb->pipe); if (tty) ti_set_termios(tty, port, tty->termios); dbg("%s - sending TI_OPEN_PORT (2)", __func__); status = ti_command_out_sync(tdev, TI_OPEN_PORT, (__u8)(TI_UART1_PORT + port_number), open_settings, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send open command (2), %d\n", __func__, status); goto unlink_int_urb; } dbg("%s - sending TI_START_PORT (2)", __func__); status = ti_command_out_sync(tdev, TI_START_PORT, (__u8)(TI_UART1_PORT + port_number), 0, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send start command (2), %d\n", __func__, status); goto unlink_int_urb; } /* start read urb */ dbg("%s - start read urb", __func__); urb = port->read_urb; if (!urb) { dev_err(&port->dev, "%s - no read urb\n", __func__); status = -EINVAL; goto unlink_int_urb; } tport->tp_read_urb_state = TI_READ_URB_RUNNING; urb->complete = ti_bulk_in_callback; urb->context = tport; urb->dev = dev; status = usb_submit_urb(urb, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s - submit read urb failed, %d\n", __func__, status); goto unlink_int_urb; } tport->tp_is_open = 1; ++tdev->td_open_port_count; goto release_lock; unlink_int_urb: if (tdev->td_open_port_count == 0) usb_kill_urb(port->serial->port[0]->interrupt_in_urb); release_lock: mutex_unlock(&tdev->td_open_close_lock); dbg("%s - exit %d", __func__, status); return status; } static void ti_close(struct usb_serial_port *port) { struct ti_device *tdev; struct ti_port *tport; int port_number; int status; int do_unlock; dbg("%s - port %d", __func__, port->number); tdev = usb_get_serial_data(port->serial); tport = usb_get_serial_port_data(port); if (tdev == NULL || tport == NULL) return; tport->tp_is_open = 0; ti_drain(tport, (tport->tp_closing_wait*HZ)/100, 1); usb_kill_urb(port->read_urb); usb_kill_urb(port->write_urb); tport->tp_write_urb_in_use = 0; port_number = port->number - port->serial->minor; 
dbg("%s - sending TI_CLOSE_PORT", __func__); status = ti_command_out_sync(tdev, TI_CLOSE_PORT, (__u8)(TI_UART1_PORT + port_number), 0, NULL, 0); if (status) dev_err(&port->dev, "%s - cannot send close port command, %d\n" , __func__, status); /* if mutex_lock is interrupted, continue anyway */ do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock); --tport->tp_tdev->td_open_port_count; if (tport->tp_tdev->td_open_port_count <= 0) { /* last port is closed, shut down interrupt urb */ usb_kill_urb(port->serial->port[0]->interrupt_in_urb); tport->tp_tdev->td_open_port_count = 0; } if (do_unlock) mutex_unlock(&tdev->td_open_close_lock); dbg("%s - exit", __func__); } static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count) { struct ti_port *tport = usb_get_serial_port_data(port); dbg("%s - port %d", __func__, port->number); if (count == 0) { dbg("%s - write request of 0 bytes", __func__); return 0; } if (tport == NULL || !tport->tp_is_open) return -ENODEV; count = kfifo_in_locked(&tport->write_fifo, data, count, &tport->tp_lock); ti_send(tport); return count; } static int ti_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); int room = 0; unsigned long flags; dbg("%s - port %d", __func__, port->number); if (tport == NULL) return 0; spin_lock_irqsave(&tport->tp_lock, flags); room = kfifo_avail(&tport->write_fifo); spin_unlock_irqrestore(&tport->tp_lock, flags); dbg("%s - returns %d", __func__, room); return room; } static int ti_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); int chars = 0; unsigned long flags; dbg("%s - port %d", __func__, port->number); if (tport == NULL) return 0; spin_lock_irqsave(&tport->tp_lock, flags); chars = kfifo_len(&tport->write_fifo); spin_unlock_irqrestore(&tport->tp_lock, flags); dbg("%s - 
returns %d", __func__, chars); return chars; } static void ti_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); dbg("%s - port %d", __func__, port->number); if (tport == NULL) return; if (I_IXOFF(tty) || C_CRTSCTS(tty)) ti_stop_read(tport, tty); } static void ti_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); int status; dbg("%s - port %d", __func__, port->number); if (tport == NULL) return; if (I_IXOFF(tty) || C_CRTSCTS(tty)) { status = ti_restart_read(tport, tty); if (status) dev_err(&port->dev, "%s - cannot restart read, %d\n", __func__, status); } } static int ti_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); struct async_icount cnow = tport->tp_icount; dbg("%s - (%d) TIOCGICOUNT RX=%d, TX=%d", __func__, port->number, cnow.rx, cnow.tx); icount->cts = cnow.cts; icount->dsr = cnow.dsr; icount->rng = cnow.rng; icount->dcd = cnow.dcd; icount->rx = cnow.rx; icount->tx = cnow.tx; icount->frame = cnow.frame; icount->overrun = cnow.overrun; icount->parity = cnow.parity; icount->brk = cnow.brk; icount->buf_overrun = cnow.buf_overrun; return 0; } static int ti_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); struct async_icount cnow; struct async_icount cprev; dbg("%s - port %d, cmd = 0x%04X", __func__, port->number, cmd); if (tport == NULL) return -ENODEV; switch (cmd) { case TIOCGSERIAL: dbg("%s - (%d) TIOCGSERIAL", __func__, port->number); return ti_get_serial_info(tport, (struct serial_struct __user *)arg); case TIOCSSERIAL: dbg("%s - (%d) TIOCSSERIAL", __func__, port->number); return ti_set_serial_info(tty, tport, (struct 
serial_struct __user *)arg); case TIOCMIWAIT: dbg("%s - (%d) TIOCMIWAIT", __func__, port->number); cprev = tport->tp_icount; while (1) { interruptible_sleep_on(&tport->tp_msr_wait); if (signal_pending(current)) return -ERESTARTSYS; cnow = tport->tp_icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) return -EIO; /* no change => error */ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) return 0; cprev = cnow; } break; } return -ENOIOCTLCMD; } static void ti_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { struct ti_port *tport = usb_get_serial_port_data(port); struct ti_uart_config *config; tcflag_t cflag, iflag; int baud; int status; int port_number = port->number - port->serial->minor; unsigned int mcr; dbg("%s - port %d", __func__, port->number); cflag = tty->termios->c_cflag; iflag = tty->termios->c_iflag; dbg("%s - cflag %08x, iflag %08x", __func__, cflag, iflag); dbg("%s - old clfag %08x, old iflag %08x", __func__, old_termios->c_cflag, old_termios->c_iflag); if (tport == NULL) return; config = kmalloc(sizeof(*config), GFP_KERNEL); if (!config) { dev_err(&port->dev, "%s - out of memory\n", __func__); return; } config->wFlags = 0; /* these flags must be set */ config->wFlags |= TI_UART_ENABLE_MS_INTS; config->wFlags |= TI_UART_ENABLE_AUTO_START_DMA; config->bUartMode = (__u8)(tport->tp_uart_mode); switch (cflag & CSIZE) { case CS5: config->bDataBits = TI_UART_5_DATA_BITS; break; case CS6: config->bDataBits = TI_UART_6_DATA_BITS; break; case CS7: config->bDataBits = TI_UART_7_DATA_BITS; break; default: case CS8: config->bDataBits = TI_UART_8_DATA_BITS; break; } /* CMSPAR isn't supported by this driver */ tty->termios->c_cflag &= ~CMSPAR; if (cflag & PARENB) { if (cflag & PARODD) { config->wFlags |= 
TI_UART_ENABLE_PARITY_CHECKING; config->bParity = TI_UART_ODD_PARITY; } else { config->wFlags |= TI_UART_ENABLE_PARITY_CHECKING; config->bParity = TI_UART_EVEN_PARITY; } } else { config->wFlags &= ~TI_UART_ENABLE_PARITY_CHECKING; config->bParity = TI_UART_NO_PARITY; } if (cflag & CSTOPB) config->bStopBits = TI_UART_2_STOP_BITS; else config->bStopBits = TI_UART_1_STOP_BITS; if (cflag & CRTSCTS) { /* RTS flow control must be off to drop RTS for baud rate B0 */ if ((cflag & CBAUD) != B0) config->wFlags |= TI_UART_ENABLE_RTS_IN; config->wFlags |= TI_UART_ENABLE_CTS_OUT; } else { tty->hw_stopped = 0; ti_restart_read(tport, tty); } if (I_IXOFF(tty) || I_IXON(tty)) { config->cXon = START_CHAR(tty); config->cXoff = STOP_CHAR(tty); if (I_IXOFF(tty)) config->wFlags |= TI_UART_ENABLE_X_IN; else ti_restart_read(tport, tty); if (I_IXON(tty)) config->wFlags |= TI_UART_ENABLE_X_OUT; } baud = tty_get_baud_rate(tty); if (!baud) baud = 9600; if (tport->tp_tdev->td_is_3410) config->wBaudRate = (__u16)((923077 + baud/2) / baud); else config->wBaudRate = (__u16)((461538 + baud/2) / baud); /* FIXME: Should calculate resulting baud here and report it back */ if ((cflag & CBAUD) != B0) tty_encode_baud_rate(tty, baud, baud); dbg("%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d", __func__, baud, config->wBaudRate, config->wFlags, config->bDataBits, config->bParity, config->bStopBits, config->cXon, config->cXoff, config->bUartMode); cpu_to_be16s(&config->wBaudRate); cpu_to_be16s(&config->wFlags); status = ti_command_out_sync(tport->tp_tdev, TI_SET_CONFIG, (__u8)(TI_UART1_PORT + port_number), 0, (__u8 *)config, sizeof(*config)); if (status) dev_err(&port->dev, "%s - cannot set config on port %d, %d\n", __func__, port_number, status); /* SET_CONFIG asserts RTS and DTR, reset them correctly */ mcr = tport->tp_shadow_mcr; /* if baud rate is B0, clear RTS and DTR */ if ((cflag & CBAUD) == B0) mcr &= ~(TI_MCR_DTR | TI_MCR_RTS); 
status = ti_set_mcr(tport, mcr); if (status) dev_err(&port->dev, "%s - cannot set modem control on port %d, %d\n", __func__, port_number, status); kfree(config); } static int ti_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); unsigned int result; unsigned int msr; unsigned int mcr; unsigned long flags; dbg("%s - port %d", __func__, port->number); if (tport == NULL) return -ENODEV; spin_lock_irqsave(&tport->tp_lock, flags); msr = tport->tp_msr; mcr = tport->tp_shadow_mcr; spin_unlock_irqrestore(&tport->tp_lock, flags); result = ((mcr & TI_MCR_DTR) ? TIOCM_DTR : 0) | ((mcr & TI_MCR_RTS) ? TIOCM_RTS : 0) | ((mcr & TI_MCR_LOOP) ? TIOCM_LOOP : 0) | ((msr & TI_MSR_CTS) ? TIOCM_CTS : 0) | ((msr & TI_MSR_CD) ? TIOCM_CAR : 0) | ((msr & TI_MSR_RI) ? TIOCM_RI : 0) | ((msr & TI_MSR_DSR) ? TIOCM_DSR : 0); dbg("%s - 0x%04X", __func__, result); return result; } static int ti_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); unsigned int mcr; unsigned long flags; dbg("%s - port %d", __func__, port->number); if (tport == NULL) return -ENODEV; spin_lock_irqsave(&tport->tp_lock, flags); mcr = tport->tp_shadow_mcr; if (set & TIOCM_RTS) mcr |= TI_MCR_RTS; if (set & TIOCM_DTR) mcr |= TI_MCR_DTR; if (set & TIOCM_LOOP) mcr |= TI_MCR_LOOP; if (clear & TIOCM_RTS) mcr &= ~TI_MCR_RTS; if (clear & TIOCM_DTR) mcr &= ~TI_MCR_DTR; if (clear & TIOCM_LOOP) mcr &= ~TI_MCR_LOOP; spin_unlock_irqrestore(&tport->tp_lock, flags); return ti_set_mcr(tport, mcr); } static void ti_break(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); int status; dbg("%s - state = %d", __func__, break_state); if (tport == NULL) return; ti_drain(tport, (tport->tp_closing_wait*HZ)/100, 0); status = 
ti_write_byte(tport->tp_tdev, tport->tp_uart_base_addr + TI_UART_OFFSET_LCR, TI_LCR_BREAK, break_state == -1 ? TI_LCR_BREAK : 0); if (status) dbg("%s - error setting break, %d", __func__, status); } static void ti_interrupt_callback(struct urb *urb) { struct ti_device *tdev = urb->context; struct usb_serial_port *port; struct usb_serial *serial = tdev->td_serial; struct ti_port *tport; struct device *dev = &urb->dev->dev; unsigned char *data = urb->transfer_buffer; int length = urb->actual_length; int port_number; int function; int status = urb->status; int retval; __u8 msr; dbg("%s", __func__); switch (status) { case 0: break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dbg("%s - urb shutting down, %d", __func__, status); tdev->td_urb_error = 1; return; default: dev_err(dev, "%s - nonzero urb status, %d\n", __func__, status); tdev->td_urb_error = 1; goto exit; } if (length != 2) { dbg("%s - bad packet size, %d", __func__, length); goto exit; } if (data[0] == TI_CODE_HARDWARE_ERROR) { dev_err(dev, "%s - hardware error, %d\n", __func__, data[1]); goto exit; } port_number = TI_GET_PORT_FROM_CODE(data[0]); function = TI_GET_FUNC_FROM_CODE(data[0]); dbg("%s - port_number %d, function %d, data 0x%02X", __func__, port_number, function, data[1]); if (port_number >= serial->num_ports) { dev_err(dev, "%s - bad port number, %d\n", __func__, port_number); goto exit; } port = serial->port[port_number]; tport = usb_get_serial_port_data(port); if (!tport) goto exit; switch (function) { case TI_CODE_DATA_ERROR: dev_err(dev, "%s - DATA ERROR, port %d, data 0x%02X\n", __func__, port_number, data[1]); break; case TI_CODE_MODEM_STATUS: msr = data[1]; dbg("%s - port %d, msr 0x%02X", __func__, port_number, msr); ti_handle_new_msr(tport, msr); break; default: dev_err(dev, "%s - unknown interrupt code, 0x%02X\n", __func__, data[1]); break; } exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "%s - resubmit interrupt urb failed, %d\n", __func__, retval); } 
static void ti_bulk_in_callback(struct urb *urb) { struct ti_port *tport = urb->context; struct usb_serial_port *port = tport->tp_port; struct device *dev = &urb->dev->dev; int status = urb->status; int retval = 0; struct tty_struct *tty; dbg("%s", __func__); switch (status) { case 0: break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dbg("%s - urb shutting down, %d", __func__, status); tport->tp_tdev->td_urb_error = 1; wake_up_interruptible(&tport->tp_write_wait); return; default: dev_err(dev, "%s - nonzero urb status, %d\n", __func__, status); tport->tp_tdev->td_urb_error = 1; wake_up_interruptible(&tport->tp_write_wait); } if (status == -EPIPE) goto exit; if (status) { dev_err(dev, "%s - stopping read!\n", __func__); return; } tty = tty_port_tty_get(&port->port); if (tty) { if (urb->actual_length) { usb_serial_debug_data(debug, dev, __func__, urb->actual_length, urb->transfer_buffer); if (!tport->tp_is_open) dbg("%s - port closed, dropping data", __func__); else ti_recv(&urb->dev->dev, tty, urb->transfer_buffer, urb->actual_length); spin_lock(&tport->tp_lock); tport->tp_icount.rx += urb->actual_length; spin_unlock(&tport->tp_lock); } tty_kref_put(tty); } exit: /* continue to read unless stopping */ spin_lock(&tport->tp_lock); if (tport->tp_read_urb_state == TI_READ_URB_RUNNING) { urb->dev = port->serial->dev; retval = usb_submit_urb(urb, GFP_ATOMIC); } else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING) { tport->tp_read_urb_state = TI_READ_URB_STOPPED; } spin_unlock(&tport->tp_lock); if (retval) dev_err(dev, "%s - resubmit read urb failed, %d\n", __func__, retval); } static void ti_bulk_out_callback(struct urb *urb) { struct ti_port *tport = urb->context; struct usb_serial_port *port = tport->tp_port; struct device *dev = &urb->dev->dev; int status = urb->status; dbg("%s - port %d", __func__, port->number); tport->tp_write_urb_in_use = 0; switch (status) { case 0: break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dbg("%s - urb shutting down, 
%d", __func__, status); tport->tp_tdev->td_urb_error = 1; wake_up_interruptible(&tport->tp_write_wait); return; default: dev_err(dev, "%s - nonzero urb status, %d\n", __func__, status); tport->tp_tdev->td_urb_error = 1; wake_up_interruptible(&tport->tp_write_wait); } /* send any buffered data */ ti_send(tport); } static void ti_recv(struct device *dev, struct tty_struct *tty, unsigned char *data, int length) { int cnt; do { cnt = tty_insert_flip_string(tty, data, length); if (cnt < length) { dev_err(dev, "%s - dropping data, %d bytes lost\n", __func__, length - cnt); if (cnt == 0) break; } tty_flip_buffer_push(tty); data += cnt; length -= cnt; } while (length > 0); } static void ti_send(struct ti_port *tport) { int count, result; struct usb_serial_port *port = tport->tp_port; struct tty_struct *tty = tty_port_tty_get(&port->port); /* FIXME */ unsigned long flags; dbg("%s - port %d", __func__, port->number); spin_lock_irqsave(&tport->tp_lock, flags); if (tport->tp_write_urb_in_use) goto unlock; count = kfifo_out(&tport->write_fifo, port->write_urb->transfer_buffer, port->bulk_out_size); if (count == 0) goto unlock; tport->tp_write_urb_in_use = 1; spin_unlock_irqrestore(&tport->tp_lock, flags); usb_serial_debug_data(debug, &port->dev, __func__, count, port->write_urb->transfer_buffer); usb_fill_bulk_urb(port->write_urb, port->serial->dev, usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress), port->write_urb->transfer_buffer, count, ti_bulk_out_callback, tport); result = usb_submit_urb(port->write_urb, GFP_ATOMIC); if (result) { dev_err(&port->dev, "%s - submit write urb failed, %d\n", __func__, result); tport->tp_write_urb_in_use = 0; /* TODO: reschedule ti_send */ } else { spin_lock_irqsave(&tport->tp_lock, flags); tport->tp_icount.tx += count; spin_unlock_irqrestore(&tport->tp_lock, flags); } /* more room in the buffer for new writes, wakeup */ if (tty) tty_wakeup(tty); tty_kref_put(tty); wake_up_interruptible(&tport->tp_write_wait); return; unlock: 
spin_unlock_irqrestore(&tport->tp_lock, flags); tty_kref_put(tty); return; } static int ti_set_mcr(struct ti_port *tport, unsigned int mcr) { unsigned long flags; int status; status = ti_write_byte(tport->tp_tdev, tport->tp_uart_base_addr + TI_UART_OFFSET_MCR, TI_MCR_RTS | TI_MCR_DTR | TI_MCR_LOOP, mcr); spin_lock_irqsave(&tport->tp_lock, flags); if (!status) tport->tp_shadow_mcr = mcr; spin_unlock_irqrestore(&tport->tp_lock, flags); return status; } static int ti_get_lsr(struct ti_port *tport) { int size, status; struct ti_device *tdev = tport->tp_tdev; struct usb_serial_port *port = tport->tp_port; int port_number = port->number - port->serial->minor; struct ti_port_status *data; dbg("%s - port %d", __func__, port->number); size = sizeof(struct ti_port_status); data = kmalloc(size, GFP_KERNEL); if (!data) { dev_err(&port->dev, "%s - out of memory\n", __func__); return -ENOMEM; } status = ti_command_in_sync(tdev, TI_GET_PORT_STATUS, (__u8)(TI_UART1_PORT+port_number), 0, (__u8 *)data, size); if (status) { dev_err(&port->dev, "%s - get port status command failed, %d\n", __func__, status); goto free_data; } dbg("%s - lsr 0x%02X", __func__, data->bLSR); tport->tp_lsr = data->bLSR; free_data: kfree(data); return status; } static int ti_get_serial_info(struct ti_port *tport, struct serial_struct __user *ret_arg) { struct usb_serial_port *port = tport->tp_port; struct serial_struct ret_serial; if (!ret_arg) return -EFAULT; memset(&ret_serial, 0, sizeof(ret_serial)); ret_serial.type = PORT_16550A; ret_serial.line = port->serial->minor; ret_serial.port = port->number - port->serial->minor; ret_serial.flags = tport->tp_flags; ret_serial.xmit_fifo_size = TI_WRITE_BUF_SIZE; ret_serial.baud_base = tport->tp_tdev->td_is_3410 ? 
921600 : 460800; ret_serial.closing_wait = tport->tp_closing_wait; if (copy_to_user(ret_arg, &ret_serial, sizeof(*ret_arg))) return -EFAULT; return 0; } static int ti_set_serial_info(struct tty_struct *tty, struct ti_port *tport, struct serial_struct __user *new_arg) { struct serial_struct new_serial; if (copy_from_user(&new_serial, new_arg, sizeof(new_serial))) return -EFAULT; tport->tp_flags = new_serial.flags & TI_SET_SERIAL_FLAGS; tport->tp_closing_wait = new_serial.closing_wait; return 0; } static void ti_handle_new_msr(struct ti_port *tport, __u8 msr) { struct async_icount *icount; struct tty_struct *tty; unsigned long flags; dbg("%s - msr 0x%02X", __func__, msr); if (msr & TI_MSR_DELTA_MASK) { spin_lock_irqsave(&tport->tp_lock, flags); icount = &tport->tp_icount; if (msr & TI_MSR_DELTA_CTS) icount->cts++; if (msr & TI_MSR_DELTA_DSR) icount->dsr++; if (msr & TI_MSR_DELTA_CD) icount->dcd++; if (msr & TI_MSR_DELTA_RI) icount->rng++; wake_up_interruptible(&tport->tp_msr_wait); spin_unlock_irqrestore(&tport->tp_lock, flags); } tport->tp_msr = msr & TI_MSR_MASK; /* handle CTS flow control */ tty = tty_port_tty_get(&tport->tp_port->port); if (tty && C_CRTSCTS(tty)) { if (msr & TI_MSR_CTS) { tty->hw_stopped = 0; tty_wakeup(tty); } else { tty->hw_stopped = 1; } } tty_kref_put(tty); } static void ti_drain(struct ti_port *tport, unsigned long timeout, int flush) { struct ti_device *tdev = tport->tp_tdev; struct usb_serial_port *port = tport->tp_port; wait_queue_t wait; dbg("%s - port %d", __func__, port->number); spin_lock_irq(&tport->tp_lock); /* wait for data to drain from the buffer */ tdev->td_urb_error = 0; init_waitqueue_entry(&wait, current); add_wait_queue(&tport->tp_write_wait, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (kfifo_len(&tport->write_fifo) == 0 || timeout == 0 || signal_pending(current) || tdev->td_urb_error || port->serial->disconnected) /* disconnect */ break; spin_unlock_irq(&tport->tp_lock); timeout = schedule_timeout(timeout); 
spin_lock_irq(&tport->tp_lock); } set_current_state(TASK_RUNNING); remove_wait_queue(&tport->tp_write_wait, &wait); /* flush any remaining data in the buffer */ if (flush) kfifo_reset_out(&tport->write_fifo); spin_unlock_irq(&tport->tp_lock); mutex_lock(&port->serial->disc_mutex); /* wait for data to drain from the device */ /* wait for empty tx register, plus 20 ms */ timeout += jiffies; tport->tp_lsr &= ~TI_LSR_TX_EMPTY; while ((long)(jiffies - timeout) < 0 && !signal_pending(current) && !(tport->tp_lsr&TI_LSR_TX_EMPTY) && !tdev->td_urb_error && !port->serial->disconnected) { if (ti_get_lsr(tport)) break; mutex_unlock(&port->serial->disc_mutex); msleep_interruptible(20); mutex_lock(&port->serial->disc_mutex); } mutex_unlock(&port->serial->disc_mutex); } static void ti_stop_read(struct ti_port *tport, struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&tport->tp_lock, flags); if (tport->tp_read_urb_state == TI_READ_URB_RUNNING) tport->tp_read_urb_state = TI_READ_URB_STOPPING; spin_unlock_irqrestore(&tport->tp_lock, flags); } static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty) { struct urb *urb; int status = 0; unsigned long flags; spin_lock_irqsave(&tport->tp_lock, flags); if (tport->tp_read_urb_state == TI_READ_URB_STOPPED) { tport->tp_read_urb_state = TI_READ_URB_RUNNING; urb = tport->tp_port->read_urb; spin_unlock_irqrestore(&tport->tp_lock, flags); urb->complete = ti_bulk_in_callback; urb->context = tport; urb->dev = tport->tp_port->serial->dev; status = usb_submit_urb(urb, GFP_KERNEL); } else { tport->tp_read_urb_state = TI_READ_URB_RUNNING; spin_unlock_irqrestore(&tport->tp_lock, flags); } return status; } static int ti_command_out_sync(struct ti_device *tdev, __u8 command, __u16 moduleid, __u16 value, __u8 *data, int size) { int status; status = usb_control_msg(tdev->td_serial->dev, usb_sndctrlpipe(tdev->td_serial->dev, 0), command, (USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT), value, moduleid, data, size, 1000); if 
(status == size) status = 0; if (status > 0) status = -ECOMM; return status; } static int ti_command_in_sync(struct ti_device *tdev, __u8 command, __u16 moduleid, __u16 value, __u8 *data, int size) { int status; status = usb_control_msg(tdev->td_serial->dev, usb_rcvctrlpipe(tdev->td_serial->dev, 0), command, (USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN), value, moduleid, data, size, 1000); if (status == size) status = 0; if (status > 0) status = -ECOMM; return status; } static int ti_write_byte(struct ti_device *tdev, unsigned long addr, __u8 mask, __u8 byte) { int status; unsigned int size; struct ti_write_data_bytes *data; struct device *dev = &tdev->td_serial->dev->dev; dbg("%s - addr 0x%08lX, mask 0x%02X, byte 0x%02X", __func__, addr, mask, byte); size = sizeof(struct ti_write_data_bytes) + 2; data = kmalloc(size, GFP_KERNEL); if (!data) { dev_err(dev, "%s - out of memory\n", __func__); return -ENOMEM; } data->bAddrType = TI_RW_DATA_ADDR_XDATA; data->bDataType = TI_RW_DATA_BYTE; data->bDataCounter = 1; data->wBaseAddrHi = cpu_to_be16(addr>>16); data->wBaseAddrLo = cpu_to_be16(addr); data->bData[0] = mask; data->bData[1] = byte; status = ti_command_out_sync(tdev, TI_WRITE_DATA, TI_RAM_PORT, 0, (__u8 *)data, size); if (status < 0) dev_err(dev, "%s - failed, %d\n", __func__, status); kfree(data); return status; } static int ti_do_download(struct usb_device *dev, int pipe, u8 *buffer, int size) { int pos; u8 cs = 0; int done; struct ti_firmware_header *header; int status = 0; int len; for (pos = sizeof(struct ti_firmware_header); pos < size; pos++) cs = (__u8)(cs + buffer[pos]); header = (struct ti_firmware_header *)buffer; header->wLength = cpu_to_le16((__u16)(size - sizeof(struct ti_firmware_header))); header->bCheckSum = cs; dbg("%s - downloading firmware", __func__); for (pos = 0; pos < size; pos += done) { len = min(size - pos, TI_DOWNLOAD_MAX_PACKET_SIZE); status = usb_bulk_msg(dev, pipe, buffer + pos, len, &done, 1000); if (status) break; } return 
status; } static int ti_download_firmware(struct ti_device *tdev) { int status; int buffer_size; __u8 *buffer; struct usb_device *dev = tdev->td_serial->dev; unsigned int pipe = usb_sndbulkpipe(dev, tdev->td_serial->port[0]->bulk_out_endpointAddress); const struct firmware *fw_p; char buf[32]; dbg("%s\n", __func__); /* try ID specific firmware first, then try generic firmware */ sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, dev->descriptor.idProduct); if ((status = request_firmware(&fw_p, buf, &dev->dev)) != 0) { buf[0] = '\0'; if (dev->descriptor.idVendor == MTS_VENDOR_ID) { switch (dev->descriptor.idProduct) { case MTS_CDMA_PRODUCT_ID: strcpy(buf, "mts_cdma.fw"); break; case MTS_GSM_PRODUCT_ID: strcpy(buf, "mts_gsm.fw"); break; case MTS_EDGE_PRODUCT_ID: strcpy(buf, "mts_edge.fw"); break; case MTS_MT9234MU_PRODUCT_ID: strcpy(buf, "mts_mt9234mu.fw"); break; case MTS_MT9234ZBA_PRODUCT_ID: strcpy(buf, "mts_mt9234zba.fw"); break; case MTS_MT9234ZBAOLD_PRODUCT_ID: strcpy(buf, "mts_mt9234zba.fw"); break; } } if (buf[0] == '\0') { if (tdev->td_is_3410) strcpy(buf, "ti_3410.fw"); else strcpy(buf, "ti_5052.fw"); } status = request_firmware(&fw_p, buf, &dev->dev); } if (status) { dev_err(&dev->dev, "%s - firmware not found\n", __func__); return -ENOENT; } if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size); release_firmware(fw_p); return -ENOENT; } buffer_size = TI_FIRMWARE_BUF_SIZE + sizeof(struct ti_firmware_header); buffer = kmalloc(buffer_size, GFP_KERNEL); if (buffer) { memcpy(buffer, fw_p->data, fw_p->size); memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size); status = ti_do_download(dev, pipe, buffer, fw_p->size); kfree(buffer); } else { dbg("%s ENOMEM\n", __func__); status = -ENOMEM; } release_firmware(fw_p); if (status) { dev_err(&dev->dev, "%s - error downloading firmware, %d\n", __func__, status); return status; } dbg("%s - download successful", __func__); return 0; }
gpl-2.0
jdkernel/mecha_sense_2.6.35
arch/arm/mach-mx25/mach-mx25pdk.c
778
4582
/* * Copyright 2009 Sascha Hauer, <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. */ #include <linux/types.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/fec.h> #include <linux/platform_device.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/memory.h> #include <asm/mach/map.h> #include <mach/common.h> #include <mach/imx-uart.h> #include <mach/mx25.h> #include <mach/mxc_nand.h> #include <mach/imxfb.h> #include "devices.h" #include <mach/iomux-mx25.h> static struct imxuart_platform_data uart_pdata = { .flags = IMXUART_HAVE_RTSCTS, }; static struct pad_desc mx25pdk_pads[] = { MX25_PAD_FEC_MDC__FEC_MDC, MX25_PAD_FEC_MDIO__FEC_MDIO, MX25_PAD_FEC_TDATA0__FEC_TDATA0, MX25_PAD_FEC_TDATA1__FEC_TDATA1, MX25_PAD_FEC_TX_EN__FEC_TX_EN, MX25_PAD_FEC_RDATA0__FEC_RDATA0, MX25_PAD_FEC_RDATA1__FEC_RDATA1, MX25_PAD_FEC_RX_DV__FEC_RX_DV, MX25_PAD_FEC_TX_CLK__FEC_TX_CLK, MX25_PAD_A17__GPIO_2_3, /* FEC_EN, GPIO 35 */ MX25_PAD_D12__GPIO_4_8, /* FEC_RESET_B, GPIO 104 */ /* LCD */ MX25_PAD_LD0__LD0, MX25_PAD_LD1__LD1, MX25_PAD_LD2__LD2, MX25_PAD_LD3__LD3, MX25_PAD_LD4__LD4, MX25_PAD_LD5__LD5, MX25_PAD_LD6__LD6, MX25_PAD_LD7__LD7, MX25_PAD_LD8__LD8, 
MX25_PAD_LD9__LD9, MX25_PAD_LD10__LD10, MX25_PAD_LD11__LD11, MX25_PAD_LD12__LD12, MX25_PAD_LD13__LD13, MX25_PAD_LD14__LD14, MX25_PAD_LD15__LD15, MX25_PAD_GPIO_E__LD16, MX25_PAD_GPIO_F__LD17, MX25_PAD_HSYNC__HSYNC, MX25_PAD_VSYNC__VSYNC, MX25_PAD_LSCLK__LSCLK, MX25_PAD_OE_ACD__OE_ACD, MX25_PAD_CONTRAST__CONTRAST, }; static struct fec_platform_data mx25_fec_pdata = { .phy = PHY_INTERFACE_MODE_RMII, }; #define FEC_ENABLE_GPIO 35 #define FEC_RESET_B_GPIO 104 static void __init mx25pdk_fec_reset(void) { gpio_request(FEC_ENABLE_GPIO, "FEC PHY enable"); gpio_request(FEC_RESET_B_GPIO, "FEC PHY reset"); gpio_direction_output(FEC_ENABLE_GPIO, 0); /* drop PHY power */ gpio_direction_output(FEC_RESET_B_GPIO, 0); /* assert reset */ udelay(2); /* turn on PHY power and lift reset */ gpio_set_value(FEC_ENABLE_GPIO, 1); gpio_set_value(FEC_RESET_B_GPIO, 1); } static struct mxc_nand_platform_data mx25pdk_nand_board_info = { .width = 1, .hw_ecc = 1, .flash_bbt = 1, }; static struct imx_fb_videomode mx25pdk_modes[] = { { .mode = { .name = "CRT-VGA", .refresh = 60, .xres = 640, .yres = 480, .pixclock = 39683, .left_margin = 45, .right_margin = 114, .upper_margin = 33, .lower_margin = 11, .hsync_len = 1, .vsync_len = 1, }, .bpp = 16, .pcr = 0xFA208B80, }, }; static struct imx_fb_platform_data mx25pdk_fb_pdata = { .mode = mx25pdk_modes, .num_modes = ARRAY_SIZE(mx25pdk_modes), .pwmr = 0x00A903FF, .lscr1 = 0x00120300, .dmacr = 0x00020010, }; static void __init mx25pdk_init(void) { mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads, ARRAY_SIZE(mx25pdk_pads)); mxc_register_device(&mxc_uart_device0, &uart_pdata); mxc_register_device(&mxc_usbh2, NULL); mxc_register_device(&mxc_nand_device, &mx25pdk_nand_board_info); mxc_register_device(&mx25_rtc_device, NULL); mxc_register_device(&mx25_fb_device, &mx25pdk_fb_pdata); mx25pdk_fec_reset(); mxc_register_device(&mx25_fec_device, &mx25_fec_pdata); } static void __init mx25pdk_timer_init(void) { mx25_clocks_init(); } static struct sys_timer mx25pdk_timer = 
{ .init = mx25pdk_timer_init, }; MACHINE_START(MX25_3DS, "Freescale MX25PDK (3DS)") /* Maintainer: Freescale Semiconductor, Inc. */ .phys_io = MX25_AIPS1_BASE_ADDR, .io_pg_offst = ((MX25_AIPS1_BASE_ADDR_VIRT) >> 18) & 0xfffc, .boot_params = MX25_PHYS_OFFSET + 0x100, .map_io = mx25_map_io, .init_irq = mx25_init_irq, .init_machine = mx25pdk_init, .timer = &mx25pdk_timer, MACHINE_END
gpl-2.0
pcamarillor/linux
drivers/mfd/wm8350-i2c.c
1034
2246
/* * wm8350-i2c.c -- Generic I2C driver for Wolfson WM8350 PMIC * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * * Author: Liam Girdwood * linux@wolfsonmicro.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/err.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/mfd/wm8350/core.h> #include <linux/regmap.h> #include <linux/slab.h> static int wm8350_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8350 *wm8350; struct wm8350_platform_data *pdata = dev_get_platdata(&i2c->dev); int ret = 0; wm8350 = devm_kzalloc(&i2c->dev, sizeof(struct wm8350), GFP_KERNEL); if (wm8350 == NULL) return -ENOMEM; wm8350->regmap = devm_regmap_init_i2c(i2c, &wm8350_regmap); if (IS_ERR(wm8350->regmap)) { ret = PTR_ERR(wm8350->regmap); dev_err(&i2c->dev, "Failed to allocate register map: %d\n", ret); return ret; } i2c_set_clientdata(i2c, wm8350); wm8350->dev = &i2c->dev; return wm8350_device_init(wm8350, i2c->irq, pdata); } static int wm8350_i2c_remove(struct i2c_client *i2c) { struct wm8350 *wm8350 = i2c_get_clientdata(i2c); wm8350_device_exit(wm8350); return 0; } static const struct i2c_device_id wm8350_i2c_id[] = { { "wm8350", 0 }, { "wm8351", 0 }, { "wm8352", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8350_i2c_id); static struct i2c_driver wm8350_i2c_driver = { .driver = { .name = "wm8350", .owner = THIS_MODULE, }, .probe = wm8350_i2c_probe, .remove = wm8350_i2c_remove, .id_table = wm8350_i2c_id, }; static int __init wm8350_i2c_init(void) { return i2c_add_driver(&wm8350_i2c_driver); } /* init early so consumer devices can complete system boot */ subsys_initcall(wm8350_i2c_init); static void __exit wm8350_i2c_exit(void) { 
i2c_del_driver(&wm8350_i2c_driver); } module_exit(wm8350_i2c_exit); MODULE_DESCRIPTION("I2C support for the WM8350 AudioPlus PMIC"); MODULE_LICENSE("GPL");
gpl-2.0
MoKee/android_kernel_zte_msm8994
drivers/tty/serial/8250/8250_pci.c
1034
129301
/* * Probe module for 8250/16550-type PCI serial ports. * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Copyright (C) 2001 Russell King, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/tty.h> #include <linux/serial_reg.h> #include <linux/serial_core.h> #include <linux/8250_pci.h> #include <linux/bitops.h> #include <asm/byteorder.h> #include <asm/io.h> #include "8250.h" #undef SERIAL_DEBUG_PCI /* * init function returns: * > 0 - number of ports * = 0 - use board->num_ports * < 0 - error */ struct pci_serial_quirk { u32 vendor; u32 device; u32 subvendor; u32 subdevice; int (*probe)(struct pci_dev *dev); int (*init)(struct pci_dev *dev); int (*setup)(struct serial_private *, const struct pciserial_board *, struct uart_8250_port *, int); void (*exit)(struct pci_dev *dev); }; #define PCI_NUM_BAR_RESOURCES 6 struct serial_private { struct pci_dev *dev; unsigned int nr; void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES]; struct pci_serial_quirk *quirk; int line[0]; }; static int pci_default_setup(struct serial_private*, const struct pciserial_board*, struct uart_8250_port *, int); static void moan_device(const char *str, struct pci_dev *dev) { printk(KERN_WARNING "%s: %s\n" "Please send the output of lspci -vv, this\n" "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" "manufacturer and name of serial board or\n" "modem board to rmk+serial@arm.linux.org.uk.\n", pci_name(dev), str, dev->vendor, dev->device, dev->subsystem_vendor, dev->subsystem_device); } static int setup_port(struct serial_private *priv, struct uart_8250_port *port, int bar, int offset, int regshift) { struct pci_dev 
*dev = priv->dev; unsigned long base, len; if (bar >= PCI_NUM_BAR_RESOURCES) return -EINVAL; base = pci_resource_start(dev, bar); if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) { len = pci_resource_len(dev, bar); if (!priv->remapped_bar[bar]) priv->remapped_bar[bar] = ioremap_nocache(base, len); if (!priv->remapped_bar[bar]) return -ENOMEM; port->port.iotype = UPIO_MEM; port->port.iobase = 0; port->port.mapbase = base + offset; port->port.membase = priv->remapped_bar[bar] + offset; port->port.regshift = regshift; } else { port->port.iotype = UPIO_PORT; port->port.iobase = base + offset; port->port.mapbase = 0; port->port.membase = NULL; port->port.regshift = 0; } return 0; } /* * ADDI-DATA GmbH communication cards <info@addi-data.com> */ static int addidata_apci7800_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { unsigned int bar = 0, offset = board->first_offset; bar = FL_GET_BASE(board->flags); if (idx < 2) { offset += idx * board->uart_offset; } else if ((idx >= 2) && (idx < 4)) { bar += 1; offset += ((idx - 2) * board->uart_offset); } else if ((idx >= 4) && (idx < 6)) { bar += 2; offset += ((idx - 4) * board->uart_offset); } else if (idx >= 6) { bar += 3; offset += ((idx - 6) * board->uart_offset); } return setup_port(priv, port, bar, offset, board->reg_shift); } /* * AFAVLAB uses a different mixture of BARs and offsets * Not that ugly ;) -- HW */ static int afavlab_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { unsigned int bar, offset = board->first_offset; bar = FL_GET_BASE(board->flags); if (idx < 4) bar += idx; else { bar = 4; offset += (idx - 4) * board->uart_offset; } return setup_port(priv, port, bar, offset, board->reg_shift); } /* * HP's Remote Management Console. The Diva chip came in several * different versions. 
N-class, L2000 and A500 have two Diva chips, each * with 3 UARTs (the third UART on the second chip is unused). Superdome * and Keystone have one Diva chip with 3 UARTs. Some later machines have * one Diva chip, but it has been expanded to 5 UARTs. */ static int pci_hp_diva_init(struct pci_dev *dev) { int rc = 0; switch (dev->subsystem_device) { case PCI_DEVICE_ID_HP_DIVA_TOSCA1: case PCI_DEVICE_ID_HP_DIVA_HALFDOME: case PCI_DEVICE_ID_HP_DIVA_KEYSTONE: case PCI_DEVICE_ID_HP_DIVA_EVEREST: rc = 3; break; case PCI_DEVICE_ID_HP_DIVA_TOSCA2: rc = 2; break; case PCI_DEVICE_ID_HP_DIVA_MAESTRO: rc = 4; break; case PCI_DEVICE_ID_HP_DIVA_POWERBAR: case PCI_DEVICE_ID_HP_DIVA_HURRICANE: rc = 1; break; } return rc; } /* * HP's Diva chip puts the 4th/5th serial port further out, and * some serial ports are supposed to be hidden on certain models. */ static int pci_hp_diva_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { unsigned int offset = board->first_offset; unsigned int bar = FL_GET_BASE(board->flags); switch (priv->dev->subsystem_device) { case PCI_DEVICE_ID_HP_DIVA_MAESTRO: if (idx == 3) idx++; break; case PCI_DEVICE_ID_HP_DIVA_EVEREST: if (idx > 0) idx++; if (idx > 2) idx++; break; } if (idx > 2) offset = 0x18; offset += idx * board->uart_offset; return setup_port(priv, port, bar, offset, board->reg_shift); } /* * Added for EKF Intel i960 serial boards */ static int pci_inteli960ni_init(struct pci_dev *dev) { unsigned long oldval; if (!(dev->subsystem_device & 0x1000)) return -ENODEV; /* is firmware started? */ pci_read_config_dword(dev, 0x44, (void *)&oldval); if (oldval == 0x00001000L) { /* RESET value */ printk(KERN_DEBUG "Local i960 firmware missing"); return -ENODEV; } return 0; } /* * Some PCI serial cards using the PLX 9050 PCI interface chip require * that the card interrupt be explicitly enabled or disabled. This * seems to be mainly needed on card using the PLX which also use I/O * mapped memory. 
*/ static int pci_plx9050_init(struct pci_dev *dev) { u8 irq_config; void __iomem *p; if ((pci_resource_flags(dev, 0) & IORESOURCE_MEM) == 0) { moan_device("no memory in bar 0", dev); return 0; } irq_config = 0x41; if (dev->vendor == PCI_VENDOR_ID_PANACOM || dev->subsystem_vendor == PCI_SUBVENDOR_ID_EXSYS) irq_config = 0x43; if ((dev->vendor == PCI_VENDOR_ID_PLX) && (dev->device == PCI_DEVICE_ID_PLX_ROMULUS)) /* * As the megawolf cards have the int pins active * high, and have 2 UART chips, both ints must be * enabled on the 9050. Also, the UARTS are set in * 16450 mode by default, so we have to enable the * 16C950 'enhanced' mode so that we can use the * deep FIFOs */ irq_config = 0x5b; /* * enable/disable interrupts */ p = ioremap_nocache(pci_resource_start(dev, 0), 0x80); if (p == NULL) return -ENOMEM; writel(irq_config, p + 0x4c); /* * Read the register back to ensure that it took effect. */ readl(p + 0x4c); iounmap(p); return 0; } static void pci_plx9050_exit(struct pci_dev *dev) { u8 __iomem *p; if ((pci_resource_flags(dev, 0) & IORESOURCE_MEM) == 0) return; /* * disable interrupts */ p = ioremap_nocache(pci_resource_start(dev, 0), 0x80); if (p != NULL) { writel(0, p + 0x4c); /* * Read the register back to ensure that it took effect. 
*/ readl(p + 0x4c); iounmap(p); } } #define NI8420_INT_ENABLE_REG 0x38 #define NI8420_INT_ENABLE_BIT 0x2000 static void pci_ni8420_exit(struct pci_dev *dev) { void __iomem *p; unsigned long base, len; unsigned int bar = 0; if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) { moan_device("no memory in bar", dev); return; } base = pci_resource_start(dev, bar); len = pci_resource_len(dev, bar); p = ioremap_nocache(base, len); if (p == NULL) return; /* Disable the CPU Interrupt */ writel(readl(p + NI8420_INT_ENABLE_REG) & ~(NI8420_INT_ENABLE_BIT), p + NI8420_INT_ENABLE_REG); iounmap(p); } /* MITE registers */ #define MITE_IOWBSR1 0xc4 #define MITE_IOWCR1 0xf4 #define MITE_LCIMR1 0x08 #define MITE_LCIMR2 0x10 #define MITE_LCIMR2_CLR_CPU_IE (1 << 30) static void pci_ni8430_exit(struct pci_dev *dev) { void __iomem *p; unsigned long base, len; unsigned int bar = 0; if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) { moan_device("no memory in bar", dev); return; } base = pci_resource_start(dev, bar); len = pci_resource_len(dev, bar); p = ioremap_nocache(base, len); if (p == NULL) return; /* Disable the CPU Interrupt */ writel(MITE_LCIMR2_CLR_CPU_IE, p + MITE_LCIMR2); iounmap(p); } /* SBS Technologies Inc. 
PMC-OCTPRO and P-OCTAL cards */ static int sbs_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { unsigned int bar, offset = board->first_offset; bar = 0; if (idx < 4) { /* first four channels map to 0, 0x100, 0x200, 0x300 */ offset += idx * board->uart_offset; } else if (idx < 8) { /* last four channels map to 0x1000, 0x1100, 0x1200, 0x1300 */ offset += idx * board->uart_offset + 0xC00; } else /* we have only 8 ports on PMC-OCTALPRO */ return 1; return setup_port(priv, port, bar, offset, board->reg_shift); } /* * This does initialization for PMC OCTALPRO cards: * maps the device memory, resets the UARTs (needed, bc * if the module is removed and inserted again, the card * is in the sleep mode) and enables global interrupt. */ /* global control register offset for SBS PMC-OctalPro */ #define OCT_REG_CR_OFF 0x500 static int sbs_init(struct pci_dev *dev) { u8 __iomem *p; p = pci_ioremap_bar(dev, 0); if (p == NULL) return -ENOMEM; /* Set bit-4 Control Register (UART RESET) in to reset the uarts */ writeb(0x10, p + OCT_REG_CR_OFF); udelay(50); writeb(0x0, p + OCT_REG_CR_OFF); /* Set bit-2 (INTENABLE) of Control Register */ writeb(0x4, p + OCT_REG_CR_OFF); iounmap(p); return 0; } /* * Disables the global interrupt of PMC-OctalPro */ static void sbs_exit(struct pci_dev *dev) { u8 __iomem *p; p = pci_ioremap_bar(dev, 0); /* FIXME: What if resource_len < OCT_REG_CR_OFF */ if (p != NULL) writeb(0, p + OCT_REG_CR_OFF); iounmap(p); } /* * SIIG serial cards have an PCI interface chip which also controls * the UART clocking frequency. Each UART can be clocked independently * (except cards equipped with 4 UARTs) and initial clocking settings * are stored in the EEPROM chip. It can cause problems because this * version of serial driver doesn't support differently clocked UART's * on single PCI card. To prevent this, initialization functions set * high frequency clocking for all UART's on given card. 
It is safe (I * hope) because it doesn't touch EEPROM settings to prevent conflicts * with other OSes (like M$ DOS). * * SIIG support added by Andrey Panin <pazke@donpac.ru>, 10/1999 * * There is two family of SIIG serial cards with different PCI * interface chip and different configuration methods: * - 10x cards have control registers in IO and/or memory space; * - 20x cards have control registers in standard PCI configuration space. * * Note: all 10x cards have PCI device ids 0x10.. * all 20x cards have PCI device ids 0x20.. * * There are also Quartet Serial cards which use Oxford Semiconductor * 16954 quad UART PCI chip clocked by 18.432 MHz quartz. * * Note: some SIIG cards are probed by the parport_serial object. */ #define PCI_DEVICE_ID_SIIG_1S_10x (PCI_DEVICE_ID_SIIG_1S_10x_550 & 0xfffc) #define PCI_DEVICE_ID_SIIG_2S_10x (PCI_DEVICE_ID_SIIG_2S_10x_550 & 0xfff8) static int pci_siig10x_init(struct pci_dev *dev) { u16 data; void __iomem *p; switch (dev->device & 0xfff8) { case PCI_DEVICE_ID_SIIG_1S_10x: /* 1S */ data = 0xffdf; break; case PCI_DEVICE_ID_SIIG_2S_10x: /* 2S, 2S1P */ data = 0xf7ff; break; default: /* 1S1P, 4S */ data = 0xfffb; break; } p = ioremap_nocache(pci_resource_start(dev, 0), 0x80); if (p == NULL) return -ENOMEM; writew(readw(p + 0x28) & data, p + 0x28); readw(p + 0x28); iounmap(p); return 0; } #define PCI_DEVICE_ID_SIIG_2S_20x (PCI_DEVICE_ID_SIIG_2S_20x_550 & 0xfffc) #define PCI_DEVICE_ID_SIIG_2S1P_20x (PCI_DEVICE_ID_SIIG_2S1P_20x_550 & 0xfffc) static int pci_siig20x_init(struct pci_dev *dev) { u8 data; /* Change clock frequency for the first UART. */ pci_read_config_byte(dev, 0x6f, &data); pci_write_config_byte(dev, 0x6f, data & 0xef); /* If this card has 2 UART, we have to do the same with second UART. 
*/ if (((dev->device & 0xfffc) == PCI_DEVICE_ID_SIIG_2S_20x) || ((dev->device & 0xfffc) == PCI_DEVICE_ID_SIIG_2S1P_20x)) { pci_read_config_byte(dev, 0x73, &data); pci_write_config_byte(dev, 0x73, data & 0xef); } return 0; } static int pci_siig_init(struct pci_dev *dev) { unsigned int type = dev->device & 0xff00; if (type == 0x1000) return pci_siig10x_init(dev); else if (type == 0x2000) return pci_siig20x_init(dev); moan_device("Unknown SIIG card", dev); return -ENODEV; } static int pci_siig_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { unsigned int bar = FL_GET_BASE(board->flags) + idx, offset = 0; if (idx > 3) { bar = 4; offset = (idx - 4) * 8; } return setup_port(priv, port, bar, offset, 0); } /* * Timedia has an explosion of boards, and to avoid the PCI table from * growing *huge*, we use this function to collapse some 70 entries * in the PCI table into one, for sanity's and compactness's sake. */ static const unsigned short timedia_single_port[] = { 0x4025, 0x4027, 0x4028, 0x5025, 0x5027, 0 }; static const unsigned short timedia_dual_port[] = { 0x0002, 0x4036, 0x4037, 0x4038, 0x4078, 0x4079, 0x4085, 0x4088, 0x4089, 0x5037, 0x5078, 0x5079, 0x5085, 0x6079, 0x7079, 0x8079, 0x8137, 0x8138, 0x8237, 0x8238, 0x9079, 0x9137, 0x9138, 0x9237, 0x9238, 0xA079, 0xB079, 0xC079, 0xD079, 0 }; static const unsigned short timedia_quad_port[] = { 0x4055, 0x4056, 0x4095, 0x4096, 0x5056, 0x8156, 0x8157, 0x8256, 0x8257, 0x9056, 0x9156, 0x9157, 0x9158, 0x9159, 0x9256, 0x9257, 0xA056, 0xA157, 0xA158, 0xA159, 0xB056, 0xB157, 0 }; static const unsigned short timedia_eight_port[] = { 0x4065, 0x4066, 0x5065, 0x5066, 0x8166, 0x9066, 0x9166, 0x9167, 0x9168, 0xA066, 0xA167, 0xA168, 0 }; static const struct timedia_struct { int num; const unsigned short *ids; } timedia_data[] = { { 1, timedia_single_port }, { 2, timedia_dual_port }, { 4, timedia_quad_port }, { 8, timedia_eight_port } }; /* * There are nearly 70 different 
Timedia/SUNIX PCI serial devices. Instead of * listing them individually, this driver merely grabs them all with * PCI_ANY_ID. Some of these devices, however, also feature a parallel port, * and should be left free to be claimed by parport_serial instead. */ static int pci_timedia_probe(struct pci_dev *dev) { /* * Check the third digit of the subdevice ID * (0,2,3,5,6: serial only -- 7,8,9: serial + parallel) */ if ((dev->subsystem_device & 0x00f0) >= 0x70) { dev_info(&dev->dev, "ignoring Timedia subdevice %04x for parport_serial\n", dev->subsystem_device); return -ENODEV; } return 0; } static int pci_timedia_init(struct pci_dev *dev) { const unsigned short *ids; int i, j; for (i = 0; i < ARRAY_SIZE(timedia_data); i++) { ids = timedia_data[i].ids; for (j = 0; ids[j]; j++) if (dev->subsystem_device == ids[j]) return timedia_data[i].num; } return 0; } /* * Timedia/SUNIX uses a mixture of BARs and offsets * Ugh, this is ugly as all hell --- TYT */ static int pci_timedia_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { unsigned int bar = 0, offset = board->first_offset; switch (idx) { case 0: bar = 0; break; case 1: offset = board->uart_offset; bar = 0; break; case 2: bar = 1; break; case 3: offset = board->uart_offset; /* FALLTHROUGH */ case 4: /* BAR 2 */ case 5: /* BAR 3 */ case 6: /* BAR 4 */ case 7: /* BAR 5 */ bar = idx - 2; } return setup_port(priv, port, bar, offset, board->reg_shift); } /* * Some Titan cards are also a little weird */ static int titan_400l_800l_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { unsigned int bar, offset = board->first_offset; switch (idx) { case 0: bar = 1; break; case 1: bar = 2; break; default: bar = 4; offset = (idx - 2) * board->uart_offset; } return setup_port(priv, port, bar, offset, board->reg_shift); } static int pci_xircom_init(struct pci_dev *dev) { msleep(100); return 0; } static int 
pci_ni8420_init(struct pci_dev *dev) { void __iomem *p; unsigned long base, len; unsigned int bar = 0; if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) { moan_device("no memory in bar", dev); return 0; } base = pci_resource_start(dev, bar); len = pci_resource_len(dev, bar); p = ioremap_nocache(base, len); if (p == NULL) return -ENOMEM; /* Enable CPU Interrupt */ writel(readl(p + NI8420_INT_ENABLE_REG) | NI8420_INT_ENABLE_BIT, p + NI8420_INT_ENABLE_REG); iounmap(p); return 0; } #define MITE_IOWBSR1_WSIZE 0xa #define MITE_IOWBSR1_WIN_OFFSET 0x800 #define MITE_IOWBSR1_WENAB (1 << 7) #define MITE_LCIMR1_IO_IE_0 (1 << 24) #define MITE_LCIMR2_SET_CPU_IE (1 << 31) #define MITE_IOWCR1_RAMSEL_MASK 0xfffffffe static int pci_ni8430_init(struct pci_dev *dev) { void __iomem *p; unsigned long base, len; u32 device_window; unsigned int bar = 0; if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) { moan_device("no memory in bar", dev); return 0; } base = pci_resource_start(dev, bar); len = pci_resource_len(dev, bar); p = ioremap_nocache(base, len); if (p == NULL) return -ENOMEM; /* Set device window address and size in BAR0 */ device_window = ((base + MITE_IOWBSR1_WIN_OFFSET) & 0xffffff00) | MITE_IOWBSR1_WENAB | MITE_IOWBSR1_WSIZE; writel(device_window, p + MITE_IOWBSR1); /* Set window access to go to RAMSEL IO address space */ writel((readl(p + MITE_IOWCR1) & MITE_IOWCR1_RAMSEL_MASK), p + MITE_IOWCR1); /* Enable IO Bus Interrupt 0 */ writel(MITE_LCIMR1_IO_IE_0, p + MITE_LCIMR1); /* Enable CPU Interrupt */ writel(MITE_LCIMR2_SET_CPU_IE, p + MITE_LCIMR2); iounmap(p); return 0; } /* UART Port Control Register */ #define NI8430_PORTCON 0x0f #define NI8430_PORTCON_TXVR_ENABLE (1 << 3) static int pci_ni8430_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { void __iomem *p; unsigned long base, len; unsigned int bar, offset = board->first_offset; if (idx >= board->num_ports) return 1; bar = 
FL_GET_BASE(board->flags); offset += idx * board->uart_offset; base = pci_resource_start(priv->dev, bar); len = pci_resource_len(priv->dev, bar); p = ioremap_nocache(base, len); /* enable the transceiver */ writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE, p + offset + NI8430_PORTCON); iounmap(p); return setup_port(priv, port, bar, offset, board->reg_shift); } static int pci_netmos_9900_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { unsigned int bar; if ((priv->dev->subsystem_device & 0xff00) == 0x3000) { /* netmos apparently orders BARs by datasheet layout, so serial * ports get BARs 0 and 3 (or 1 and 4 for memmapped) */ bar = 3 * idx; return setup_port(priv, port, bar, 0, board->reg_shift); } else { return pci_default_setup(priv, board, port, idx); } } /* the 99xx series comes with a range of device IDs and a variety * of capabilities: * * 9900 has varying capabilities and can cascade to sub-controllers * (cascading should be purely internal) * 9904 is hardwired with 4 serial ports * 9912 and 9922 are hardwired with 2 serial ports */ static int pci_netmos_9900_numports(struct pci_dev *dev) { unsigned int c = dev->class; unsigned int pi; unsigned short sub_serports; pi = (c & 0xff); if (pi == 2) { return 1; } else if ((pi == 0) && (dev->device == PCI_DEVICE_ID_NETMOS_9900)) { /* two possibilities: 0x30ps encodes number of parallel and * serial ports, or 0x1000 indicates *something*. This is not * immediately obvious, since the 2s1p+4s configuration seems * to offer all functionality on functions 0..2, while still * advertising the same function 3 as the 4s+2s1p config. 
*/ sub_serports = dev->subsystem_device & 0xf; if (sub_serports > 0) { return sub_serports; } else { printk(KERN_NOTICE "NetMos/Mostech serial driver ignoring port on ambiguous config.\n"); return 0; } } moan_device("unknown NetMos/Mostech program interface", dev); return 0; } static int pci_netmos_init(struct pci_dev *dev) { /* subdevice 0x00PS means <P> parallel, <S> serial */ unsigned int num_serial = dev->subsystem_device & 0xf; if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) || (dev->device == PCI_DEVICE_ID_NETMOS_9865)) return 0; if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && dev->subsystem_device == 0x0299) return 0; switch (dev->device) { /* FALLTHROUGH on all */ case PCI_DEVICE_ID_NETMOS_9904: case PCI_DEVICE_ID_NETMOS_9912: case PCI_DEVICE_ID_NETMOS_9922: case PCI_DEVICE_ID_NETMOS_9900: num_serial = pci_netmos_9900_numports(dev); break; default: if (num_serial == 0 ) { moan_device("unknown NetMos/Mostech device", dev); } } if (num_serial == 0) return -ENODEV; return num_serial; } /* * These chips are available with optionally one parallel port and up to * two serial ports. Unfortunately they all have the same product id. * * Basic configuration is done over a region of 32 I/O ports. The base * ioport is called INTA or INTC, depending on docs/other drivers. * * The region of the 32 I/O ports is configured in POSIO0R... 
*/ /* registers */ #define ITE_887x_MISCR 0x9c #define ITE_887x_INTCBAR 0x78 #define ITE_887x_UARTBAR 0x7c #define ITE_887x_PS0BAR 0x10 #define ITE_887x_POSIO0 0x60 /* I/O space size */ #define ITE_887x_IOSIZE 32 /* I/O space size (bits 26-24; 8 bytes = 011b) */ #define ITE_887x_POSIO_IOSIZE_8 (3 << 24) /* I/O space size (bits 26-24; 32 bytes = 101b) */ #define ITE_887x_POSIO_IOSIZE_32 (5 << 24) /* Decoding speed (1 = slow, 2 = medium, 3 = fast) */ #define ITE_887x_POSIO_SPEED (3 << 29) /* enable IO_Space bit */ #define ITE_887x_POSIO_ENABLE (1 << 31) static int pci_ite887x_init(struct pci_dev *dev) { /* inta_addr are the configuration addresses of the ITE */ static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0, 0x200, 0x280, 0 }; int ret, i, type; struct resource *iobase = NULL; u32 miscr, uartbar, ioport; /* search for the base-ioport */ i = 0; while (inta_addr[i] && iobase == NULL) { iobase = request_region(inta_addr[i], ITE_887x_IOSIZE, "ite887x"); if (iobase != NULL) { /* write POSIO0R - speed | size | ioport */ pci_write_config_dword(dev, ITE_887x_POSIO0, ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED | ITE_887x_POSIO_IOSIZE_32 | inta_addr[i]); /* write INTCBAR - ioport */ pci_write_config_dword(dev, ITE_887x_INTCBAR, inta_addr[i]); ret = inb(inta_addr[i]); if (ret != 0xff) { /* ioport connected */ break; } release_region(iobase->start, ITE_887x_IOSIZE); iobase = NULL; } i++; } if (!inta_addr[i]) { printk(KERN_ERR "ite887x: could not find iobase\n"); return -ENODEV; } /* start of undocumented type checking (see parport_pc.c) */ type = inb(iobase->start + 0x18) & 0x0f; switch (type) { case 0x2: /* ITE8871 (1P) */ case 0xa: /* ITE8875 (1P) */ ret = 0; break; case 0xe: /* ITE8872 (2S1P) */ ret = 2; break; case 0x6: /* ITE8873 (1S) */ ret = 1; break; case 0x8: /* ITE8874 (2S) */ ret = 2; break; default: moan_device("Unknown ITE887x", dev); ret = -ENODEV; } /* configure all serial ports */ for (i = 0; i < ret; i++) { /* read the I/O port from the 
device */ pci_read_config_dword(dev, ITE_887x_PS0BAR + (0x4 * (i + 1)), &ioport); ioport &= 0x0000FF00; /* the actual base address */ pci_write_config_dword(dev, ITE_887x_POSIO0 + (0x4 * (i + 1)), ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED | ITE_887x_POSIO_IOSIZE_8 | ioport); /* write the ioport to the UARTBAR */ pci_read_config_dword(dev, ITE_887x_UARTBAR, &uartbar); uartbar &= ~(0xffff << (16 * i)); /* clear half the reg */ uartbar |= (ioport << (16 * i)); /* set the ioport */ pci_write_config_dword(dev, ITE_887x_UARTBAR, uartbar); /* get current config */ pci_read_config_dword(dev, ITE_887x_MISCR, &miscr); /* disable interrupts (UARTx_Routing[3:0]) */ miscr &= ~(0xf << (12 - 4 * i)); /* activate the UART (UARTx_En) */ miscr |= 1 << (23 - i); /* write new config with activated UART */ pci_write_config_dword(dev, ITE_887x_MISCR, miscr); } if (ret <= 0) { /* the device has no UARTs if we get here */ release_region(iobase->start, ITE_887x_IOSIZE); } return ret; } static void pci_ite887x_exit(struct pci_dev *dev) { u32 ioport; /* the ioport is bit 0-15 in POSIO0R */ pci_read_config_dword(dev, ITE_887x_POSIO0, &ioport); ioport &= 0xffff; release_region(ioport, ITE_887x_IOSIZE); } /* * Oxford Semiconductor Inc. * Check that device is part of the Tornado range of devices, then determine * the number of ports available on the device. 
*/ static int pci_oxsemi_tornado_init(struct pci_dev *dev) { u8 __iomem *p; unsigned long deviceID; unsigned int number_uarts = 0; /* OxSemi Tornado devices are all 0xCxxx */ if (dev->vendor == PCI_VENDOR_ID_OXSEMI && (dev->device & 0xF000) != 0xC000) return 0; p = pci_iomap(dev, 0, 5); if (p == NULL) return -ENOMEM; deviceID = ioread32(p); /* Tornado device */ if (deviceID == 0x07000200) { number_uarts = ioread8(p + 4); printk(KERN_DEBUG "%d ports detected on Oxford PCI Express device\n", number_uarts); } pci_iounmap(dev, p); return number_uarts; } static int pci_asix_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { port->bugs |= UART_BUG_PARITY; return pci_default_setup(priv, board, port, idx); } /* Quatech devices have their own extra interface features */ struct quatech_feature { u16 devid; bool amcc; }; #define QPCR_TEST_FOR1 0x3F #define QPCR_TEST_GET1 0x00 #define QPCR_TEST_FOR2 0x40 #define QPCR_TEST_GET2 0x40 #define QPCR_TEST_FOR3 0x80 #define QPCR_TEST_GET3 0x40 #define QPCR_TEST_FOR4 0xC0 #define QPCR_TEST_GET4 0x80 #define QOPR_CLOCK_X1 0x0000 #define QOPR_CLOCK_X2 0x0001 #define QOPR_CLOCK_X4 0x0002 #define QOPR_CLOCK_X8 0x0003 #define QOPR_CLOCK_RATE_MASK 0x0003 static struct quatech_feature quatech_cards[] = { { PCI_DEVICE_ID_QUATECH_QSC100, 1 }, { PCI_DEVICE_ID_QUATECH_DSC100, 1 }, { PCI_DEVICE_ID_QUATECH_DSC100E, 0 }, { PCI_DEVICE_ID_QUATECH_DSC200, 1 }, { PCI_DEVICE_ID_QUATECH_DSC200E, 0 }, { PCI_DEVICE_ID_QUATECH_ESC100D, 1 }, { PCI_DEVICE_ID_QUATECH_ESC100M, 1 }, { PCI_DEVICE_ID_QUATECH_QSCP100, 1 }, { PCI_DEVICE_ID_QUATECH_DSCP100, 1 }, { PCI_DEVICE_ID_QUATECH_QSCP200, 1 }, { PCI_DEVICE_ID_QUATECH_DSCP200, 1 }, { PCI_DEVICE_ID_QUATECH_ESCLP100, 0 }, { PCI_DEVICE_ID_QUATECH_QSCLP100, 0 }, { PCI_DEVICE_ID_QUATECH_DSCLP100, 0 }, { PCI_DEVICE_ID_QUATECH_SSCLP100, 0 }, { PCI_DEVICE_ID_QUATECH_QSCLP200, 0 }, { PCI_DEVICE_ID_QUATECH_DSCLP200, 0 }, { PCI_DEVICE_ID_QUATECH_SSCLP200, 0 }, { 
PCI_DEVICE_ID_QUATECH_SPPXP_100, 0 }, { 0, } }; static int pci_quatech_amcc(u16 devid) { struct quatech_feature *qf = &quatech_cards[0]; while (qf->devid) { if (qf->devid == devid) return qf->amcc; qf++; } pr_err("quatech: unknown port type '0x%04X'.\n", devid); return 0; }; static int pci_quatech_rqopr(struct uart_8250_port *port) { unsigned long base = port->port.iobase; u8 LCR, val; LCR = inb(base + UART_LCR); outb(0xBF, base + UART_LCR); val = inb(base + UART_SCR); outb(LCR, base + UART_LCR); return val; } static void pci_quatech_wqopr(struct uart_8250_port *port, u8 qopr) { unsigned long base = port->port.iobase; u8 LCR, val; LCR = inb(base + UART_LCR); outb(0xBF, base + UART_LCR); val = inb(base + UART_SCR); outb(qopr, base + UART_SCR); outb(LCR, base + UART_LCR); } static int pci_quatech_rqmcr(struct uart_8250_port *port) { unsigned long base = port->port.iobase; u8 LCR, val, qmcr; LCR = inb(base + UART_LCR); outb(0xBF, base + UART_LCR); val = inb(base + UART_SCR); outb(val | 0x10, base + UART_SCR); qmcr = inb(base + UART_MCR); outb(val, base + UART_SCR); outb(LCR, base + UART_LCR); return qmcr; } static void pci_quatech_wqmcr(struct uart_8250_port *port, u8 qmcr) { unsigned long base = port->port.iobase; u8 LCR, val; LCR = inb(base + UART_LCR); outb(0xBF, base + UART_LCR); val = inb(base + UART_SCR); outb(val | 0x10, base + UART_SCR); outb(qmcr, base + UART_MCR); outb(val, base + UART_SCR); outb(LCR, base + UART_LCR); } static int pci_quatech_has_qmcr(struct uart_8250_port *port) { unsigned long base = port->port.iobase; u8 LCR, val; LCR = inb(base + UART_LCR); outb(0xBF, base + UART_LCR); val = inb(base + UART_SCR); if (val & 0x20) { outb(0x80, UART_LCR); if (!(inb(UART_SCR) & 0x20)) { outb(LCR, base + UART_LCR); return 1; } } return 0; } static int pci_quatech_test(struct uart_8250_port *port) { u8 reg; u8 qopr = pci_quatech_rqopr(port); pci_quatech_wqopr(port, qopr & QPCR_TEST_FOR1); reg = pci_quatech_rqopr(port) & 0xC0; if (reg != QPCR_TEST_GET1) return 
-EINVAL; pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR2); reg = pci_quatech_rqopr(port) & 0xC0; if (reg != QPCR_TEST_GET2) return -EINVAL; pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR3); reg = pci_quatech_rqopr(port) & 0xC0; if (reg != QPCR_TEST_GET3) return -EINVAL; pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR4); reg = pci_quatech_rqopr(port) & 0xC0; if (reg != QPCR_TEST_GET4) return -EINVAL; pci_quatech_wqopr(port, qopr); return 0; } static int pci_quatech_clock(struct uart_8250_port *port) { u8 qopr, reg, set; unsigned long clock; if (pci_quatech_test(port) < 0) return 1843200; qopr = pci_quatech_rqopr(port); pci_quatech_wqopr(port, qopr & ~QOPR_CLOCK_X8); reg = pci_quatech_rqopr(port); if (reg & QOPR_CLOCK_X8) { clock = 1843200; goto out; } pci_quatech_wqopr(port, qopr | QOPR_CLOCK_X8); reg = pci_quatech_rqopr(port); if (!(reg & QOPR_CLOCK_X8)) { clock = 1843200; goto out; } reg &= QOPR_CLOCK_X8; if (reg == QOPR_CLOCK_X2) { clock = 3685400; set = QOPR_CLOCK_X2; } else if (reg == QOPR_CLOCK_X4) { clock = 7372800; set = QOPR_CLOCK_X4; } else if (reg == QOPR_CLOCK_X8) { clock = 14745600; set = QOPR_CLOCK_X8; } else { clock = 1843200; set = QOPR_CLOCK_X1; } qopr &= ~QOPR_CLOCK_RATE_MASK; qopr |= set; out: pci_quatech_wqopr(port, qopr); return clock; } static int pci_quatech_rs422(struct uart_8250_port *port) { u8 qmcr; int rs422 = 0; if (!pci_quatech_has_qmcr(port)) return 0; qmcr = pci_quatech_rqmcr(port); pci_quatech_wqmcr(port, 0xFF); if (pci_quatech_rqmcr(port)) rs422 = 1; pci_quatech_wqmcr(port, qmcr); return rs422; } static int pci_quatech_init(struct pci_dev *dev) { if (pci_quatech_amcc(dev->device)) { unsigned long base = pci_resource_start(dev, 0); if (base) { u32 tmp; outl(inl(base + 0x38) | 0x00002000, base + 0x38); tmp = inl(base + 0x3c); outl(tmp | 0x01000000, base + 0x3c); outl(tmp &= ~0x01000000, base + 0x3c); } } return 0; } static int pci_quatech_setup(struct serial_private *priv, const struct 
pciserial_board *board, struct uart_8250_port *port, int idx) { /* Needed by pci_quatech calls below */ port->port.iobase = pci_resource_start(priv->dev, FL_GET_BASE(board->flags)); /* Set up the clocking */ port->port.uartclk = pci_quatech_clock(port); /* For now just warn about RS422 */ if (pci_quatech_rs422(port)) pr_warn("quatech: software control of RS422 features not currently supported.\n"); return pci_default_setup(priv, board, port, idx); } static void pci_quatech_exit(struct pci_dev *dev) { } static int pci_default_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { unsigned int bar, offset = board->first_offset, maxnr; bar = FL_GET_BASE(board->flags); if (board->flags & FL_BASE_BARS) bar += idx; else offset += idx * board->uart_offset; maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >> (board->reg_shift + 3); if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr) return 1; return setup_port(priv, port, bar, offset, board->reg_shift); } static int ce4100_serial_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { int ret; ret = setup_port(priv, port, idx, 0, board->reg_shift); port->port.iotype = UPIO_MEM32; port->port.type = PORT_XSCALE; port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); port->port.regshift = 2; return ret; } static int pci_omegapci_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { return setup_port(priv, port, 2, idx * 8, 0); } static int pci_brcm_trumanage_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { int ret = pci_default_setup(priv, board, port, idx); port->port.type = PORT_BRCM_TRUMANAGE; port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); return ret; } static int skip_tx_en_setup(struct serial_private *priv, const struct 
pciserial_board *board,
			struct uart_8250_port *port, int idx)
{
	/* Tell the 8250 core not to run the TxEn test on this port. */
	port->port.flags |= UPF_NO_TXEN_TEST;
	printk(KERN_DEBUG "serial8250: skipping TxEn test for device "
			  "[%04x:%04x] subsystem [%04x:%04x]\n",
		priv->dev->vendor, priv->dev->device,
		priv->dev->subsystem_vendor, priv->dev->subsystem_device);

	return pci_default_setup(priv, board, port, idx);
}

/* Break handler for the Intel Patsburg "KT" (ME SOL) serial port. */
static void kt_handle_break(struct uart_port *p)
{
	struct uart_8250_port *up =
		container_of(p, struct uart_8250_port, port);
	/*
	 * On receipt of a BI, serial device in Intel ME (Intel
	 * management engine) needs to have its fifos cleared for sane
	 * SOL (Serial Over Lan) output.
	 */
	serial8250_clear_and_reinit_fifos(up);
}

/* Register-read hook for the KT port; see the IER workaround below. */
static unsigned int kt_serial_in(struct uart_port *p, int offset)
{
	struct uart_8250_port *up =
		container_of(p, struct uart_8250_port, port);
	unsigned int val;

	/*
	 * When the Intel ME (management engine) gets reset its serial
	 * port registers could return 0 momentarily.  Functions like
	 * serial8250_console_write, read and save the IER, perform
	 * some operation and then restore it.  In order to avoid
	 * setting IER register inadvertently to 0, if the value read
	 * is 0, double check with ier value in uart_8250_port and use
	 * that instead.  up->ier should be the same value as what is
	 * currently configured.
	 */
	val = inb(p->iobase + offset);
	if (offset == UART_IER) {
		if (val == 0)
			val = up->ier;
	}
	return val;
}

/* Wire up the KT quirks: THRE bug flag, cached-IER read, break handler. */
static int kt_serial_setup(struct serial_private *priv,
			   const struct pciserial_board *board,
			   struct uart_8250_port *port, int idx)
{
	port->port.flags |= UPF_BUG_THRE;
	port->port.serial_in = kt_serial_in;
	port->port.handle_break = kt_handle_break;
	return skip_tx_en_setup(priv, board, port, idx);
}

/*
 * EG20T/ML7213-family UARTs are driven by the PCH UART driver when it
 * is enabled; refuse them here in that case so the drivers don't clash.
 */
static int pci_eg20t_init(struct pci_dev *dev)
{
#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
	return -ENODEV;
#else
	return 0;
#endif
}

/* Exar XR17C15x: enable enhanced-feature-register handling. */
static int
pci_xr17c154_setup(struct serial_private *priv,
		  const struct pciserial_board *board,
		  struct uart_8250_port *port, int idx)
{
	port->port.flags |= UPF_EXAR_EFR;
	return pci_default_setup(priv, board, port, idx);
}

/*
 * Exar XR17V35x: on the first port, zero the MPIO pin registers and set
 * the FIFO trigger levels (128 bytes) via the BAR 0 device registers,
 * then fall through to the default setup.
 */
static int
pci_xr17v35x_setup(struct serial_private *priv,
		  const struct pciserial_board *board,
		  struct uart_8250_port *port, int idx)
{
	u8 __iomem *p;

	p = pci_ioremap_bar(priv->dev, 0);
	if (p == NULL)
		return -ENOMEM;

	port->port.flags |= UPF_EXAR_EFR;

	/*
	 * Setup Multipurpose Input/Output pins.
	 */
	if (idx == 0) {
		writeb(0x00, p + 0x8f); /*MPIOINT[7:0]*/
		writeb(0x00, p + 0x90); /*MPIOLVL[7:0]*/
		writeb(0x00, p + 0x91); /*MPIO3T[7:0]*/
		writeb(0x00, p + 0x92); /*MPIOINV[7:0]*/
		writeb(0x00, p + 0x93); /*MPIOSEL[7:0]*/
		writeb(0x00, p + 0x94); /*MPIOOD[7:0]*/
		writeb(0x00, p + 0x95); /*MPIOINT[15:8]*/
		writeb(0x00, p + 0x96); /*MPIOLVL[15:8]*/
		writeb(0x00, p + 0x97); /*MPIO3T[15:8]*/
		writeb(0x00, p + 0x98); /*MPIOINV[15:8]*/
		writeb(0x00, p + 0x99); /*MPIOSEL[15:8]*/
		writeb(0x00, p + 0x9a); /*MPIOOD[15:8]*/
	}
	writeb(0x00, p + UART_EXAR_8XMODE);
	writeb(UART_FCTR_EXAR_TRGD, p + UART_EXAR_FCTR);
	writeb(128, p + UART_EXAR_TXTRG);
	writeb(128, p + UART_EXAR_RXTRG);
	iounmap(p);

	return pci_default_setup(priv, board, port, idx);
}

#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
#define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
#define PCI_DEVICE_ID_COMMTECH_2328PCI335 0x000b

/*
 * Commtech Fastcom 335 (Exar-based): configure the MPIO pins per board
 * variant and use smaller (32-byte) FIFO trigger levels than the
 * XR17V35x setup above.
 */
static int
pci_fastcom335_setup(struct serial_private *priv,
		  const struct pciserial_board *board,
		  struct uart_8250_port *port, int idx)
{
	u8 __iomem *p;

	p = pci_ioremap_bar(priv->dev, 0);
	if (p == NULL)
		return -ENOMEM;

	port->port.flags |= UPF_EXAR_EFR;

	/*
	 * Setup Multipurpose Input/Output pins.
	 */
	if (idx == 0) {
		switch (priv->dev->device) {
		case PCI_DEVICE_ID_COMMTECH_4222PCI335:
		case PCI_DEVICE_ID_COMMTECH_4224PCI335:
			writeb(0x78, p + 0x90); /* MPIOLVL[7:0] */
			writeb(0x00, p + 0x92); /* MPIOINV[7:0] */
			writeb(0x00, p + 0x93); /* MPIOSEL[7:0] */
			break;
		case PCI_DEVICE_ID_COMMTECH_2324PCI335:
		case PCI_DEVICE_ID_COMMTECH_2328PCI335:
			writeb(0x00, p + 0x90); /* MPIOLVL[7:0] */
			writeb(0xc0, p + 0x92); /* MPIOINV[7:0] */
			writeb(0xc0, p + 0x93); /* MPIOSEL[7:0] */
			break;
		}
		writeb(0x00, p + 0x8f); /* MPIOINT[7:0] */
		writeb(0x00, p + 0x91); /* MPIO3T[7:0] */
		writeb(0x00, p + 0x94); /* MPIOOD[7:0] */
	}
	writeb(0x00, p + UART_EXAR_8XMODE);
	writeb(UART_FCTR_EXAR_TRGD, p + UART_EXAR_FCTR);
	writeb(32, p + UART_EXAR_TXTRG);
	writeb(32, p + UART_EXAR_RXTRG);
	iounmap(p);

	return pci_default_setup(priv, board, port, idx);
}

/* WCH CH353: 16550A clone — pin the port type, then default setup. */
static int
pci_wch_ch353_setup(struct serial_private *priv,
                    const struct pciserial_board *board,
                    struct uart_8250_port *port, int idx)
{
	port->port.flags |= UPF_FIXED_TYPE;
	port->port.type = PORT_16550A;
	return pci_default_setup(priv, board, port, idx);
}

/* Vendor/device/subdevice IDs that are not in linux/pci_ids.h. */
#define PCI_VENDOR_ID_SBSMODULARIO	0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO	0x124B
#define PCI_DEVICE_ID_OCTPRO		0x0001
#define PCI_SUBDEVICE_ID_OCTPRO232	0x0108
#define PCI_SUBDEVICE_ID_OCTPRO422	0x0208
#define PCI_SUBDEVICE_ID_POCTAL232	0x0308
#define PCI_SUBDEVICE_ID_POCTAL422	0x0408
#define PCI_SUBDEVICE_ID_SIIG_DUAL_00	0x2500
#define PCI_SUBDEVICE_ID_SIIG_DUAL_30	0x2530
#define PCI_VENDOR_ID_ADVANTECH		0x13fe
#define PCI_DEVICE_ID_INTEL_CE4100_UART	0x2e66
#define PCI_DEVICE_ID_ADVANTECH_PCI3620	0x3620
#define PCI_DEVICE_ID_TITAN_200I	0x8028
#define PCI_DEVICE_ID_TITAN_400I	0x8048
#define PCI_DEVICE_ID_TITAN_800I	0x8088
#define PCI_DEVICE_ID_TITAN_800EH	0xA007
#define PCI_DEVICE_ID_TITAN_800EHB	0xA008
#define PCI_DEVICE_ID_TITAN_400EH	0xA009
#define PCI_DEVICE_ID_TITAN_100E	0xA010
#define PCI_DEVICE_ID_TITAN_200E	0xA012
#define PCI_DEVICE_ID_TITAN_400E	0xA013
#define PCI_DEVICE_ID_TITAN_800E	0xA014
#define PCI_DEVICE_ID_TITAN_200EI	0xA016
#define PCI_DEVICE_ID_TITAN_200EISI	0xA017
#define PCI_DEVICE_ID_TITAN_200V3	0xA306
#define PCI_DEVICE_ID_TITAN_400V3	0xA310
#define PCI_DEVICE_ID_TITAN_410V3	0xA312
#define PCI_DEVICE_ID_TITAN_800V3	0xA314
#define PCI_DEVICE_ID_TITAN_800V3B	0xA315
#define PCI_DEVICE_ID_OXSEMI_16PCI958	0x9538
#define PCIE_DEVICE_ID_NEO_2_OX_IBM	0x00F6
#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA	0xc001
#define PCI_DEVICE_ID_INTEL_PATSBURG_KT	0x1d3d
#define PCI_VENDOR_ID_WCH		0x4348
#define PCI_DEVICE_ID_WCH_CH352_2S	0x3253
#define PCI_DEVICE_ID_WCH_CH353_4S	0x3453
#define PCI_DEVICE_ID_WCH_CH353_2S1PF	0x5046
#define PCI_DEVICE_ID_WCH_CH353_2S1P	0x7053
#define PCI_VENDOR_ID_AGESTAR		0x5372
#define PCI_DEVICE_ID_AGESTAR_9375	0x6872
#define PCI_VENDOR_ID_ASIX		0x9710
#define PCI_DEVICE_ID_COMMTECH_4224PCIE	0x0020
#define PCI_DEVICE_ID_COMMTECH_4228PCIE	0x0021
#define PCI_DEVICE_ID_COMMTECH_4222PCIE	0x0022
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
#define PCI_VENDOR_ID_SUNIX		0x1fd4
#define PCI_DEVICE_ID_SUNIX_1999	0x1999

/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584	0x1584
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588	0x1588

/*
 * Master list of serial port init/setup/exit quirks.
 * This does not describe the general nature of the port.
 * (ie, baud base, number and location of ports, etc)
 *
 * This list is ordered alphabetically by vendor then device.
 * Specific entries must come before more generic entries.
 * (find_quirk() returns the FIRST matching entry, so do not reorder.)
 */
static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
	/*
	 * ADDI-DATA GmbH communication cards <info@addi-data.com>
	 */
	{ .vendor = PCI_VENDOR_ID_ADDIDATA_OLD,
	  .device = PCI_DEVICE_ID_ADDIDATA_APCI7800,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = addidata_apci7800_setup, },
	/*
	 * AFAVLAB cards - these may be called via parport_serial
	 * It is not clear whether this applies to all products.
	 */
	{ .vendor = PCI_VENDOR_ID_AFAVLAB, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = afavlab_setup, },
	/*
	 * HP Diva
	 */
	{ .vendor = PCI_VENDOR_ID_HP, .device = PCI_DEVICE_ID_HP_DIVA,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_hp_diva_init, .setup = pci_hp_diva_setup, },
	/*
	 * Intel
	 */
	{ .vendor = PCI_VENDOR_ID_INTEL,
	  .device = PCI_DEVICE_ID_INTEL_80960_RP,
	  .subvendor = 0xe4bf, .subdevice = PCI_ANY_ID,
	  .init = pci_inteli960ni_init, .setup = pci_default_setup, },
	{ .vendor = PCI_VENDOR_ID_INTEL,
	  .device = PCI_DEVICE_ID_INTEL_8257X_SOL,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = skip_tx_en_setup, },
	{ .vendor = PCI_VENDOR_ID_INTEL,
	  .device = PCI_DEVICE_ID_INTEL_82573L_SOL,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = skip_tx_en_setup, },
	{ .vendor = PCI_VENDOR_ID_INTEL,
	  .device = PCI_DEVICE_ID_INTEL_82573E_SOL,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = skip_tx_en_setup, },
	{ .vendor = PCI_VENDOR_ID_INTEL,
	  .device = PCI_DEVICE_ID_INTEL_CE4100_UART,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = ce4100_serial_setup, },
	{ .vendor = PCI_VENDOR_ID_INTEL,
	  .device = PCI_DEVICE_ID_INTEL_PATSBURG_KT,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = kt_serial_setup, },
	/*
	 * ITE
	 */
	{ .vendor = PCI_VENDOR_ID_ITE, .device = PCI_DEVICE_ID_ITE_8872,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ite887x_init, .setup = pci_default_setup,
	  .exit = pci_ite887x_exit, },
	/*
	 * National Instruments
	 */
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PCI23216,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PCI2328,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PCI2324,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PCI2322,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PCI2324I,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PCI2322I,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PXI8420_23216,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PXI8420_2328,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PXI8420_2324,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PXI8420_2322,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PXI8422_2324,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_DEVICE_ID_NI_PXI8422_2322,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8420_init, .setup = pci_default_setup,
	  .exit = pci_ni8420_exit, },
	/* Catch-all for the remaining NI (8430-style) boards. */
	{ .vendor = PCI_VENDOR_ID_NI, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_ni8430_init, .setup = pci_ni8430_setup,
	  .exit = pci_ni8430_exit, },
	/* Quatech */
	{ .vendor = PCI_VENDOR_ID_QUATECH, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_quatech_init, .setup = pci_quatech_setup,
	  .exit = pci_quatech_exit, },
	/*
	 * Panacom
	 */
	{ .vendor = PCI_VENDOR_ID_PANACOM,
	  .device = PCI_DEVICE_ID_PANACOM_QUADMODEM,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_plx9050_init, .setup = pci_default_setup,
	  .exit = pci_plx9050_exit, },
	{ .vendor = PCI_VENDOR_ID_PANACOM,
	  .device = PCI_DEVICE_ID_PANACOM_DUALMODEM,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_plx9050_init, .setup = pci_default_setup,
	  .exit = pci_plx9050_exit, },
	/*
	 * PLX
	 */
	{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9030,
	  .subvendor = PCI_SUBVENDOR_ID_PERLE, .subdevice = PCI_ANY_ID,
	  .setup = pci_default_setup, },
	{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
	  .subvendor = PCI_SUBVENDOR_ID_EXSYS,
	  .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055,
	  .init = pci_plx9050_init, .setup = pci_default_setup,
	  .exit = pci_plx9050_exit, },
	{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
	  .subvendor = PCI_SUBVENDOR_ID_KEYSPAN,
	  .subdevice = PCI_SUBDEVICE_ID_KEYSPAN_SX2,
	  .init = pci_plx9050_init, .setup = pci_default_setup,
	  .exit = pci_plx9050_exit, },
	{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_ROMULUS,
	  .subvendor = PCI_VENDOR_ID_PLX,
	  .subdevice = PCI_DEVICE_ID_PLX_ROMULUS,
	  .init = pci_plx9050_init, .setup = pci_default_setup,
	  .exit = pci_plx9050_exit, },
	/*
	 * SBS Technologies, Inc., PMC-OCTALPRO 232
	 */
	{ .vendor = PCI_VENDOR_ID_SBSMODULARIO,
	  .device = PCI_DEVICE_ID_OCTPRO,
	  .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
	  .subdevice = PCI_SUBDEVICE_ID_OCTPRO232,
	  .init = sbs_init, .setup = sbs_setup, .exit = sbs_exit, },
	/*
	 * SBS Technologies, Inc., PMC-OCTALPRO 422
	 */
	{ .vendor = PCI_VENDOR_ID_SBSMODULARIO,
	  .device = PCI_DEVICE_ID_OCTPRO,
	  .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
	  .subdevice = PCI_SUBDEVICE_ID_OCTPRO422,
	  .init = sbs_init, .setup = sbs_setup, .exit = sbs_exit, },
	/*
	 * SBS Technologies, Inc., P-Octal 232
	 */
	{ .vendor = PCI_VENDOR_ID_SBSMODULARIO,
	  .device = PCI_DEVICE_ID_OCTPRO,
	  .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
	  .subdevice = PCI_SUBDEVICE_ID_POCTAL232,
	  .init = sbs_init, .setup = sbs_setup, .exit = sbs_exit, },
	/*
	 * SBS Technologies, Inc., P-Octal 422
	 */
	{ .vendor = PCI_VENDOR_ID_SBSMODULARIO,
	  .device = PCI_DEVICE_ID_OCTPRO,
	  .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
	  .subdevice = PCI_SUBDEVICE_ID_POCTAL422,
	  .init = sbs_init, .setup = sbs_setup, .exit = sbs_exit, },
	/*
	 * SIIG cards - these may be called via parport_serial
	 */
	{ .vendor = PCI_VENDOR_ID_SIIG, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_siig_init, .setup = pci_siig_setup, },
	/*
	 * Titan cards
	 */
	{ .vendor = PCI_VENDOR_ID_TITAN, .device = PCI_DEVICE_ID_TITAN_400L,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = titan_400l_800l_setup, },
	{ .vendor = PCI_VENDOR_ID_TITAN, .device = PCI_DEVICE_ID_TITAN_800L,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = titan_400l_800l_setup, },
	/*
	 * Timedia cards
	 */
	{ .vendor = PCI_VENDOR_ID_TIMEDIA,
	  .device = PCI_DEVICE_ID_TIMEDIA_1889,
	  .subvendor = PCI_VENDOR_ID_TIMEDIA, .subdevice = PCI_ANY_ID,
	  .probe = pci_timedia_probe, .init = pci_timedia_init,
	  .setup = pci_timedia_setup, },
	{ .vendor = PCI_VENDOR_ID_TIMEDIA, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_timedia_setup, },
	/*
	 * SUNIX (Timedia) cards
	 * Do not "probe" for these cards as there is at least one combination
	 * card that should be handled by parport_pc that doesn't match the
	 * rule in pci_timedia_probe.
	 * It is part number is MIO5079A but its subdevice ID is 0x0102.
	 * There are some boards with part number SER5037AL that report
	 * subdevice ID 0x0002.
	 */
	{ .vendor = PCI_VENDOR_ID_SUNIX, .device = PCI_DEVICE_ID_SUNIX_1999,
	  .subvendor = PCI_VENDOR_ID_SUNIX, .subdevice = PCI_ANY_ID,
	  .init = pci_timedia_init, .setup = pci_timedia_setup, },
	/*
	 * Exar cards
	 */
	{ .vendor = PCI_VENDOR_ID_EXAR, .device = PCI_DEVICE_ID_EXAR_XR17C152,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_xr17c154_setup, },
	{ .vendor = PCI_VENDOR_ID_EXAR, .device = PCI_DEVICE_ID_EXAR_XR17C154,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_xr17c154_setup, },
	{ .vendor = PCI_VENDOR_ID_EXAR, .device = PCI_DEVICE_ID_EXAR_XR17C158,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_xr17c154_setup, },
	{ .vendor = PCI_VENDOR_ID_EXAR, .device = PCI_DEVICE_ID_EXAR_XR17V352,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_xr17v35x_setup, },
	{ .vendor = PCI_VENDOR_ID_EXAR, .device = PCI_DEVICE_ID_EXAR_XR17V354,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_xr17v35x_setup, },
	{ .vendor = PCI_VENDOR_ID_EXAR, .device = PCI_DEVICE_ID_EXAR_XR17V358,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_xr17v35x_setup, },
	/*
	 * Xircom cards
	 */
	{ .vendor = PCI_VENDOR_ID_XIRCOM,
	  .device = PCI_DEVICE_ID_XIRCOM_X3201_MDM,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_xircom_init, .setup = pci_default_setup, },
	/*
	 * Netmos cards - these may be called via parport_serial
	 */
	{ .vendor = PCI_VENDOR_ID_NETMOS, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_netmos_init, .setup = pci_netmos_9900_setup, },
	/*
	 * For Oxford Semiconductor Tornado based devices
	 */
	{ .vendor = PCI_VENDOR_ID_OXSEMI, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_oxsemi_tornado_init, .setup = pci_default_setup, },
	{ .vendor = PCI_VENDOR_ID_MAINPINE, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_oxsemi_tornado_init, .setup = pci_default_setup, },
	{ .vendor = PCI_VENDOR_ID_DIGI, .device = PCIE_DEVICE_ID_NEO_2_OX_IBM,
	  .subvendor = PCI_SUBVENDOR_ID_IBM, .subdevice = PCI_ANY_ID,
	  .init = pci_oxsemi_tornado_init, .setup = pci_default_setup, },
	/* Intel EG20T / LAPIS ML7xxx UARTs, refused when the PCH driver
	 * is built (see pci_eg20t_init). */
	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8811,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_eg20t_init, .setup = pci_default_setup, },
	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8812,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_eg20t_init, .setup = pci_default_setup, },
	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8813,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_eg20t_init, .setup = pci_default_setup, },
	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8814,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_eg20t_init, .setup = pci_default_setup, },
	{ .vendor = 0x10DB, .device = 0x8027,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_eg20t_init, .setup = pci_default_setup, },
	{ .vendor = 0x10DB, .device = 0x8028,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_eg20t_init, .setup = pci_default_setup, },
	{ .vendor = 0x10DB, .device = 0x8029,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_eg20t_init, .setup = pci_default_setup, },
	{ .vendor = 0x10DB, .device = 0x800C,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_eg20t_init, .setup = pci_default_setup, },
	{ .vendor = 0x10DB, .device = 0x800D,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .init = pci_eg20t_init, .setup = pci_default_setup, },
	/*
	 * Cronyx Omega PCI (PLX-chip based)
	 */
	{ .vendor = PCI_VENDOR_ID_PLX,
	  .device = PCI_DEVICE_ID_PLX_CRONYX_OMEGA,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_omegapci_setup, },
	/* WCH CH353 2S1P card (16550 clone) */
	{ .vendor = PCI_VENDOR_ID_WCH, .device = PCI_DEVICE_ID_WCH_CH353_2S1P,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_wch_ch353_setup, },
	/* WCH CH353 4S card (16550 clone) */
	{ .vendor = PCI_VENDOR_ID_WCH, .device = PCI_DEVICE_ID_WCH_CH353_4S,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_wch_ch353_setup, },
	/* WCH CH353 2S1PF card (16550 clone) */
	{ .vendor = PCI_VENDOR_ID_WCH, .device = PCI_DEVICE_ID_WCH_CH353_2S1PF,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_wch_ch353_setup, },
	/* WCH CH352 2S card (16550 clone) */
	{ .vendor = PCI_VENDOR_ID_WCH, .device = PCI_DEVICE_ID_WCH_CH352_2S,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_wch_ch353_setup, },
	/*
	 * ASIX devices with FIFO bug
	 */
	{ .vendor = PCI_VENDOR_ID_ASIX, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_asix_setup, },
	/*
	 * Commtech, Inc. Fastcom adapters
	 *
	 */
	{ .vendor = PCI_VENDOR_ID_COMMTECH,
	  .device = PCI_DEVICE_ID_COMMTECH_4222PCI335,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_fastcom335_setup, },
	{ .vendor = PCI_VENDOR_ID_COMMTECH,
	  .device = PCI_DEVICE_ID_COMMTECH_4224PCI335,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_fastcom335_setup, },
	{ .vendor = PCI_VENDOR_ID_COMMTECH,
	  .device = PCI_DEVICE_ID_COMMTECH_2324PCI335,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_fastcom335_setup, },
	{ .vendor = PCI_VENDOR_ID_COMMTECH,
	  .device = PCI_DEVICE_ID_COMMTECH_2328PCI335,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_fastcom335_setup, },
	{ .vendor = PCI_VENDOR_ID_COMMTECH,
	  .device = PCI_DEVICE_ID_COMMTECH_4222PCIE,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_xr17v35x_setup, },
	{ .vendor = PCI_VENDOR_ID_COMMTECH,
	  .device = PCI_DEVICE_ID_COMMTECH_4224PCIE,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_xr17v35x_setup, },
	{ .vendor = PCI_VENDOR_ID_COMMTECH,
	  .device = PCI_DEVICE_ID_COMMTECH_4228PCIE,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_xr17v35x_setup, },
	/*
	 * Broadcom TruManage (NetXtreme)
	 */
	{ .vendor = PCI_VENDOR_ID_BROADCOM,
	  .device = PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_brcm_trumanage_setup, },
	/*
	 * Default "match everything" terminator entry
	 */
	{ .vendor = PCI_ANY_ID, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .setup = pci_default_setup, }
};

/* A quirk field matches when it is wildcarded or equal to the device's. */
static inline int quirk_id_matches(u32 quirk_id, u32 dev_id)
{
	return quirk_id == PCI_ANY_ID || quirk_id == dev_id;
}

/*
 * Return the first quirk entry matching 'dev'.  The loop has no end
 * condition because the table's terminator entry matches everything.
 */
static struct pci_serial_quirk *find_quirk(struct pci_dev *dev)
{
	struct pci_serial_quirk *quirk;

	for (quirk = pci_serial_quirks; ; quirk++)
		if (quirk_id_matches(quirk->vendor, dev->vendor) &&
		    quirk_id_matches(quirk->device, dev->device) &&
		    quirk_id_matches(quirk->subvendor, dev->subsystem_vendor) &&
		    quirk_id_matches(quirk->subdevice, dev->subsystem_device))
			break;
	return quirk;
}

/* IRQ for the board: 0 when the board is flagged IRQ-less. */
static inline int
get_pci_irq(struct pci_dev *dev, const struct pciserial_board *board)
{
	if (board->flags & FL_NOIRQ)
		return 0;
	else
		return dev->irq;
}

/*
 * This is the configuration table for all of the PCI serial boards
 * which we support. It is directly indexed by the pci_board_num_t enum
 * value, which is encoded in the pci_device_id PCI probe table's
 * driver_data member.
 *
 * The makeup of these names are:
 *  pbn_bn{_bt}_n_baud{_offsetinhex}
 *
 *  bn		= PCI BAR number
 *  bt		= Index using PCI BARs
 *  n		= number of serial ports
 *  baud	= baud rate
 *  offsetinhex	= offset for each sequential port (in hex)
 *
 * This table is sorted by (in order): bn, bt, baud, offsetindex, n.
 *
 * Please note: in theory if n = 1, _bt infix should make no difference.
 * ie, pbn_b0_1_115200 is the same as pbn_b0_bt_1_115200
 */
enum pci_board_num_t {
	pbn_default = 0,

	pbn_b0_1_115200,
	pbn_b0_2_115200,
	pbn_b0_4_115200,
	pbn_b0_5_115200,
	pbn_b0_8_115200,

	pbn_b0_1_921600,
	pbn_b0_2_921600,
	pbn_b0_4_921600,

	pbn_b0_2_1130000,

	pbn_b0_4_1152000,

	pbn_b0_2_1152000_200,
	pbn_b0_4_1152000_200,
	pbn_b0_8_1152000_200,

	pbn_b0_2_1843200,
	pbn_b0_4_1843200,

	pbn_b0_2_1843200_200,
	pbn_b0_4_1843200_200,
	pbn_b0_8_1843200_200,

	pbn_b0_1_4000000,

	pbn_b0_bt_1_115200,
	pbn_b0_bt_2_115200,
	pbn_b0_bt_4_115200,
	pbn_b0_bt_8_115200,

	pbn_b0_bt_1_460800,
	pbn_b0_bt_2_460800,
	pbn_b0_bt_4_460800,

	pbn_b0_bt_1_921600,
	pbn_b0_bt_2_921600,
	pbn_b0_bt_4_921600,
	pbn_b0_bt_8_921600,

	pbn_b1_1_115200,
	pbn_b1_2_115200,
	pbn_b1_4_115200,
	pbn_b1_8_115200,
	pbn_b1_16_115200,

	pbn_b1_1_921600,
	pbn_b1_2_921600,
	pbn_b1_4_921600,
	pbn_b1_8_921600,

	pbn_b1_2_1250000,

	pbn_b1_bt_1_115200,
	pbn_b1_bt_2_115200,
	pbn_b1_bt_4_115200,

	pbn_b1_bt_2_921600,

	pbn_b1_1_1382400,
	pbn_b1_2_1382400,
	pbn_b1_4_1382400,
	pbn_b1_8_1382400,

	pbn_b2_1_115200,
	pbn_b2_2_115200,
	pbn_b2_4_115200,
	pbn_b2_8_115200,

	pbn_b2_1_460800,
	pbn_b2_4_460800,
	pbn_b2_8_460800,
	pbn_b2_16_460800,

	pbn_b2_1_921600,
	pbn_b2_4_921600,
	pbn_b2_8_921600,

	pbn_b2_8_1152000,

	pbn_b2_bt_1_115200,
	pbn_b2_bt_2_115200,
	pbn_b2_bt_4_115200,

	pbn_b2_bt_2_921600,
	pbn_b2_bt_4_921600,

	pbn_b3_2_115200,
	pbn_b3_4_115200,
	pbn_b3_8_115200,

	pbn_b4_bt_2_921600,
	pbn_b4_bt_4_921600,
	pbn_b4_bt_8_921600,

	/*
	 * Board-specific versions.
	 */
	pbn_panacom,
	pbn_panacom2,
	pbn_panacom4,
	pbn_plx_romulus,
	pbn_oxsemi,
	pbn_oxsemi_1_4000000,
	pbn_oxsemi_2_4000000,
	pbn_oxsemi_4_4000000,
	pbn_oxsemi_8_4000000,
	pbn_intel_i960,
	pbn_sgi_ioc3,
	pbn_computone_4,
	pbn_computone_6,
	pbn_computone_8,
	pbn_sbsxrsio,
	pbn_exar_XR17C152,
	pbn_exar_XR17C154,
	pbn_exar_XR17C158,
	pbn_exar_XR17V352,
	pbn_exar_XR17V354,
	pbn_exar_XR17V358,
	pbn_exar_ibm_saturn,
	pbn_pasemi_1682M,
	pbn_ni8430_2,
	pbn_ni8430_4,
	pbn_ni8430_8,
	pbn_ni8430_16,
	pbn_ADDIDATA_PCIe_1_3906250,
	pbn_ADDIDATA_PCIe_2_3906250,
	pbn_ADDIDATA_PCIe_4_3906250,
	pbn_ADDIDATA_PCIe_8_3906250,
	pbn_ce4100_1_115200,
	pbn_omegapci,
	pbn_NETMOS9900_2s_115200,
	pbn_brcm_trumanage,
};

/*
 * uart_offset - the space between channels
 * reg_shift   - describes how the UART registers are mapped
 *               to PCI memory by the card.
 * For example IER register on SBS, Inc. PMC-OctPro is located at
 * offset 0x10 from the UART base, while UART_IER is defined as 1
 * in include/linux/serial_reg.h,
 * see first lines of serial_in() and serial_out() in 8250.c
 */
static struct pciserial_board pci_boards[] = {
	[pbn_default] = {
		.flags = FL_BASE0, .num_ports = 1,
		.base_baud = 115200, .uart_offset = 8, },
	[pbn_b0_1_115200] = {
		.flags = FL_BASE0, .num_ports = 1,
		.base_baud = 115200, .uart_offset = 8, },
	[pbn_b0_2_115200] = {
		.flags = FL_BASE0, .num_ports = 2,
		.base_baud = 115200, .uart_offset = 8, },
	[pbn_b0_4_115200] = {
		.flags = FL_BASE0, .num_ports = 4,
		.base_baud = 115200, .uart_offset = 8, },
	[pbn_b0_5_115200] = {
		.flags = FL_BASE0, .num_ports = 5,
		.base_baud = 115200, .uart_offset = 8, },
	[pbn_b0_8_115200] = {
		.flags = FL_BASE0, .num_ports = 8,
		.base_baud = 115200, .uart_offset = 8, },
	[pbn_b0_1_921600] = {
		.flags = FL_BASE0, .num_ports = 1,
		.base_baud = 921600, .uart_offset = 8, },
	[pbn_b0_2_921600] = {
		.flags = FL_BASE0, .num_ports = 2,
		.base_baud = 921600, .uart_offset = 8, },
	[pbn_b0_4_921600] = {
		.flags = FL_BASE0, .num_ports = 4,
		.base_baud = 921600, .uart_offset = 8, },
	[pbn_b0_2_1130000] = {
		.flags =
FL_BASE0, .num_ports = 2, .base_baud = 1130000, .uart_offset = 8, }, [pbn_b0_4_1152000] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 1152000, .uart_offset = 8, }, [pbn_b0_2_1152000_200] = { .flags = FL_BASE0, .num_ports = 2, .base_baud = 1152000, .uart_offset = 0x200, }, [pbn_b0_4_1152000_200] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 1152000, .uart_offset = 0x200, }, [pbn_b0_8_1152000_200] = { .flags = FL_BASE0, .num_ports = 8, .base_baud = 1152000, .uart_offset = 0x200, }, [pbn_b0_2_1843200] = { .flags = FL_BASE0, .num_ports = 2, .base_baud = 1843200, .uart_offset = 8, }, [pbn_b0_4_1843200] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 1843200, .uart_offset = 8, }, [pbn_b0_2_1843200_200] = { .flags = FL_BASE0, .num_ports = 2, .base_baud = 1843200, .uart_offset = 0x200, }, [pbn_b0_4_1843200_200] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 1843200, .uart_offset = 0x200, }, [pbn_b0_8_1843200_200] = { .flags = FL_BASE0, .num_ports = 8, .base_baud = 1843200, .uart_offset = 0x200, }, [pbn_b0_1_4000000] = { .flags = FL_BASE0, .num_ports = 1, .base_baud = 4000000, .uart_offset = 8, }, [pbn_b0_bt_1_115200] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 1, .base_baud = 115200, .uart_offset = 8, }, [pbn_b0_bt_2_115200] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 2, .base_baud = 115200, .uart_offset = 8, }, [pbn_b0_bt_4_115200] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 4, .base_baud = 115200, .uart_offset = 8, }, [pbn_b0_bt_8_115200] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 8, .base_baud = 115200, .uart_offset = 8, }, [pbn_b0_bt_1_460800] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 1, .base_baud = 460800, .uart_offset = 8, }, [pbn_b0_bt_2_460800] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 2, .base_baud = 460800, .uart_offset = 8, }, [pbn_b0_bt_4_460800] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 4, .base_baud = 460800, .uart_offset = 8, }, [pbn_b0_bt_1_921600] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports 
= 1, .base_baud = 921600, .uart_offset = 8, }, [pbn_b0_bt_2_921600] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 2, .base_baud = 921600, .uart_offset = 8, }, [pbn_b0_bt_4_921600] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 4, .base_baud = 921600, .uart_offset = 8, }, [pbn_b0_bt_8_921600] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 8, .base_baud = 921600, .uart_offset = 8, }, [pbn_b1_1_115200] = { .flags = FL_BASE1, .num_ports = 1, .base_baud = 115200, .uart_offset = 8, }, [pbn_b1_2_115200] = { .flags = FL_BASE1, .num_ports = 2, .base_baud = 115200, .uart_offset = 8, }, [pbn_b1_4_115200] = { .flags = FL_BASE1, .num_ports = 4, .base_baud = 115200, .uart_offset = 8, }, [pbn_b1_8_115200] = { .flags = FL_BASE1, .num_ports = 8, .base_baud = 115200, .uart_offset = 8, }, [pbn_b1_16_115200] = { .flags = FL_BASE1, .num_ports = 16, .base_baud = 115200, .uart_offset = 8, }, [pbn_b1_1_921600] = { .flags = FL_BASE1, .num_ports = 1, .base_baud = 921600, .uart_offset = 8, }, [pbn_b1_2_921600] = { .flags = FL_BASE1, .num_ports = 2, .base_baud = 921600, .uart_offset = 8, }, [pbn_b1_4_921600] = { .flags = FL_BASE1, .num_ports = 4, .base_baud = 921600, .uart_offset = 8, }, [pbn_b1_8_921600] = { .flags = FL_BASE1, .num_ports = 8, .base_baud = 921600, .uart_offset = 8, }, [pbn_b1_2_1250000] = { .flags = FL_BASE1, .num_ports = 2, .base_baud = 1250000, .uart_offset = 8, }, [pbn_b1_bt_1_115200] = { .flags = FL_BASE1|FL_BASE_BARS, .num_ports = 1, .base_baud = 115200, .uart_offset = 8, }, [pbn_b1_bt_2_115200] = { .flags = FL_BASE1|FL_BASE_BARS, .num_ports = 2, .base_baud = 115200, .uart_offset = 8, }, [pbn_b1_bt_4_115200] = { .flags = FL_BASE1|FL_BASE_BARS, .num_ports = 4, .base_baud = 115200, .uart_offset = 8, }, [pbn_b1_bt_2_921600] = { .flags = FL_BASE1|FL_BASE_BARS, .num_ports = 2, .base_baud = 921600, .uart_offset = 8, }, [pbn_b1_1_1382400] = { .flags = FL_BASE1, .num_ports = 1, .base_baud = 1382400, .uart_offset = 8, }, [pbn_b1_2_1382400] = { .flags = FL_BASE1, .num_ports 
= 2, .base_baud = 1382400, .uart_offset = 8, }, [pbn_b1_4_1382400] = { .flags = FL_BASE1, .num_ports = 4, .base_baud = 1382400, .uart_offset = 8, }, [pbn_b1_8_1382400] = { .flags = FL_BASE1, .num_ports = 8, .base_baud = 1382400, .uart_offset = 8, }, [pbn_b2_1_115200] = { .flags = FL_BASE2, .num_ports = 1, .base_baud = 115200, .uart_offset = 8, }, [pbn_b2_2_115200] = { .flags = FL_BASE2, .num_ports = 2, .base_baud = 115200, .uart_offset = 8, }, [pbn_b2_4_115200] = { .flags = FL_BASE2, .num_ports = 4, .base_baud = 115200, .uart_offset = 8, }, [pbn_b2_8_115200] = { .flags = FL_BASE2, .num_ports = 8, .base_baud = 115200, .uart_offset = 8, }, [pbn_b2_1_460800] = { .flags = FL_BASE2, .num_ports = 1, .base_baud = 460800, .uart_offset = 8, }, [pbn_b2_4_460800] = { .flags = FL_BASE2, .num_ports = 4, .base_baud = 460800, .uart_offset = 8, }, [pbn_b2_8_460800] = { .flags = FL_BASE2, .num_ports = 8, .base_baud = 460800, .uart_offset = 8, }, [pbn_b2_16_460800] = { .flags = FL_BASE2, .num_ports = 16, .base_baud = 460800, .uart_offset = 8, }, [pbn_b2_1_921600] = { .flags = FL_BASE2, .num_ports = 1, .base_baud = 921600, .uart_offset = 8, }, [pbn_b2_4_921600] = { .flags = FL_BASE2, .num_ports = 4, .base_baud = 921600, .uart_offset = 8, }, [pbn_b2_8_921600] = { .flags = FL_BASE2, .num_ports = 8, .base_baud = 921600, .uart_offset = 8, }, [pbn_b2_8_1152000] = { .flags = FL_BASE2, .num_ports = 8, .base_baud = 1152000, .uart_offset = 8, }, [pbn_b2_bt_1_115200] = { .flags = FL_BASE2|FL_BASE_BARS, .num_ports = 1, .base_baud = 115200, .uart_offset = 8, }, [pbn_b2_bt_2_115200] = { .flags = FL_BASE2|FL_BASE_BARS, .num_ports = 2, .base_baud = 115200, .uart_offset = 8, }, [pbn_b2_bt_4_115200] = { .flags = FL_BASE2|FL_BASE_BARS, .num_ports = 4, .base_baud = 115200, .uart_offset = 8, }, [pbn_b2_bt_2_921600] = { .flags = FL_BASE2|FL_BASE_BARS, .num_ports = 2, .base_baud = 921600, .uart_offset = 8, }, [pbn_b2_bt_4_921600] = { .flags = FL_BASE2|FL_BASE_BARS, .num_ports = 4, .base_baud = 921600, 
.uart_offset = 8, }, [pbn_b3_2_115200] = { .flags = FL_BASE3, .num_ports = 2, .base_baud = 115200, .uart_offset = 8, }, [pbn_b3_4_115200] = { .flags = FL_BASE3, .num_ports = 4, .base_baud = 115200, .uart_offset = 8, }, [pbn_b3_8_115200] = { .flags = FL_BASE3, .num_ports = 8, .base_baud = 115200, .uart_offset = 8, }, [pbn_b4_bt_2_921600] = { .flags = FL_BASE4, .num_ports = 2, .base_baud = 921600, .uart_offset = 8, }, [pbn_b4_bt_4_921600] = { .flags = FL_BASE4, .num_ports = 4, .base_baud = 921600, .uart_offset = 8, }, [pbn_b4_bt_8_921600] = { .flags = FL_BASE4, .num_ports = 8, .base_baud = 921600, .uart_offset = 8, }, /* * Entries following this are board-specific. */ /* * Panacom - IOMEM */ [pbn_panacom] = { .flags = FL_BASE2, .num_ports = 2, .base_baud = 921600, .uart_offset = 0x400, .reg_shift = 7, }, [pbn_panacom2] = { .flags = FL_BASE2|FL_BASE_BARS, .num_ports = 2, .base_baud = 921600, .uart_offset = 0x400, .reg_shift = 7, }, [pbn_panacom4] = { .flags = FL_BASE2|FL_BASE_BARS, .num_ports = 4, .base_baud = 921600, .uart_offset = 0x400, .reg_shift = 7, }, /* I think this entry is broken - the first_offset looks wrong --rmk */ [pbn_plx_romulus] = { .flags = FL_BASE2, .num_ports = 4, .base_baud = 921600, .uart_offset = 8 << 2, .reg_shift = 2, .first_offset = 0x03, }, /* * This board uses the size of PCI Base region 0 to * signal now many ports are available */ [pbn_oxsemi] = { .flags = FL_BASE0|FL_REGION_SZ_CAP, .num_ports = 32, .base_baud = 115200, .uart_offset = 8, }, [pbn_oxsemi_1_4000000] = { .flags = FL_BASE0, .num_ports = 1, .base_baud = 4000000, .uart_offset = 0x200, .first_offset = 0x1000, }, [pbn_oxsemi_2_4000000] = { .flags = FL_BASE0, .num_ports = 2, .base_baud = 4000000, .uart_offset = 0x200, .first_offset = 0x1000, }, [pbn_oxsemi_4_4000000] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 4000000, .uart_offset = 0x200, .first_offset = 0x1000, }, [pbn_oxsemi_8_4000000] = { .flags = FL_BASE0, .num_ports = 8, .base_baud = 4000000, .uart_offset = 0x200, 
.first_offset = 0x1000, }, /* * EKF addition for i960 Boards form EKF with serial port. * Max 256 ports. */ [pbn_intel_i960] = { .flags = FL_BASE0, .num_ports = 32, .base_baud = 921600, .uart_offset = 8 << 2, .reg_shift = 2, .first_offset = 0x10000, }, [pbn_sgi_ioc3] = { .flags = FL_BASE0|FL_NOIRQ, .num_ports = 1, .base_baud = 458333, .uart_offset = 8, .reg_shift = 0, .first_offset = 0x20178, }, /* * Computone - uses IOMEM. */ [pbn_computone_4] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 921600, .uart_offset = 0x40, .reg_shift = 2, .first_offset = 0x200, }, [pbn_computone_6] = { .flags = FL_BASE0, .num_ports = 6, .base_baud = 921600, .uart_offset = 0x40, .reg_shift = 2, .first_offset = 0x200, }, [pbn_computone_8] = { .flags = FL_BASE0, .num_ports = 8, .base_baud = 921600, .uart_offset = 0x40, .reg_shift = 2, .first_offset = 0x200, }, [pbn_sbsxrsio] = { .flags = FL_BASE0, .num_ports = 8, .base_baud = 460800, .uart_offset = 256, .reg_shift = 4, }, /* * Exar Corp. XR17C15[248] Dual/Quad/Octal UART * Only basic 16550A support. * XR17C15[24] are not tested, but they should work. 
*/ [pbn_exar_XR17C152] = { .flags = FL_BASE0, .num_ports = 2, .base_baud = 921600, .uart_offset = 0x200, }, [pbn_exar_XR17C154] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 921600, .uart_offset = 0x200, }, [pbn_exar_XR17C158] = { .flags = FL_BASE0, .num_ports = 8, .base_baud = 921600, .uart_offset = 0x200, }, [pbn_exar_XR17V352] = { .flags = FL_BASE0, .num_ports = 2, .base_baud = 7812500, .uart_offset = 0x400, .reg_shift = 0, .first_offset = 0, }, [pbn_exar_XR17V354] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 7812500, .uart_offset = 0x400, .reg_shift = 0, .first_offset = 0, }, [pbn_exar_XR17V358] = { .flags = FL_BASE0, .num_ports = 8, .base_baud = 7812500, .uart_offset = 0x400, .reg_shift = 0, .first_offset = 0, }, [pbn_exar_ibm_saturn] = { .flags = FL_BASE0, .num_ports = 1, .base_baud = 921600, .uart_offset = 0x200, }, /* * PA Semi PWRficient PA6T-1682M on-chip UART */ [pbn_pasemi_1682M] = { .flags = FL_BASE0, .num_ports = 1, .base_baud = 8333333, }, /* * National Instruments 843x */ [pbn_ni8430_16] = { .flags = FL_BASE0, .num_ports = 16, .base_baud = 3686400, .uart_offset = 0x10, .first_offset = 0x800, }, [pbn_ni8430_8] = { .flags = FL_BASE0, .num_ports = 8, .base_baud = 3686400, .uart_offset = 0x10, .first_offset = 0x800, }, [pbn_ni8430_4] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 3686400, .uart_offset = 0x10, .first_offset = 0x800, }, [pbn_ni8430_2] = { .flags = FL_BASE0, .num_ports = 2, .base_baud = 3686400, .uart_offset = 0x10, .first_offset = 0x800, }, /* * ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com> */ [pbn_ADDIDATA_PCIe_1_3906250] = { .flags = FL_BASE0, .num_ports = 1, .base_baud = 3906250, .uart_offset = 0x200, .first_offset = 0x1000, }, [pbn_ADDIDATA_PCIe_2_3906250] = { .flags = FL_BASE0, .num_ports = 2, .base_baud = 3906250, .uart_offset = 0x200, .first_offset = 0x1000, }, [pbn_ADDIDATA_PCIe_4_3906250] = { .flags = FL_BASE0, .num_ports = 4, .base_baud = 3906250, .uart_offset = 0x200, .first_offset = 
								0x1000,
	},
	[pbn_ADDIDATA_PCIe_8_3906250] = {
		.flags		= FL_BASE0,
		.num_ports	= 8,
		.base_baud	= 3906250,
		.uart_offset	= 0x200,
		.first_offset	= 0x1000,
	},
	[pbn_ce4100_1_115200] = {
		.flags		= FL_BASE_BARS,
		.num_ports	= 2,
		.base_baud	= 921600,
		.reg_shift	= 2,
	},
	[pbn_omegapci] = {
		.flags		= FL_BASE0,
		.num_ports	= 8,
		.base_baud	= 115200,
		.uart_offset	= 0x200,
	},
	[pbn_NETMOS9900_2s_115200] = {
		.flags		= FL_BASE0,
		.num_ports	= 2,
		.base_baud	= 115200,
	},
	[pbn_brcm_trumanage] = {
		.flags		= FL_BASE0,
		.num_ports	= 1,
		.reg_shift	= 2,
		.base_baud	= 115200,
	},
};

/*
 * Devices that advertise a PCI serial/modem class code but must NOT be
 * claimed by this driver: softmodems, and multi-io cards that are
 * handled by parport_serial.  Consulted by serial_pci_guess_board().
 */
static const struct pci_device_id blacklist[] = {
	/* softmodems */
	{ PCI_VDEVICE(AL, 0x5457), },		/* ALi Corporation M5457 AC'97 Modem */
	{ PCI_VDEVICE(MOTOROLA, 0x3052), },	/* Motorola Si3052-based modem */
	{ PCI_DEVICE(0x1543, 0x3052), },	/* Si3052-based modem, default IDs */

	/* multi-io cards handled by parport_serial */
	{ PCI_DEVICE(0x4348, 0x7053), },	/* WCH CH353 2S1P */
};

/*
 * Given a complete unknown PCI device, try to use some heuristics to
 * guess what the configuration might be, based on the pitiful PCI
 * serial specs.  Returns 0 on success, 1 on failure.
 *
 * NOTE(review): despite the sentence above, the code below actually
 * returns -ENODEV (not 1) on failure; callers only test for non-zero.
 */
static int
serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
{
	const struct pci_device_id *bldev;
	int num_iomem, num_port, first_port = -1, i;

	/*
	 * If it is not a communications device or the programming
	 * interface is greater than 6, give up.
	 *
	 * (Should we try to make guesses for multiport serial devices
	 * later?)
	 */
	if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) &&
	     ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) ||
	    (dev->class & 0xff) > 6)
		return -ENODEV;

	/*
	 * Do not access blacklisted devices that are known not to
	 * feature serial ports or are handled by other modules.
	 */
	for (bldev = blacklist;
	     bldev < blacklist + ARRAY_SIZE(blacklist);
	     bldev++) {
		if (dev->vendor == bldev->vendor &&
		    dev->device == bldev->device)
			return -ENODEV;
	}

	/* Count I/O-port and memory BARs; remember the first I/O BAR. */
	num_iomem = num_port = 0;
	for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
			num_port++;
			if (first_port == -1)
				first_port = i;
		}
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
			num_iomem++;
	}

	/*
	 * If there is 1 or 0 iomem regions, and exactly one port,
	 * use it.  We guess the number of ports based on the IO
	 * region size (8 bytes of register space per 8250 port).
	 */
	if (num_iomem <= 1 && num_port == 1) {
		board->flags = first_port;
		board->num_ports = pci_resource_len(dev, first_port) / 8;
		return 0;
	}

	/*
	 * Now guess if we've got a board which indexes by BARs.
	 * Each IO BAR should be 8 bytes, and they should follow
	 * consecutively.
	 */
	first_port = -1;
	num_port = 0;
	for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_IO &&
		    pci_resource_len(dev, i) == 8 &&
		    (first_port == -1 || (first_port + num_port) == i)) {
			num_port++;
			if (first_port == -1)
				first_port = i;
		}
	}

	if (num_port > 1) {
		board->flags = first_port | FL_BASE_BARS;
		board->num_ports = num_port;
		return 0;
	}

	return -ENODEV;
}

/*
 * Returns non-zero when an explicit table entry describes exactly the
 * same board parameters the guessing heuristic derived — used by
 * pciserial_init_one() to flag redundant serial_pci_tbl entries.
 */
static inline int
serial_pci_matches(const struct pciserial_board *board,
		   const struct pciserial_board *guessed)
{
	return
	    board->num_ports == guessed->num_ports &&
	    board->base_baud == guessed->base_baud &&
	    board->uart_offset == guessed->uart_offset &&
	    board->reg_shift == guessed->reg_shift &&
	    board->first_offset == guessed->first_offset;
}

/*
 * Register all UARTs of a PCI serial device with the 8250 core.
 *
 * Runs the device's init quirk (which may override the port count),
 * allocates the serial_private bookkeeping structure (with one line[]
 * slot per port), configures each port through the setup quirk and
 * registers it.  Returns the new serial_private on success, or an
 * ERR_PTR() value on failure — callers must check with IS_ERR().
 */
struct serial_private *
pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
{
	struct uart_8250_port uart;
	struct serial_private *priv;
	struct pci_serial_quirk *quirk;
	int rc, nr_ports, i;

	nr_ports = board->num_ports;

	/*
	 * Find an init and setup quirks.
	 */
	quirk = find_quirk(dev);

	/*
	 * Run the new-style initialization function.
	 * The initialization function returns:
	 *	<0 - error
	 *	 0 - use board->num_ports
	 *	>0 - number of ports
	 */
	if (quirk->init) {
		rc = quirk->init(dev);
		if (rc < 0) {
			priv = ERR_PTR(rc);
			goto err_out;
		}
		if (rc)
			nr_ports = rc;
	}

	priv = kzalloc(sizeof(struct serial_private) +
		       sizeof(unsigned int) * nr_ports,
		       GFP_KERNEL);
	if (!priv) {
		priv = ERR_PTR(-ENOMEM);
		goto err_deinit;
	}

	priv->dev = dev;
	priv->quirk = quirk;

	/* Settings common to every port on this device. */
	memset(&uart, 0, sizeof(uart));
	uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
	uart.port.uartclk = board->base_baud * 16;
	uart.port.irq = get_pci_irq(dev, board);
	uart.port.dev = &dev->dev;

	for (i = 0; i < nr_ports; i++) {
		if (quirk->setup(priv, board, &uart, i))
			break;

#ifdef SERIAL_DEBUG_PCI
		printk(KERN_DEBUG "Setup PCI port: port %lx, irq %d, type %d\n",
		       uart.port.iobase, uart.port.irq, uart.port.iotype);
#endif

		priv->line[i] = serial8250_register_8250_port(&uart);
		if (priv->line[i] < 0) {
			printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), priv->line[i]);
			break;
		}
	}
	/* Record how many ports actually registered (loop may break early). */
	priv->nr = i;
	return priv;

err_deinit:
	if (quirk->exit)
		quirk->exit(dev);
err_out:
	return priv;
}
EXPORT_SYMBOL_GPL(pciserial_init_ports);

/*
 * Undo pciserial_init_ports(): unregister every registered port, drop
 * any remapped BARs, run the exit quirk and free the private data.
 */
void pciserial_remove_ports(struct serial_private *priv)
{
	struct pci_serial_quirk *quirk;
	int i;

	for (i = 0; i < priv->nr; i++)
		serial8250_unregister_port(priv->line[i]);

	for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
		if (priv->remapped_bar[i])
			iounmap(priv->remapped_bar[i]);
		priv->remapped_bar[i] = NULL;
	}

	/*
	 * Find the exit quirks.
	 */
	quirk = find_quirk(priv->dev);
	if (quirk->exit)
		quirk->exit(priv->dev);

	kfree(priv);
}
EXPORT_SYMBOL_GPL(pciserial_remove_ports);

/*
 * Suspend every registered port of this device and tear down the init
 * quirk so the hardware can be powered down.
 */
void pciserial_suspend_ports(struct serial_private *priv)
{
	int i;

	for (i = 0; i < priv->nr; i++)
		if (priv->line[i] >= 0)
			serial8250_suspend_port(priv->line[i]);

	/*
	 * Ensure that every init quirk is properly torn down
	 */
	if (priv->quirk->exit)
		priv->quirk->exit(priv->dev);
}
EXPORT_SYMBOL_GPL(pciserial_suspend_ports);

/*
 * Re-run the init quirk and resume every previously registered port.
 */
void pciserial_resume_ports(struct serial_private *priv)
{
	int i;

	/*
	 * Ensure that the board is correctly configured.
	 */
	if (priv->quirk->init)
		priv->quirk->init(priv->dev);

	for (i = 0; i < priv->nr; i++)
		if (priv->line[i] >= 0)
			serial8250_resume_port(priv->line[i]);
}
EXPORT_SYMBOL_GPL(pciserial_resume_ports);

/*
 * Probe one serial board.  Unfortunately, there is no rhyme nor reason
 * to the arrangement of serial ports on a PCI card.
 */
static int
pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct pci_serial_quirk *quirk;
	struct serial_private *priv;
	const struct pciserial_board *board;
	struct pciserial_board tmp;
	int rc;

	/* Give a board-specific probe quirk first refusal. */
	quirk = find_quirk(dev);
	if (quirk->probe) {
		rc = quirk->probe(dev);
		if (rc)
			return rc;
	}

	if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
		printk(KERN_ERR "pci_init_one: invalid driver_data: %ld\n",
			ent->driver_data);
		return -EINVAL;
	}

	board = &pci_boards[ent->driver_data];

	rc = pci_enable_device(dev);
	pci_save_state(dev);
	if (rc)
		return rc;

	if (ent->driver_data == pbn_default) {
		/*
		 * Use a copy of the pci_board entry for this;
		 * avoid changing entries in the table.
		 */
		memcpy(&tmp, board, sizeof(struct pciserial_board));
		board = &tmp;

		/*
		 * We matched one of our class entries.  Try to
		 * determine the parameters of this board.
		 */
		rc = serial_pci_guess_board(dev, &tmp);
		if (rc)
			goto disable;
	} else {
		/*
		 * We matched an explicit entry.  If we are able to
		 * detect this boards settings with our heuristic,
		 * then we no longer need this entry.
		 */
		memcpy(&tmp, &pci_boards[pbn_default],
		       sizeof(struct pciserial_board));
		rc = serial_pci_guess_board(dev, &tmp);
		if (rc == 0 && serial_pci_matches(board, &tmp))
			moan_device("Redundant entry in serial pci_table.",
				    dev);
	}

	priv = pciserial_init_ports(dev, board);
	if (!IS_ERR(priv)) {
		pci_set_drvdata(dev, priv);
		return 0;
	}

	rc = PTR_ERR(priv);
 disable:
	pci_disable_device(dev);
	return rc;
}

/*
 * PCI remove hook: tear down all ports and disable the device.
 */
static void pciserial_remove_one(struct pci_dev *dev)
{
	struct serial_private *priv = pci_get_drvdata(dev);

	pci_set_drvdata(dev, NULL);

	pciserial_remove_ports(priv);

	pci_disable_device(dev);
}

#ifdef CONFIG_PM
/* PM suspend hook: quiesce the ports, then power the device down. */
static int pciserial_suspend_one(struct pci_dev *dev, pm_message_t state)
{
	struct serial_private *priv = pci_get_drvdata(dev);

	if (priv)
		pciserial_suspend_ports(priv);

	pci_save_state(dev);
	pci_set_power_state(dev, pci_choose_state(dev, state));
	return 0;
}

/* PM resume hook: restore PCI state, re-enable and resume the ports. */
static int pciserial_resume_one(struct pci_dev *dev)
{
	int err;
	struct serial_private *priv = pci_get_drvdata(dev);

	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);

	if (priv) {
		/*
		 * The device may have been disabled.  Re-enable it.
		 */
		err = pci_enable_device(dev);
		/* FIXME: We cannot simply error out here */
		if (err)
			printk(KERN_ERR "pciserial: Unable to re-enable ports, trying to continue.\n");
		pciserial_resume_ports(priv);
	}
	return 0;
}
#endif

static struct pci_device_id serial_pci_tbl[] = {
	/* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
	{	PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
		PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
		pbn_b2_8_921600 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232, 0, 0,
		pbn_b1_8_1382400 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232, 0, 0,
		pbn_b1_4_1382400 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232, 0, 0,
		pbn_b1_2_1382400 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232, 0, 0,
		pbn_b1_8_1382400 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232, 0, 0,
		pbn_b1_4_1382400 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232, 0, 0,
		pbn_b1_2_1382400 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485, 0, 0,
		pbn_b1_8_921600 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4, 0, 0,
		pbn_b1_8_921600 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485, 0, 0,
		pbn_b1_4_921600 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2, 0, 0,
		pbn_b1_4_921600 },
	{	PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
		PCI_SUBVENDOR_ID_CONNECT_TECH,
		PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485, 0, 0,
pbn_b1_2_921600 }, { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6, 0, 0, pbn_b1_8_921600 }, { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1, 0, 0, pbn_b1_8_921600 }, { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1, 0, 0, pbn_b1_4_921600 }, { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ, 0, 0, pbn_b1_2_1250000 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2, 0, 0, pbn_b0_2_1843200 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4, 0, 0, pbn_b0_4_1843200 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, PCI_VENDOR_ID_AFAVLAB, PCI_SUBDEVICE_ID_AFAVLAB_P061, 0, 0, pbn_b0_4_1152000 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232, 0, 0, pbn_b0_2_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232, 0, 0, pbn_b0_4_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232, 0, 0, pbn_b0_8_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1, 0, 0, pbn_b0_2_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2, 0, 0, pbn_b0_4_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4, 0, 0, pbn_b0_8_1843200_200 }, { PCI_VENDOR_ID_EXAR, 
PCI_DEVICE_ID_EXAR_XR17C152, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2, 0, 0, pbn_b0_2_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4, 0, 0, pbn_b0_4_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8, 0, 0, pbn_b0_8_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485, 0, 0, pbn_b0_2_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485, 0, 0, pbn_b0_4_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158, PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485, 0, 0, pbn_b0_8_1843200_200 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152, PCI_VENDOR_ID_IBM, PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT, 0, 0, pbn_exar_ibm_saturn }, { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_U530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_1_115200 }, { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_115200 }, { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_4_115200 }, { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM232, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_115200 }, { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_4_115200 }, { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_8_115200 }, { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_7803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_8_460800 }, { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_8_115200 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_GTEK_SERIAL2, PCI_ANY_ID, PCI_ANY_ID, 0, 
0, pbn_b2_bt_2_115200 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_SPCOM200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_921600 }, /* * VScom SPCOM800, from sl@s.pl */ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_SPCOM800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_8_921600 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_1077, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_4_921600 }, /* Unknown card - subdevice 0x1584 */ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0, pbn_b2_4_115200 }, /* Unknown card - subdevice 0x1588 */ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, PCI_SUBDEVICE_ID_UNKNOWN_0x1588, 0, 0, pbn_b2_8_115200 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_SUBVENDOR_ID_KEYSPAN, PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0, pbn_panacom }, { PCI_VENDOR_ID_PANACOM, PCI_DEVICE_ID_PANACOM_QUADMODEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_panacom4 }, { PCI_VENDOR_ID_PANACOM, PCI_DEVICE_ID_PANACOM_DUALMODEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_panacom2 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_ESDGMBH, PCI_DEVICE_ID_ESDGMBH_CPCIASIO4, 0, 0, pbn_b2_4_115200 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_SUBVENDOR_ID_CHASE_PCIFAST, PCI_SUBDEVICE_ID_CHASE_PCIFAST4, 0, 0, pbn_b2_4_460800 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_SUBVENDOR_ID_CHASE_PCIFAST, PCI_SUBDEVICE_ID_CHASE_PCIFAST8, 0, 0, pbn_b2_8_460800 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_SUBVENDOR_ID_CHASE_PCIFAST, PCI_SUBDEVICE_ID_CHASE_PCIFAST16, 0, 0, pbn_b2_16_460800 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_SUBVENDOR_ID_CHASE_PCIFAST, PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC, 0, 0, pbn_b2_16_460800 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_SUBVENDOR_ID_CHASE_PCIRAS, PCI_SUBDEVICE_ID_CHASE_PCIRAS4, 0, 0, pbn_b2_4_460800 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_SUBVENDOR_ID_CHASE_PCIRAS, PCI_SUBDEVICE_ID_CHASE_PCIRAS8, 0, 0, pbn_b2_8_460800 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, 
PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4055, 0, 0, pbn_b2_4_115200 }, /* * Megawolf Romulus PCI Serial Card, from Mike Hudson * (Exoray@isys.ca) */ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_ROMULUS, 0x10b5, 0x106a, 0, 0, pbn_plx_romulus }, /* * Quatech cards. These actually have configurable clocks but for * now we just use the default. * * 100 series are RS232, 200 series RS422, */ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_4_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_2_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_2_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_2_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC200E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_2_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_4_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_8_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_8_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCP100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_4_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCP100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_2_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCP200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_4_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCP200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_2_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCLP100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_4_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCLP100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_2_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SSCLP100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_1_115200 }, { PCI_VENDOR_ID_QUATECH, 
PCI_DEVICE_ID_QUATECH_QSCLP200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_4_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCLP200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_2_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SSCLP200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_1_115200 }, { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESCLP100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_8_115200 }, { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954, PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, 0, 0, pbn_b0_4_1152000 }, { PCI_VENDOR_ID_OXSEMI, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_921600 }, /* * The below card is a little controversial since it is the * subject of a PCI vendor/device ID clash. (See * www.ussg.iu.edu/hypermail/linux/kernel/0303.1/0516.html). * For now just used the hex ID 0x950a. */ { PCI_VENDOR_ID_OXSEMI, 0x950a, PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_00, 0, 0, pbn_b0_2_115200 }, { PCI_VENDOR_ID_OXSEMI, 0x950a, PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_30, 0, 0, pbn_b0_2_115200 }, { PCI_VENDOR_ID_OXSEMI, 0x950a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_2_1130000 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_C950, PCI_VENDOR_ID_OXSEMI, PCI_SUBDEVICE_ID_OXSEMI_C950, 0, 0, pbn_b0_1_921600 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_115200 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_921600 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI958, PCI_ANY_ID , PCI_ANY_ID, 0, 0, pbn_b2_8_1152000 }, /* * Oxford Semiconductor Inc. Tornado PCI express device range. 
*/ { PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_2_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_2_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_4_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_4_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_8_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native 
UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_8_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, /* * Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado */ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */ PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */ PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0, pbn_oxsemi_2_4000000 }, { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */ PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0, pbn_oxsemi_4_4000000 }, { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */ PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0, pbn_oxsemi_8_4000000 }, /* * Digi/IBM PCIe 2-port Async EIA-232 Adapter utilizing OxSemi Tornado */ { PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_2_OX_IBM, PCI_SUBVENDOR_ID_IBM, PCI_ANY_ID, 0, 0, pbn_oxsemi_2_4000000 }, /* * SBS Technologies, Inc. 
P-Octal and PMC-OCTPRO cards, * from skokodyn@yahoo.com */ { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO, PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_OCTPRO232, 0, 0, pbn_sbsxrsio }, { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO, PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_OCTPRO422, 0, 0, pbn_sbsxrsio }, { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO, PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_POCTAL232, 0, 0, pbn_sbsxrsio }, { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO, PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_POCTAL422, 0, 0, pbn_sbsxrsio }, /* * Digitan DS560-558, from jimd@esoft.com */ { PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_ATT_VENUS_MODEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_1_115200 }, /* * Titan Electronic cards * The 400L and 800L have a custom setup quirk. */ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_2_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100L, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_1_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200L, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_2_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400L, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800L, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_8_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200I, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b4_bt_2_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400I, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b4_bt_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800I, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b4_bt_8_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400EH, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
pbn_b0_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800EH, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800EHB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_2_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_4_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_8_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_2_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_2_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_410V3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_1_460800 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_1_460800 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_1_460800 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_10x_550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_10x_650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_10x_850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_921600 }, { PCI_VENDOR_ID_SIIG, 
PCI_DEVICE_ID_SIIG_4S_10x_550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_4_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_10x_650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_4_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_10x_850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_4_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_20x_550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_20x_650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_20x_850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_20x_550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_20x_650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_20x_850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_8_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_8_921600 }, { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_8_921600 }, /* * Computone devices submitted by Doug McNash dmcnash@computone.com */ { PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_PG, PCI_SUBVENDOR_ID_COMPUTONE, PCI_SUBDEVICE_ID_COMPUTONE_PG4, 0, 0, pbn_computone_4 }, { PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_PG, PCI_SUBVENDOR_ID_COMPUTONE, PCI_SUBDEVICE_ID_COMPUTONE_PG8, 0, 0, pbn_computone_8 }, { PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_PG, PCI_SUBVENDOR_ID_COMPUTONE, 
PCI_SUBDEVICE_ID_COMPUTONE_PG6, 0, 0, pbn_computone_6 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI95N, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi }, { PCI_VENDOR_ID_TIMEDIA, PCI_DEVICE_ID_TIMEDIA_1889, PCI_VENDOR_ID_TIMEDIA, PCI_ANY_ID, 0, 0, pbn_b0_bt_1_921600 }, /* * SUNIX (TIMEDIA) */ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX, PCI_ANY_ID, PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xffff00, pbn_b0_bt_1_921600 }, { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX, PCI_ANY_ID, PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, pbn_b0_bt_1_921600 }, /* * AFAVLAB serial card, from Harald Welte <laforge@gnumonks.org> */ { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_8_115200 }, { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_8_115200 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DSERIAL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_460800 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_460800 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PORT_PLUS, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_460800 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUAD_A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_460800 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUAD_B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_460800 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_SSERIAL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
pbn_b0_bt_1_115200 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PORT_650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_1_460800 }, /* * Korenix Jetcard F0/F1 cards (JC1204, JC1208, JC1404, JC1408). * Cards are identified by their subsystem vendor IDs, which * (in hex) match the model number. * * Note that JC140x are RS422/485 cards which require ox950 * ACR = 0x10, and as such are not currently fully supported. */ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0, 0x1204, 0x0004, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0, 0x1208, 0x0004, 0, 0, pbn_b0_4_921600 }, /* { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0, 0x1402, 0x0002, 0, 0, pbn_b0_2_921600 }, */ /* { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0, 0x1404, 0x0004, 0, 0, pbn_b0_4_921600 }, */ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF1, 0x1208, 0x0004, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2, 0x1204, 0x0004, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2, 0x1208, 0x0004, 0, 0, pbn_b0_4_921600 }, { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF3, 0x1208, 0x0004, 0, 0, pbn_b0_4_921600 }, /* * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com */ { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RAC4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_1_1382400 }, /* * Dell Remote Access Card III - Tim_T_Murphy@Dell.com */ { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RACIII, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_1_1382400 }, /* * RAStel 2 port modem, gerg@moreton.com.au */ { PCI_VENDOR_ID_MORETON, PCI_DEVICE_ID_RASTEL_2PORT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_115200 }, /* * EKF addition for i960 Boards form EKF with serial port */ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80960_RP, 0xE4BF, PCI_ANY_ID, 0, 0, pbn_intel_i960 }, /* * Xircom Cardbus/Ethernet combos */ { PCI_VENDOR_ID_XIRCOM, PCI_DEVICE_ID_XIRCOM_X3201_MDM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_115200 }, /* * Xircom 
RBM56G cardbus modem - Dirk Arnold (temp entry) */ { PCI_VENDOR_ID_XIRCOM, PCI_DEVICE_ID_XIRCOM_RBM56G, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_115200 }, /* * Untested PCI modems, sent in from various folks... */ /* * Elsa Model 56K PCI Modem, from Andreas Rath <arh@01019freenet.de> */ { PCI_VENDOR_ID_ROCKWELL, 0x1004, 0x1048, 0x1500, 0, 0, pbn_b1_1_115200 }, { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, 0xFF00, 0, 0, 0, pbn_sgi_ioc3 }, /* * HP Diva card */ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA, PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_RMP3, 0, 0, pbn_b1_1_115200 }, { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_5_115200 }, { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_1_115200 }, { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b3_2_115200 }, { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b3_4_115200 }, { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b3_8_115200 }, /* * Exar Corp. XR17C15[248] Dual/Quad/Octal UART */ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_exar_XR17C152 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_exar_XR17C154 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_exar_XR17C158 }, /* * Exar Corp. 
XR17V35[248] Dual/Quad/Octal PCIe UARTs */ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_exar_XR17V352 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V354, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_exar_XR17V354 }, { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V358, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_exar_XR17V358 }, /* * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) */ { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_115200 }, /* * ITE */ { PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8872, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_1_115200 }, /* * IntaShield IS-200 */ { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0811 */ pbn_b2_2_115200 }, /* * IntaShield IS-400 */ { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ pbn_b2_4_115200 }, /* * Perle PCI-RAS cards */ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_SUBVENDOR_ID_PERLE, PCI_SUBDEVICE_ID_PCI_RAS4, 0, 0, pbn_b2_4_921600 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_SUBVENDOR_ID_PERLE, PCI_SUBDEVICE_ID_PCI_RAS8, 0, 0, pbn_b2_8_921600 }, /* * Mainpine series cards: Fairly standard layout but fools * parts of the autodetect in some cases and uses otherwise * unmatched communications subclasses in the PCI Express case */ { /* RockForceDUO */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x0200, 0, 0, pbn_b0_2_115200 }, { /* RockForceQUATRO */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x0300, 0, 0, pbn_b0_4_115200 }, { /* RockForceDUO+ */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x0400, 0, 0, pbn_b0_2_115200 }, { /* RockForceQUATRO+ */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x0500, 0, 0, pbn_b0_4_115200 }, { /* RockForce+ */ PCI_VENDOR_ID_MAINPINE, 
PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x0600, 0, 0, pbn_b0_2_115200 }, { /* RockForce+ */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x0700, 0, 0, pbn_b0_4_115200 }, { /* RockForceOCTO+ */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x0800, 0, 0, pbn_b0_8_115200 }, { /* RockForceDUO+ */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x0C00, 0, 0, pbn_b0_2_115200 }, { /* RockForceQUARTRO+ */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x0D00, 0, 0, pbn_b0_4_115200 }, { /* RockForceOCTO+ */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x1D00, 0, 0, pbn_b0_8_115200 }, { /* RockForceD1 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x2000, 0, 0, pbn_b0_1_115200 }, { /* RockForceF1 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x2100, 0, 0, pbn_b0_1_115200 }, { /* RockForceD2 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x2200, 0, 0, pbn_b0_2_115200 }, { /* RockForceF2 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x2300, 0, 0, pbn_b0_2_115200 }, { /* RockForceD4 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x2400, 0, 0, pbn_b0_4_115200 }, { /* RockForceF4 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x2500, 0, 0, pbn_b0_4_115200 }, { /* RockForceD8 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x2600, 0, 0, pbn_b0_8_115200 }, { /* RockForceF8 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x2700, 0, 0, pbn_b0_8_115200 }, { /* IQ Express D1 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x3000, 0, 0, pbn_b0_1_115200 }, { /* IQ Express F1 */ 
PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x3100, 0, 0, pbn_b0_1_115200 }, { /* IQ Express D2 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x3200, 0, 0, pbn_b0_2_115200 }, { /* IQ Express F2 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x3300, 0, 0, pbn_b0_2_115200 }, { /* IQ Express D4 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x3400, 0, 0, pbn_b0_4_115200 }, { /* IQ Express F4 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x3500, 0, 0, pbn_b0_4_115200 }, { /* IQ Express D8 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x3C00, 0, 0, pbn_b0_8_115200 }, { /* IQ Express F8 */ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE, PCI_VENDOR_ID_MAINPINE, 0x3D00, 0, 0, pbn_b0_8_115200 }, /* * PA Semi PA6T-1682M on-chip UART */ { PCI_VENDOR_ID_PASEMI, 0xa004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pasemi_1682M }, /* * National Instruments */ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI23216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_16_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2328, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_8_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_4_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_2_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2324I, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_4_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2322I, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_2_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_23216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_16_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_2328, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_8_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_2324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_4_115200 }, { PCI_VENDOR_ID_NI, 
PCI_DEVICE_ID_NI_PXI8420_2322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_2_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8422_2324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_4_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8422_2322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_bt_2_115200 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_2322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_2 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_2322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_2 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_2324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_4 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_2324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_4 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_2328, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_8 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_2328, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_8 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_23216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_16 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_23216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_16 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8432_2322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_2 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_2 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8432_2324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_4 }, { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ni8430_4 }, /* * ADDI-DATA GmbH communication cards <info@addi-data.com> */ { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCI7500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_115200 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCI7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_2_115200 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCI7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_115200 }, { PCI_VENDOR_ID_ADDIDATA_OLD, PCI_DEVICE_ID_ADDIDATA_APCI7800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b1_8_115200 }, { PCI_VENDOR_ID_ADDIDATA, 
PCI_DEVICE_ID_ADDIDATA_APCI7500_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_115200 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCI7420_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_2_115200 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCI7300_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_115200 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCI7500_3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_115200 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCI7420_3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_2_115200 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCI7300_3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_1_115200 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCI7800_3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_8_115200 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCIe7500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ADDIDATA_PCIe_4_3906250 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCIe7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ADDIDATA_PCIe_2_3906250 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCIe7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ADDIDATA_PCIe_1_3906250 }, { PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_ADDIDATA_APCIe7800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ADDIDATA_PCIe_8_3906250 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835, PCI_VENDOR_ID_IBM, 0x0299, 0, 0, pbn_b0_bt_2_115200 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, 0xA000, 0x1000, 0, 0, pbn_b0_1_115200 }, /* the 9901 is a rebranded 9912 */ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9912, 0xA000, 0x1000, 0, 0, pbn_b0_1_115200 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922, 0xA000, 0x1000, 0, 0, pbn_b0_1_115200 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9904, 0xA000, 0x1000, 0, 0, pbn_b0_1_115200 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900, 0xA000, 0x1000, 0, 0, pbn_b0_1_115200 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900, 0xA000, 0x3002, 0, 0, pbn_NETMOS9900_2s_115200 }, /* * Best Connectivity and Rosewill PCI Multi I/O cards */ { 
PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 0xA000, 0x1000, 0, 0, pbn_b0_1_115200 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 0xA000, 0x3002, 0, 0, pbn_b0_bt_2_115200 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 0xA000, 0x3004, 0, 0, pbn_b0_bt_4_115200 }, /* Intel CE4100 */ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100_UART, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_ce4100_1_115200 }, /* * Cronyx Omega PCI */ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_omegapci }, /* * Broadcom TruManage */ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_brcm_trumanage }, /* * AgeStar as-prs2-009 */ { PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, /* * WCH CH353 series devices: The 2S1P is handled by parport_serial * so not listed here. */ { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_4S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_115200 }, { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_2S1PF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, /* * Commtech, Inc. 
Fastcom adapters */
	{	PCI_VENDOR_ID_COMMTECH, PCI_DEVICE_ID_COMMTECH_4222PCI335,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		pbn_b0_2_1152000_200 },
	{	PCI_VENDOR_ID_COMMTECH, PCI_DEVICE_ID_COMMTECH_4224PCI335,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		pbn_b0_4_1152000_200 },
	{	PCI_VENDOR_ID_COMMTECH, PCI_DEVICE_ID_COMMTECH_2324PCI335,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		pbn_b0_4_1152000_200 },
	{	PCI_VENDOR_ID_COMMTECH, PCI_DEVICE_ID_COMMTECH_2328PCI335,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		pbn_b0_8_1152000_200 },
	{	PCI_VENDOR_ID_COMMTECH, PCI_DEVICE_ID_COMMTECH_4222PCIE,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		pbn_exar_XR17V352 },
	{	PCI_VENDOR_ID_COMMTECH, PCI_DEVICE_ID_COMMTECH_4224PCIE,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		pbn_exar_XR17V354 },
	{	PCI_VENDOR_ID_COMMTECH, PCI_DEVICE_ID_COMMTECH_4228PCIE,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		pbn_exar_XR17V358 },
	/*
	 * These entries match devices with class COMMUNICATION_SERIAL,
	 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
	 */
	{	PCI_ANY_ID, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_COMMUNICATION_SERIAL << 8,
		0xffff00, pbn_default },
	{	PCI_ANY_ID, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_COMMUNICATION_MODEM << 8,
		0xffff00, pbn_default },
	{	PCI_ANY_ID, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_COMMUNICATION_MULTISERIAL << 8,
		0xffff00, pbn_default },
	/* Table terminator - all-zero entry marks the end for the PCI core */
	{ 0, }
};

/*
 * PCI error recovery: the PCI core reports a channel error on this
 * device.  Suspend the serial ports (if probe completed) and disable
 * the device, then ask for a slot reset - unless the failure is
 * permanent, in which case request disconnection.
 */
static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
	pci_channel_state_t state)
{
	struct serial_private *priv = pci_get_drvdata(dev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* priv may be NULL if the error arrives before probe finished */
	if (priv)
		pciserial_suspend_ports(priv);

	pci_disable_device(dev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * PCI error recovery: the slot has been reset.  Re-enable the device,
 * restore its saved config space, and immediately re-save it so a
 * later reset can restore again.
 */
static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
{
	int rc;

	rc = pci_enable_device(dev);

	if (rc)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_restore_state(dev);
	pci_save_state(dev);

	return PCI_ERS_RESULT_RECOVERED;
}

/*
 * PCI error recovery: traffic may flow again - bring the suspended
 * serial ports back up.
 */
static void serial8250_io_resume(struct pci_dev *dev)
{
	struct serial_private *priv = pci_get_drvdata(dev);

	/* Nothing to resume if probe never attached private data */
	if (priv)
		pciserial_resume_ports(priv);
}
/* Error-recovery callbacks handed to the PCI core via .err_handler below */
static const struct pci_error_handlers serial8250_err_handler = {
	.error_detected = serial8250_io_error_detected,
	.slot_reset = serial8250_io_slot_reset,
	.resume = serial8250_io_resume,
};

/*
 * The PCI driver itself: probe/remove, optional PM hooks, the device
 * ID table and the error handlers defined above.
 */
static struct pci_driver serial_pci_driver = {
	.name = "serial",
	.probe = pciserial_init_one,
	.remove = pciserial_remove_one,
#ifdef CONFIG_PM
	.suspend = pciserial_suspend_one,
	.resume = pciserial_resume_one,
#endif
	.id_table = serial_pci_tbl,
	.err_handler = &serial8250_err_handler,
};

/* Registers the driver on module load, unregisters on unload */
module_pci_driver(serial_pci_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic 8250/16x50 PCI serial probe module");
MODULE_DEVICE_TABLE(pci, serial_pci_tbl);
gpl-2.0
linino/linux
drivers/video/fbdev/sh_mobile_hdmi.c
1290
53512
/* * SH-Mobile High-Definition Multimedia Interface (HDMI) driver * for SLISHDMI13T and SLIPHDMIT IP cores * * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/workqueue.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <video/sh_mobile_hdmi.h> #include <video/sh_mobile_lcdc.h> #include "sh_mobile_lcdcfb.h" /* HDMI Core Control Register (HTOP0) */ #define HDMI_SYSTEM_CTRL 0x00 /* System control */ #define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control, bits 19..16 of 20-bit N for Audio Clock Regeneration packet */ #define HDMI_20_BIT_N_FOR_AUDIO_RPKT_15_8 0x02 /* bits 15..8 of 20-bit N for Audio Clock Regeneration packet */ #define HDMI_20_BIT_N_FOR_AUDIO_RPKT_7_0 0x03 /* bits 7..0 of 20-bit N for Audio Clock Regeneration packet */ #define HDMI_SPDIF_AUDIO_SAMP_FREQ_CTS 0x04 /* SPDIF audio sampling frequency, bits 19..16 of Internal CTS */ #define HDMI_INTERNAL_CTS_15_8 0x05 /* bits 15..8 of Internal CTS */ #define HDMI_INTERNAL_CTS_7_0 0x06 /* bits 7..0 of Internal CTS */ #define HDMI_EXTERNAL_CTS_19_16 0x07 /* External CTS */ #define HDMI_EXTERNAL_CTS_15_8 0x08 /* External CTS */ #define HDMI_EXTERNAL_CTS_7_0 0x09 /* External CTS */ #define HDMI_AUDIO_SETTING_1 0x0A /* Audio setting.1 */ #define HDMI_AUDIO_SETTING_2 0x0B /* Audio setting.2 */ #define HDMI_I2S_AUDIO_SET 0x0C /* I2S audio setting */ #define HDMI_DSD_AUDIO_SET 0x0D /* DSD audio setting */ #define HDMI_DEBUG_MONITOR_1 0x0E 
/* Debug monitor.1 */ #define HDMI_DEBUG_MONITOR_2 0x0F /* Debug monitor.2 */ #define HDMI_I2S_INPUT_PIN_SWAP 0x10 /* I2S input pin swap */ #define HDMI_AUDIO_STATUS_BITS_SETTING_1 0x11 /* Audio status bits setting.1 */ #define HDMI_AUDIO_STATUS_BITS_SETTING_2 0x12 /* Audio status bits setting.2 */ #define HDMI_CATEGORY_CODE 0x13 /* Category code */ #define HDMI_SOURCE_NUM_AUDIO_WORD_LEN 0x14 /* Source number/Audio word length */ #define HDMI_AUDIO_VIDEO_SETTING_1 0x15 /* Audio/Video setting.1 */ #define HDMI_VIDEO_SETTING_1 0x16 /* Video setting.1 */ #define HDMI_DEEP_COLOR_MODES 0x17 /* Deep Color Modes */ /* 12 16- and 10-bit Color space conversion parameters: 0x18..0x2f */ #define HDMI_COLOR_SPACE_CONVERSION_PARAMETERS 0x18 #define HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS 0x30 /* External video parameter settings */ #define HDMI_EXTERNAL_H_TOTAL_7_0 0x31 /* External horizontal total (LSB) */ #define HDMI_EXTERNAL_H_TOTAL_11_8 0x32 /* External horizontal total (MSB) */ #define HDMI_EXTERNAL_H_BLANK_7_0 0x33 /* External horizontal blank (LSB) */ #define HDMI_EXTERNAL_H_BLANK_9_8 0x34 /* External horizontal blank (MSB) */ #define HDMI_EXTERNAL_H_DELAY_7_0 0x35 /* External horizontal delay (LSB) */ #define HDMI_EXTERNAL_H_DELAY_9_8 0x36 /* External horizontal delay (MSB) */ #define HDMI_EXTERNAL_H_DURATION_7_0 0x37 /* External horizontal duration (LSB) */ #define HDMI_EXTERNAL_H_DURATION_9_8 0x38 /* External horizontal duration (MSB) */ #define HDMI_EXTERNAL_V_TOTAL_7_0 0x39 /* External vertical total (LSB) */ #define HDMI_EXTERNAL_V_TOTAL_9_8 0x3A /* External vertical total (MSB) */ #define HDMI_AUDIO_VIDEO_SETTING_2 0x3B /* Audio/Video setting.2 */ #define HDMI_EXTERNAL_V_BLANK 0x3D /* External vertical blank */ #define HDMI_EXTERNAL_V_DELAY 0x3E /* External vertical delay */ #define HDMI_EXTERNAL_V_DURATION 0x3F /* External vertical duration */ #define HDMI_CTRL_PKT_MANUAL_SEND_CONTROL 0x40 /* Control packet manual send control */ #define HDMI_CTRL_PKT_AUTO_SEND 0x41 
/* Control packet auto send with VSYNC control */ #define HDMI_AUTO_CHECKSUM_OPTION 0x42 /* Auto checksum option */ #define HDMI_VIDEO_SETTING_2 0x45 /* Video setting.2 */ #define HDMI_OUTPUT_OPTION 0x46 /* Output option */ #define HDMI_SLIPHDMIT_PARAM_OPTION 0x51 /* SLIPHDMIT parameter option */ #define HDMI_HSYNC_PMENT_AT_EMB_7_0 0x52 /* HSYNC placement at embedded sync (LSB) */ #define HDMI_HSYNC_PMENT_AT_EMB_15_8 0x53 /* HSYNC placement at embedded sync (MSB) */ #define HDMI_VSYNC_PMENT_AT_EMB_7_0 0x54 /* VSYNC placement at embedded sync (LSB) */ #define HDMI_VSYNC_PMENT_AT_EMB_14_8 0x55 /* VSYNC placement at embedded sync (MSB) */ #define HDMI_SLIPHDMIT_PARAM_SETTINGS_1 0x56 /* SLIPHDMIT parameter settings.1 */ #define HDMI_SLIPHDMIT_PARAM_SETTINGS_2 0x57 /* SLIPHDMIT parameter settings.2 */ #define HDMI_SLIPHDMIT_PARAM_SETTINGS_3 0x58 /* SLIPHDMIT parameter settings.3 */ #define HDMI_SLIPHDMIT_PARAM_SETTINGS_5 0x59 /* SLIPHDMIT parameter settings.5 */ #define HDMI_SLIPHDMIT_PARAM_SETTINGS_6 0x5A /* SLIPHDMIT parameter settings.6 */ #define HDMI_SLIPHDMIT_PARAM_SETTINGS_7 0x5B /* SLIPHDMIT parameter settings.7 */ #define HDMI_SLIPHDMIT_PARAM_SETTINGS_8 0x5C /* SLIPHDMIT parameter settings.8 */ #define HDMI_SLIPHDMIT_PARAM_SETTINGS_9 0x5D /* SLIPHDMIT parameter settings.9 */ #define HDMI_SLIPHDMIT_PARAM_SETTINGS_10 0x5E /* SLIPHDMIT parameter settings.10 */ #define HDMI_CTRL_PKT_BUF_INDEX 0x5F /* Control packet buffer index */ #define HDMI_CTRL_PKT_BUF_ACCESS_HB0 0x60 /* Control packet data buffer access window - HB0 */ #define HDMI_CTRL_PKT_BUF_ACCESS_HB1 0x61 /* Control packet data buffer access window - HB1 */ #define HDMI_CTRL_PKT_BUF_ACCESS_HB2 0x62 /* Control packet data buffer access window - HB2 */ #define HDMI_CTRL_PKT_BUF_ACCESS_PB0 0x63 /* Control packet data buffer access window - PB0 */ #define HDMI_CTRL_PKT_BUF_ACCESS_PB1 0x64 /* Control packet data buffer access window - PB1 */ #define HDMI_CTRL_PKT_BUF_ACCESS_PB2 0x65 /* Control packet data 
buffer access window - PB2 */
/* Control-packet payload buffer windows PB3..PB27 (one byte each) */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB3	0x66 /* Control packet data buffer access window - PB3 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB4	0x67 /* Control packet data buffer access window - PB4 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB5	0x68 /* Control packet data buffer access window - PB5 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB6	0x69 /* Control packet data buffer access window - PB6 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB7	0x6A /* Control packet data buffer access window - PB7 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB8	0x6B /* Control packet data buffer access window - PB8 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB9	0x6C /* Control packet data buffer access window - PB9 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB10	0x6D /* Control packet data buffer access window - PB10 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB11	0x6E /* Control packet data buffer access window - PB11 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB12	0x6F /* Control packet data buffer access window - PB12 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB13	0x70 /* Control packet data buffer access window - PB13 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB14	0x71 /* Control packet data buffer access window - PB14 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB15	0x72 /* Control packet data buffer access window - PB15 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB16	0x73 /* Control packet data buffer access window - PB16 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB17	0x74 /* Control packet data buffer access window - PB17 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB18	0x75 /* Control packet data buffer access window - PB18 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB19	0x76 /* Control packet data buffer access window - PB19 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB20	0x77 /* Control packet data buffer access window - PB20 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB21	0x78 /* Control packet data buffer access window - PB21 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB22	0x79 /* Control packet data buffer access window - PB22 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB23	0x7A /* Control packet data buffer access window - PB23 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB24	0x7B /* Control packet data buffer access window - PB24 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB25	0x7C /* Control packet data buffer access window - PB25 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB26	0x7D /* Control packet data buffer access window - PB26 */
#define HDMI_CTRL_PKT_BUF_ACCESS_PB27	0x7E /* Control packet data buffer access window - PB27 */
/* EDID / DDC access */
#define HDMI_EDID_KSV_FIFO_ACCESS_WINDOW	0x80 /* EDID/KSV FIFO access window */
#define HDMI_DDC_BUS_ACCESS_FREQ_CTRL_7_0	0x81 /* DDC bus access frequency control (LSB) */
#define HDMI_DDC_BUS_ACCESS_FREQ_CTRL_15_8	0x82 /* DDC bus access frequency control (MSB) */
/* Interrupt mask/status banks */
#define HDMI_INTERRUPT_MASK_1		0x92 /* Interrupt mask.1 */
#define HDMI_INTERRUPT_MASK_2		0x93 /* Interrupt mask.2 */
#define HDMI_INTERRUPT_STATUS_1		0x94 /* Interrupt status.1 */
#define HDMI_INTERRUPT_STATUS_2		0x95 /* Interrupt status.2 */
#define HDMI_INTERRUPT_MASK_3		0x96 /* Interrupt mask.3 */
#define HDMI_INTERRUPT_MASK_4		0x97 /* Interrupt mask.4 */
#define HDMI_INTERRUPT_STATUS_3		0x98 /* Interrupt status.3 */
#define HDMI_INTERRUPT_STATUS_4		0x99 /* Interrupt status.4 */
/* HDCP engine registers */
#define HDMI_SOFTWARE_HDCP_CONTROL_1	0x9A /* Software HDCP control.1 */
#define HDMI_FRAME_COUNTER		0x9C /* Frame counter */
#define HDMI_FRAME_COUNTER_FOR_RI_CHECK	0x9D /* Frame counter for Ri check */
#define HDMI_HDCP_CONTROL		0xAF /* HDCP control */
#define HDMI_RI_FRAME_COUNT_REGISTER	0xB2 /* Ri frame count register */
#define HDMI_DDC_BUS_CONTROL		0xB7 /* DDC bus control */
#define HDMI_HDCP_STATUS		0xB8 /* HDCP status */
#define HDMI_SHA0			0xB9 /* sha0 */
#define HDMI_SHA1			0xBA /* sha1 */
#define HDMI_SHA2			0xBB /* sha2 */
#define HDMI_SHA3			0xBC /* sha3 */
#define HDMI_SHA4			0xBD /* sha4 */
#define HDMI_BCAPS_READ			0xBE /* BCAPS read / debug */
#define HDMI_AKSV_BKSV_7_0_MONITOR	0xBF /* AKSV/BKSV[7:0] monitor */
#define HDMI_AKSV_BKSV_15_8_MONITOR	0xC0 /* AKSV/BKSV[15:8] monitor */
#define HDMI_AKSV_BKSV_23_16_MONITOR	0xC1 /* AKSV/BKSV[23:16] monitor */
#define HDMI_AKSV_BKSV_31_24_MONITOR	0xC2 /* AKSV/BKSV[31:24] monitor */
#define HDMI_AKSV_BKSV_39_32_MONITOR	0xC3 /* AKSV/BKSV[39:32] monitor */
/* EDID read engine */
#define HDMI_EDID_SEGMENT_POINTER	0xC4 /* EDID segment pointer */
#define HDMI_EDID_WORD_ADDRESS		0xC5 /* EDID word address */
#define HDMI_EDID_DATA_FIFO_ADDRESS	0xC6 /* EDID data FIFO address */
#define HDMI_NUM_OF_HDMI_DEVICES	0xC7 /* Number of HDMI devices */
#define HDMI_HDCP_ERROR_CODE		0xC8 /* HDCP error code */
#define HDMI_100MS_TIMER_SET		0xC9 /* 100ms timer setting */
#define HDMI_5SEC_TIMER_SET		0xCA /* 5sec timer setting */
#define HDMI_RI_READ_COUNT		0xCB /* Ri read count */
#define HDMI_AN_SEED			0xCC /* An seed */
#define HDMI_MAX_NUM_OF_RCIVRS_ALLOWED	0xCD /* Maximum number of receivers allowed */
#define HDMI_HDCP_MEMORY_ACCESS_CONTROL_1	0xCE /* HDCP memory access control.1 */
#define HDMI_HDCP_MEMORY_ACCESS_CONTROL_2	0xCF /* HDCP memory access control.2 */
#define HDMI_HDCP_CONTROL_2		0xD0 /* HDCP Control 2 */
#define HDMI_HDCP_KEY_MEMORY_CONTROL	0xD2 /* HDCP Key Memory Control */
#define HDMI_COLOR_SPACE_CONV_CONFIG_1	0xD3 /* Color space conversion configuration.1 */
#define HDMI_VIDEO_SETTING_3		0xD4 /* Video setting.3 */
#define HDMI_RI_7_0			0xD5 /* Ri[7:0] */
#define HDMI_RI_15_8			0xD6 /* Ri[15:8] */
#define HDMI_PJ				0xD7 /* Pj */
#define HDMI_SHA_RD			0xD8 /* sha_rd */
#define HDMI_RI_7_0_SAVED		0xD9 /* Ri[7:0] saved */
#define HDMI_RI_15_8_SAVED		0xDA /* Ri[15:8] saved */
#define HDMI_PJ_SAVED			0xDB /* Pj saved */
#define HDMI_NUM_OF_DEVICES		0xDC /* Number of devices */
#define HDMI_HOT_PLUG_MSENS_STATUS	0xDF /* Hot plug/MSENS status */
#define HDMI_BCAPS_WRITE		0xE0 /* bcaps */
#define HDMI_BSTAT_7_0			0xE1 /* bstat[7:0] */
#define HDMI_BSTAT_15_8			0xE2 /* bstat[15:8] */
#define HDMI_BKSV_7_0			0xE3 /* bksv[7:0] */
#define HDMI_BKSV_15_8			0xE4 /* bksv[15:8] */
#define HDMI_BKSV_23_16			0xE5 /* bksv[23:16] */
#define HDMI_BKSV_31_24			0xE6 /* bksv[31:24] */
#define HDMI_BKSV_39_32			0xE7 /* bksv[39:32] */
#define HDMI_AN_7_0			0xE8 /* An[7:0] */
#define HDMI_AN_15_8			0xE9 /* An [15:8] */
#define HDMI_AN_23_16			0xEA /* An [23:16] */
#define HDMI_AN_31_24			0xEB /* An [31:24] */
#define HDMI_AN_39_32			0xEC /* An [39:32] */
#define HDMI_AN_47_40			0xED /* An [47:40] */
#define HDMI_AN_55_48			0xEE /* An [55:48] */
#define HDMI_AN_63_56			0xEF /* An [63:56] */
#define HDMI_PRODUCT_ID			0xF0 /* Product ID */
#define HDMI_REVISION_ID		0xF1 /* Revision ID */
#define HDMI_TEST_MODE			0xFE /* Test mode */

/*
 * HDMI Control Register (HTOP1) block.
 * These offsets are 32-bit-wide registers; presumably accessed through
 * hdmi_htop1_read()/hdmi_htop1_write() on the separate "htop1" mapping.
 */
#define HDMI_HTOP1_TEST_MODE		0x0000 /* Test mode */
#define HDMI_HTOP1_VIDEO_INPUT		0x0008 /* VideoInput */
#define HDMI_HTOP1_CORE_RSTN		0x000C /* CoreResetn */
#define HDMI_HTOP1_PLLBW		0x0018 /* PLLBW */
#define HDMI_HTOP1_CLK_TO_PHY		0x001C /* Clk to Phy */
#define HDMI_HTOP1_VIDEO_INPUT2		0x0020 /* VideoInput2 */
#define HDMI_HTOP1_TISEMP0_1		0x0024 /* tisemp0-1 */
#define HDMI_HTOP1_TISEMP2_C		0x0028 /* tisemp2-c */
#define HDMI_HTOP1_TISIDRV		0x002C /* tisidrv */
#define HDMI_HTOP1_TISEN		0x0034 /* tisen */
#define HDMI_HTOP1_TISDREN		0x0038 /* tisdren */
#define HDMI_HTOP1_CISRANGE		0x003C /* cisrange */
#define HDMI_HTOP1_ENABLE_SELECTOR	0x0040 /* Enable Selector */
#define HDMI_HTOP1_MACRO_RESET		0x0044 /* Macro reset */
#define HDMI_HTOP1_PLL_CALIBRATION	0x0048 /* PLL calibration */
#define HDMI_HTOP1_RE_CALIBRATION	0x004C /* Re-calibration */
#define HDMI_HTOP1_CURRENT		0x0050 /* Current */
#define HDMI_HTOP1_PLL_LOCK_DETECT	0x0054 /* PLL lock detect */
#define HDMI_HTOP1_PHY_TEST_MODE	0x0058 /* PHY Test Mode */
#define HDMI_HTOP1_CLK_SET		0x0080 /* Clock Set */
#define HDMI_HTOP1_DDC_FAIL_SAFE	0x0084 /* DDC fail safe */
#define HDMI_HTOP1_PRBS			0x0088 /* PRBS */
#define HDMI_HTOP1_EDID_AINC_CONTROL	0x008C /* EDID ainc Control */
#define HDMI_HTOP1_HTOP_DCL_MODE	0x00FC /* Deep Color Mode */
#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0	0x0100 /* Deep Color:FRC COEF0 */
#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1	0x0104 /* Deep Color:FRC COEF1 */
#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2	0x0108 /* Deep Color:FRC COEF2 */
#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3	0x010C /* Deep Color:FRC COEF3 */
#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0_C	0x0110 /* Deep Color:FRC COEF0C */
#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1_C	0x0114 /* Deep Color:FRC COEF1C */
#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2_C	0x0118 /* Deep Color:FRC COEF2C */
#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3_C	0x011C /* Deep Color:FRC COEF3C */
#define HDMI_HTOP1_HTOP_DCL_FRC_MODE	0x0120 /* Deep Color:FRC Mode */
#define HDMI_HTOP1_HTOP_DCL_RECT_START1	0x0124 /* Deep Color:Rect Start1 */
#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE1	0x0128 /* Deep Color:Rect Size1 */
#define HDMI_HTOP1_HTOP_DCL_RECT_START2	0x012C /* Deep Color:Rect Start2 */
#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE2	0x0130 /* Deep Color:Rect Size2 */
#define HDMI_HTOP1_HTOP_DCL_RECT_START3	0x0134 /* Deep Color:Rect Start3 */
#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE3	0x0138 /* Deep Color:Rect Size3 */
#define HDMI_HTOP1_HTOP_DCL_RECT_START4	0x013C /* Deep Color:Rect Start4 */
#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE4	0x0140 /* Deep Color:Rect Size4 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1	0x0144 /* Deep Color:Fil Para Y1_1 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2	0x0148 /* Deep Color:Fil Para Y1_2 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1	0x014C /* Deep Color:Fil Para CB1_1 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2	0x0150 /* Deep Color:Fil Para CB1_2 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1	0x0154 /* Deep Color:Fil Para CR1_1 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2	0x0158 /* Deep Color:Fil Para CR1_2 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1	0x015C /* Deep Color:Fil Para Y2_1 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2	0x0160 /* Deep Color:Fil Para Y2_2 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1	0x0164 /* Deep Color:Fil Para CB2_1 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2	0x0168 /* Deep Color:Fil Para CB2_2 */
#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1	0x016C
/* Deep Color:Fil Para CR2_1 */ #define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2 0x0170 /* Deep Color:Fil Para CR2_2 */ #define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1 0x0174 /* Deep Color:Cor Para Y1 */ #define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1 0x0178 /* Deep Color:Cor Para CB1 */ #define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1 0x017C /* Deep Color:Cor Para CR1 */ #define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2 0x0180 /* Deep Color:Cor Para Y2 */ #define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2 0x0184 /* Deep Color:Cor Para CB2 */ #define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2 0x0188 /* Deep Color:Cor Para CR2 */ #define HDMI_HTOP1_EDID_DATA_READ 0x0200 /* EDID Data Read 128Byte:0x03FC */ enum hotplug_state { HDMI_HOTPLUG_DISCONNECTED, HDMI_HOTPLUG_CONNECTED, HDMI_HOTPLUG_EDID_DONE, }; struct sh_hdmi { struct sh_mobile_lcdc_entity entity; void __iomem *base; void __iomem *htop1; enum hotplug_state hp_state; /* hot-plug status */ u8 preprogrammed_vic; /* use a pre-programmed VIC or the external mode */ u8 edid_block_addr; u8 edid_segment_nr; u8 edid_blocks; int irq; struct clk *hdmi_clk; struct device *dev; struct delayed_work edid_work; struct fb_videomode mode; struct fb_monspecs monspec; /* register access functions */ void (*write)(struct sh_hdmi *hdmi, u8 data, u8 reg); u8 (*read)(struct sh_hdmi *hdmi, u8 reg); }; #define entity_to_sh_hdmi(e) container_of(e, struct sh_hdmi, entity) static void __hdmi_write8(struct sh_hdmi *hdmi, u8 data, u8 reg) { iowrite8(data, hdmi->base + reg); } static u8 __hdmi_read8(struct sh_hdmi *hdmi, u8 reg) { return ioread8(hdmi->base + reg); } static void __hdmi_write32(struct sh_hdmi *hdmi, u8 data, u8 reg) { iowrite32((u32)data, hdmi->base + (reg * 4)); udelay(100); } static u8 __hdmi_read32(struct sh_hdmi *hdmi, u8 reg) { return (u8)ioread32(hdmi->base + (reg * 4)); } static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg) { hdmi->write(hdmi, data, reg); } static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg) { return hdmi->read(hdmi, reg); } static void 
hdmi_bit_set(struct sh_hdmi *hdmi, u8 mask, u8 data, u8 reg)
{
	u8 val = hdmi_read(hdmi, reg);

	val &= ~mask;
	val |= (data & mask);

	hdmi_write(hdmi, val, reg);
}

/* HTOP1 window accessors: 32-bit registers, write followed by a settle delay */
static void hdmi_htop1_write(struct sh_hdmi *hdmi, u32 data, u32 reg)
{
	iowrite32(data, hdmi->htop1 + reg);
	udelay(100);
}

static u32 hdmi_htop1_read(struct sh_hdmi *hdmi, u32 reg)
{
	return ioread32(hdmi->htop1 + reg);
}

/*
 *	HDMI sound
 */

/* ASoC codec-level register read: forwards to the HDMI register accessors */
static unsigned int sh_hdmi_snd_read(struct snd_soc_codec *codec,
				     unsigned int reg)
{
	struct sh_hdmi *hdmi = snd_soc_codec_get_drvdata(codec);

	return hdmi_read(hdmi, reg);
}

/* ASoC codec-level register write: forwards to the HDMI register accessors */
static int sh_hdmi_snd_write(struct snd_soc_codec *codec,
			     unsigned int reg,
			     unsigned int value)
{
	struct sh_hdmi *hdmi = snd_soc_codec_get_drvdata(codec);

	hdmi_write(hdmi, value, reg);
	return 0;
}

static struct snd_soc_dai_driver sh_hdmi_dai = {
	.name = "sh_mobile_hdmi-hifi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 2,
		.channels_max = 8,
		.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
			 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
			 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
			 SNDRV_PCM_RATE_192000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
	},
};

static int sh_hdmi_snd_probe(struct snd_soc_codec *codec)
{
	dev_info(codec->dev, "SH Mobile HDMI Audio Codec");
	return 0;
}

static struct snd_soc_codec_driver soc_codec_dev_sh_hdmi = {
	.probe		= sh_hdmi_snd_probe,
	.read		= sh_hdmi_snd_read,
	.write		= sh_hdmi_snd_write,
};

/*
 *	HDMI video
 */

/* External video parameter settings */
static void sh_hdmi_external_video_param(struct sh_hdmi *hdmi)
{
	struct fb_videomode *mode = &hdmi->mode;
	u16 htotal, hblank, hdelay, vtotal, vblank, vdelay, voffset;
	u8 sync = 0;

	/* Derive the chip's H timing fields from the fbdev mode geometry */
	htotal = mode->xres + mode->right_margin + mode->left_margin +
		mode->hsync_len;
	hdelay = mode->hsync_len + mode->left_margin;
	hblank = mode->right_margin + hdelay;

	/*
	 * Vertical timing looks a bit different in Figure 18,
	 * but let's try the same first by setting offset = 0
	 */
	vtotal = mode->yres + mode->upper_margin +
mode->lower_margin + mode->vsync_len;
	vdelay = mode->vsync_len + mode->upper_margin;
	vblank = mode->lower_margin + vdelay;
	voffset = min(mode->upper_margin / 2, 6U);

	/*
	 * [3]: VSYNC polarity: Positive
	 * [2]: HSYNC polarity: Positive
	 * [1]: Interlace/Progressive: Progressive
	 * [0]: External video settings enable: used.
	 */
	if (mode->sync & FB_SYNC_HOR_HIGH_ACT)
		sync |= 4;
	if (mode->sync & FB_SYNC_VERT_HIGH_ACT)
		sync |= 8;

	dev_dbg(hdmi->dev, "H: %u, %u, %u, %u; V: %u, %u, %u, %u; sync 0x%x\n",
		htotal, hblank, hdelay, mode->hsync_len,
		vtotal, vblank, vdelay, mode->vsync_len, sync);

	hdmi_write(hdmi, sync | (voffset << 4), HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS);

	/* Program the split LSB/MSB timing registers */
	hdmi_write(hdmi, htotal, HDMI_EXTERNAL_H_TOTAL_7_0);
	hdmi_write(hdmi, htotal >> 8, HDMI_EXTERNAL_H_TOTAL_11_8);

	hdmi_write(hdmi, hblank, HDMI_EXTERNAL_H_BLANK_7_0);
	hdmi_write(hdmi, hblank >> 8, HDMI_EXTERNAL_H_BLANK_9_8);

	hdmi_write(hdmi, hdelay, HDMI_EXTERNAL_H_DELAY_7_0);
	hdmi_write(hdmi, hdelay >> 8, HDMI_EXTERNAL_H_DELAY_9_8);

	hdmi_write(hdmi, mode->hsync_len, HDMI_EXTERNAL_H_DURATION_7_0);
	hdmi_write(hdmi, mode->hsync_len >> 8, HDMI_EXTERNAL_H_DURATION_9_8);

	hdmi_write(hdmi, vtotal, HDMI_EXTERNAL_V_TOTAL_7_0);
	hdmi_write(hdmi, vtotal >> 8, HDMI_EXTERNAL_V_TOTAL_9_8);

	hdmi_write(hdmi, vblank, HDMI_EXTERNAL_V_BLANK);

	hdmi_write(hdmi, vdelay, HDMI_EXTERNAL_V_DELAY);

	hdmi_write(hdmi, mode->vsync_len, HDMI_EXTERNAL_V_DURATION);

	/* Set bit 0 of HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS here for external mode */
	if (!hdmi->preprogrammed_vic)
		hdmi_write(hdmi, sync | 1 | (voffset << 4),
			   HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS);
}

/**
 * sh_hdmi_video_config() - static audio/video input and colour-depth setup
 */
static void sh_hdmi_video_config(struct sh_hdmi *hdmi)
{
	/*
	 * [7:4]: Audio sampling frequency: 48kHz
	 * [3:1]: Input video format: RGB and YCbCr 4:4:4 (Y on Green)
	 * [0]: Internal/External DE select: internal
	 */
	hdmi_write(hdmi, 0x20, HDMI_AUDIO_VIDEO_SETTING_1);

	/*
	 * [7:6]: Video output format: RGB 4:4:4
	 * [5:4]: Input video data width: 8 bit
	 * [3:1]: EAV/SAV location: channel 1
	 * [0]: Video input color space: RGB
	 */
	hdmi_write(hdmi, 0x34, HDMI_VIDEO_SETTING_1);

	/*
	 * [7:6]: Together with bit [6] of HDMI_AUDIO_VIDEO_SETTING_2, which is
	 * left at 0 by default, this configures 24bpp and sets the Color Depth
	 * (CD) field in the General Control Packet
	 */
	hdmi_write(hdmi, 0x20, HDMI_DEEP_COLOR_MODES);
}

/**
 * sh_hdmi_audio_config() - audio path setup from platform data (source select)
 */
static void sh_hdmi_audio_config(struct sh_hdmi *hdmi)
{
	u8 data;
	struct sh_mobile_hdmi_info *pdata = dev_get_platdata(hdmi->dev);

	/*
	 * [7:4] L/R data swap control
	 * [3:0] appropriate N[19:16]
	 */
	hdmi_write(hdmi, 0x00, HDMI_L_R_DATA_SWAP_CTRL_RPKT);
	/* appropriate N[15:8] */
	hdmi_write(hdmi, 0x18, HDMI_20_BIT_N_FOR_AUDIO_RPKT_15_8);
	/* appropriate N[7:0] */
	hdmi_write(hdmi, 0x00, HDMI_20_BIT_N_FOR_AUDIO_RPKT_7_0);

	/* [7:4] 48 kHz	SPDIF not used */
	hdmi_write(hdmi, 0x20, HDMI_SPDIF_AUDIO_SAMP_FREQ_CTS);

	/*
	 * [6:5] set required down sampling rate if required
	 * [4:3] set required audio source
	 */
	switch (pdata->flags & HDMI_SND_SRC_MASK) {
	default:
		/* fall through */
	case HDMI_SND_SRC_I2S:
		data = 0x0 << 3;
		break;
	case HDMI_SND_SRC_SPDIF:
		data = 0x1 << 3;
		break;
	case HDMI_SND_SRC_DSD:
		data = 0x2 << 3;
		break;
	case HDMI_SND_SRC_HBR:
		data = 0x3 << 3;
		break;
	}
	hdmi_write(hdmi, data, HDMI_AUDIO_SETTING_1);

	/* [3:0] set sending channel number for channel status */
	hdmi_write(hdmi, 0x40, HDMI_AUDIO_SETTING_2);

	/*
	 * [5:2] set valid I2S source input pin
	 * [1:0] set input I2S source mode
	 */
	hdmi_write(hdmi, 0x04, HDMI_I2S_AUDIO_SET);

	/* [7:4] set valid DSD source input pin */
	hdmi_write(hdmi, 0x00, HDMI_DSD_AUDIO_SET);

	/* [7:0] set appropriate I2S input pin swap settings if required */
	hdmi_write(hdmi, 0x00, HDMI_I2S_INPUT_PIN_SWAP);

	/*
	 * [7] set validity bit for channel status
	 * [3:0] set original sample frequency for channel status
	 */
	hdmi_write(hdmi, 0x00, HDMI_AUDIO_STATUS_BITS_SETTING_1);

	/*
	 * [7] set value for channel status
	 * [6] set value for channel status
	 * [5] set copyright bit for channel status
	 * [4:2] set
additional information for channel status
	 * [1:0] set clock accuracy for channel status
	 */
	hdmi_write(hdmi, 0x00, HDMI_AUDIO_STATUS_BITS_SETTING_2);

	/* [7:0] set category code for channel status */
	hdmi_write(hdmi, 0x00, HDMI_CATEGORY_CODE);

	/*
	 * [7:4] set source number for channel status
	 * [3:0] set word length for channel status
	 */
	hdmi_write(hdmi, 0x00, HDMI_SOURCE_NUM_AUDIO_WORD_LEN);

	/* [7:4] set sample frequency for channel status */
	hdmi_write(hdmi, 0x20, HDMI_AUDIO_VIDEO_SETTING_1);
}

/**
 * sh_hdmi_phy_config() - configure the HDMI PHY for the used video mode
 *
 * Three fixed PLL/driver parameter sets, selected purely by pixel clock:
 * <10000ps (~148MHz, 1080p), <30000ps (~74.25MHz, 720p), else (~27MHz, 480p).
 */
static void sh_hdmi_phy_config(struct sh_hdmi *hdmi)
{
	if (hdmi->mode.pixclock < 10000) {
		/* for 1080p8bit 148MHz */
		hdmi_write(hdmi, 0x1d, HDMI_SLIPHDMIT_PARAM_SETTINGS_1);
		hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_2);
		hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_3);
		hdmi_write(hdmi, 0x4c, HDMI_SLIPHDMIT_PARAM_SETTINGS_5);
		hdmi_write(hdmi, 0x1e, HDMI_SLIPHDMIT_PARAM_SETTINGS_6);
		hdmi_write(hdmi, 0x48, HDMI_SLIPHDMIT_PARAM_SETTINGS_7);
		hdmi_write(hdmi, 0x0e, HDMI_SLIPHDMIT_PARAM_SETTINGS_8);
		hdmi_write(hdmi, 0x25, HDMI_SLIPHDMIT_PARAM_SETTINGS_9);
		hdmi_write(hdmi, 0x04, HDMI_SLIPHDMIT_PARAM_SETTINGS_10);
	} else if (hdmi->mode.pixclock < 30000) {
		/* 720p, 8bit, 74.25MHz. Might need to be adjusted for other formats */
		/*
		 * [1:0]	Speed_A
		 * [3:2]	Speed_B
		 * [4]		PLLA_Bypass
		 * [6]		DRV_TEST_EN
		 * [7]		DRV_TEST_IN
		 */
		hdmi_write(hdmi, 0x0f, HDMI_SLIPHDMIT_PARAM_SETTINGS_1);
		/* PLLB_CONFIG[17], PLLA_CONFIG[17] - not in PHY datasheet */
		hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_2);
		/*
		 * [2:0]	BGR_I_OFFSET
		 * [6:4]	BGR_V_OFFSET
		 */
		hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_3);
		/* PLLA_CONFIG[7:0]: VCO gain, VCO offset, LPF resistance[0] */
		hdmi_write(hdmi, 0x44, HDMI_SLIPHDMIT_PARAM_SETTINGS_5);
		/*
		 * PLLA_CONFIG[15:8]: regulator voltage[0], CP current,
		 * LPF capacitance, LPF resistance[1]
		 */
		hdmi_write(hdmi, 0x32, HDMI_SLIPHDMIT_PARAM_SETTINGS_6);
		/* PLLB_CONFIG[7:0]: LPF resistance[0], VCO offset, VCO gain */
		hdmi_write(hdmi, 0x4A, HDMI_SLIPHDMIT_PARAM_SETTINGS_7);
		/*
		 * PLLB_CONFIG[15:8]: regulator voltage[0], CP current,
		 * LPF capacitance, LPF resistance[1]
		 */
		hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_8);
		/* DRV_CONFIG, PE_CONFIG */
		hdmi_write(hdmi, 0x25, HDMI_SLIPHDMIT_PARAM_SETTINGS_9);
		/*
		 * [2:0]	AMON_SEL (4 == LPF voltage)
		 * [4]		PLLA_CONFIG[16]
		 * [5]		PLLB_CONFIG[16]
		 */
		hdmi_write(hdmi, 0x04, HDMI_SLIPHDMIT_PARAM_SETTINGS_10);
	} else {
		/* for 480p8bit 27MHz */
		hdmi_write(hdmi, 0x19, HDMI_SLIPHDMIT_PARAM_SETTINGS_1);
		hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_2);
		hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_3);
		hdmi_write(hdmi, 0x44, HDMI_SLIPHDMIT_PARAM_SETTINGS_5);
		hdmi_write(hdmi, 0x32, HDMI_SLIPHDMIT_PARAM_SETTINGS_6);
		hdmi_write(hdmi, 0x48, HDMI_SLIPHDMIT_PARAM_SETTINGS_7);
		hdmi_write(hdmi, 0x0F, HDMI_SLIPHDMIT_PARAM_SETTINGS_8);
		hdmi_write(hdmi, 0x20, HDMI_SLIPHDMIT_PARAM_SETTINGS_9);
		hdmi_write(hdmi, 0x04, HDMI_SLIPHDMIT_PARAM_SETTINGS_10);
	}
}

/**
 * sh_hdmi_avi_infoframe_setup() - Auxiliary Video Information InfoFrame CONTROL PACKET
 */
static void sh_hdmi_avi_infoframe_setup(struct sh_hdmi *hdmi)
{
	u8 vic;

	/* AVI InfoFrame */
	hdmi_write(hdmi, 0x06, HDMI_CTRL_PKT_BUF_INDEX);

	/* Packet
Type = 0x82 */ hdmi_write(hdmi, 0x82, HDMI_CTRL_PKT_BUF_ACCESS_HB0); /* Version = 0x02 */ hdmi_write(hdmi, 0x02, HDMI_CTRL_PKT_BUF_ACCESS_HB1); /* Length = 13 (0x0D) */ hdmi_write(hdmi, 0x0D, HDMI_CTRL_PKT_BUF_ACCESS_HB2); /* N. A. Checksum */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0); /* * Y = RGB * A0 = No Data * B = Bar Data not valid * S = No Data */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB1); /* * [7:6] C = Colorimetry: no data * [5:4] M = 2: 16:9, 1: 4:3 Picture Aspect Ratio * [3:0] R = 8: Active Frame Aspect Ratio: same as picture aspect ratio */ hdmi_write(hdmi, 0x28, HDMI_CTRL_PKT_BUF_ACCESS_PB2); /* * ITC = No Data * EC = xvYCC601 * Q = Default (depends on video format) * SC = No Known non_uniform Scaling */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB3); /* * VIC should be ignored if external config is used, so, we could just use 0, * but play safe and use a valid value in any case just in case */ if (hdmi->preprogrammed_vic) vic = hdmi->preprogrammed_vic; else vic = 4; hdmi_write(hdmi, vic, HDMI_CTRL_PKT_BUF_ACCESS_PB4); /* PR = No Repetition */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB5); /* Line Number of End of Top Bar (lower 8 bits) */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB6); /* Line Number of End of Top Bar (upper 8 bits) */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB7); /* Line Number of Start of Bottom Bar (lower 8 bits) */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB8); /* Line Number of Start of Bottom Bar (upper 8 bits) */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB9); /* Pixel Number of End of Left Bar (lower 8 bits) */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB10); /* Pixel Number of End of Left Bar (upper 8 bits) */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB11); /* Pixel Number of Start of Right Bar (lower 8 bits) */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB12); /* Pixel Number of Start of Right Bar (upper 8 bits) */ hdmi_write(hdmi, 0x00, 
HDMI_CTRL_PKT_BUF_ACCESS_PB13); } /** * sh_hdmi_audio_infoframe_setup() - Audio InfoFrame of CONTROL PACKET */ static void sh_hdmi_audio_infoframe_setup(struct sh_hdmi *hdmi) { /* Audio InfoFrame */ hdmi_write(hdmi, 0x08, HDMI_CTRL_PKT_BUF_INDEX); /* Packet Type = 0x84 */ hdmi_write(hdmi, 0x84, HDMI_CTRL_PKT_BUF_ACCESS_HB0); /* Version Number = 0x01 */ hdmi_write(hdmi, 0x01, HDMI_CTRL_PKT_BUF_ACCESS_HB1); /* 0 Length = 10 (0x0A) */ hdmi_write(hdmi, 0x0A, HDMI_CTRL_PKT_BUF_ACCESS_HB2); /* n. a. Checksum */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0); /* Audio Channel Count = Refer to Stream Header */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB1); /* Refer to Stream Header */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB2); /* Format depends on coding type (i.e. CT0...CT3) */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB3); /* Speaker Channel Allocation = Front Right + Front Left */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB4); /* Level Shift Value = 0 dB, Down - mix is permitted or no information */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB5); /* Reserved (0) */ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB6); hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB7); hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB8); hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB9); hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB10); } /** * sh_hdmi_configure() - Initialise HDMI for output */ static void sh_hdmi_configure(struct sh_hdmi *hdmi) { /* Configure video format */ sh_hdmi_video_config(hdmi); /* Configure audio format */ sh_hdmi_audio_config(hdmi); /* Configure PHY */ sh_hdmi_phy_config(hdmi); /* Auxiliary Video Information (AVI) InfoFrame */ sh_hdmi_avi_infoframe_setup(hdmi); /* Audio InfoFrame */ sh_hdmi_audio_infoframe_setup(hdmi); /* * Control packet auto send with VSYNC control: auto send * General control, Gamut metadata, ISRC, and ACP packets */ hdmi_write(hdmi, 0x8E, HDMI_CTRL_PKT_AUTO_SEND); /* FIXME */ 
msleep(10);

	/* PS mode b->d, reset PLLA and PLLB */
	hdmi_bit_set(hdmi, 0xFC, 0x4C, HDMI_SYSTEM_CTRL);

	udelay(10);

	hdmi_bit_set(hdmi, 0xFC, 0x40, HDMI_SYSTEM_CTRL);
}

/*
 * sh_hdmi_rate_error() - rate achievable for @mode vs. its nominal pixclock.
 * On return *hdmi_rate / *parent_rate hold the candidate clock rates; the
 * return value is the absolute error in Hz (ULONG_MAX if no valid rate).
 */
static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
		const struct fb_videomode *mode,
		unsigned long *hdmi_rate, unsigned long *parent_rate)
{
	long target = PICOS2KHZ(mode->pixclock) * 1000, rate_error;
	struct sh_mobile_hdmi_info *pdata = dev_get_platdata(hdmi->dev);

	/* How close can the HDMI clock itself get? */
	*hdmi_rate = clk_round_rate(hdmi->hdmi_clk, target);
	if ((long)*hdmi_rate < 0)
		*hdmi_rate = clk_get_rate(hdmi->hdmi_clk);

	rate_error = (long)*hdmi_rate > 0 ? abs(*hdmi_rate - target) : ULONG_MAX;

	/* Let the platform refine the parent clock rate, if it knows how */
	if (rate_error && pdata->clk_optimize_parent)
		rate_error = pdata->clk_optimize_parent(target, hdmi_rate, parent_rate);
	else if (clk_get_parent(hdmi->hdmi_clk))
		*parent_rate = clk_get_rate(clk_get_parent(hdmi->hdmi_clk));

	dev_dbg(hdmi->dev, "%u-%u-%u-%u x %u-%u-%u-%u\n",
		mode->left_margin, mode->xres,
		mode->right_margin, mode->hsync_len,
		mode->upper_margin, mode->yres,
		mode->lower_margin, mode->vsync_len);

	dev_dbg(hdmi->dev, "\t@%lu(+/-%lu)Hz, e=%lu / 1000, r=%uHz, p=%luHz\n", target,
		rate_error, rate_error ? 10000 / (10 * target / rate_error) : 0,
		mode->refresh, *parent_rate);

	return rate_error;
}

/*
 * sh_hdmi_read_edid() - read one (E-)EDID block and pick a video mode.
 * Returns -EAGAIN while further extension blocks remain to be read,
 * -ENXIO if no usable mode was found, 0 on success.
 */
static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
			     unsigned long *parent_rate)
{
	struct sh_mobile_lcdc_chan *ch = hdmi->entity.lcdc;
	const struct fb_videomode *mode, *found = NULL;
	unsigned int f_width = 0, f_height = 0, f_refresh = 0;
	unsigned long found_rate_error = ULONG_MAX; /* silly compiler... */
	bool scanning = false, preferred_bad = false;
	bool use_edid_mode = false;
	u8 edid[128];
	char *forced;
	int i;

	/* Read EDID */
	dev_dbg(hdmi->dev, "Read back EDID code:");
	for (i = 0; i < 128; i++) {
		edid[i] = (hdmi->htop1) ?
(u8)hdmi_htop1_read(hdmi,
				HDMI_HTOP1_EDID_DATA_READ + (i * 4)) :
			hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
#ifdef DEBUG
		if ((i % 16) == 0) {
			printk(KERN_CONT "\n");
			printk(KERN_DEBUG "%02X | %02X", i, edid[i]);
		} else {
			printk(KERN_CONT " %02X", edid[i]);
		}
#endif
	}
#ifdef DEBUG
	printk(KERN_CONT "\n");
#endif

	if (!hdmi->edid_blocks) {
		/* Base block: parse it and learn how many extensions follow */
		fb_edid_to_monspecs(edid, &hdmi->monspec);
		hdmi->edid_blocks = edid[126] + 1;

		dev_dbg(hdmi->dev, "%d main modes, %d extension blocks\n",
			hdmi->monspec.modedb_len, hdmi->edid_blocks - 1);
	} else {
		/* Extension block: merge its modes into the database */
		dev_dbg(hdmi->dev, "Extension %u detected, DTD start %u\n",
			edid[0], edid[2]);
		fb_edid_add_monspecs(edid, &hdmi->monspec);
	}

	if (hdmi->edid_blocks > hdmi->edid_segment_nr * 2 +
	    (hdmi->edid_block_addr >> 7) + 1) {
		/* More blocks to read: advance block offset / segment number */
		if (hdmi->edid_block_addr) {
			hdmi->edid_block_addr = 0;
			hdmi->edid_segment_nr++;
		} else {
			hdmi->edid_block_addr = 0x80;
		}

		/* Set EDID word address */
		hdmi_write(hdmi, hdmi->edid_block_addr, HDMI_EDID_WORD_ADDRESS);
		/* Enable EDID interrupt */
		hdmi_write(hdmi, 0xC6, HDMI_INTERRUPT_MASK_1);
		/* Set EDID segment pointer - starts reading EDID */
		hdmi_write(hdmi, hdmi->edid_segment_nr, HDMI_EDID_SEGMENT_POINTER);

		return -EAGAIN;
	}

	/* All E-EDID blocks ready */
	dev_dbg(hdmi->dev, "%d main and extended modes\n", hdmi->monspec.modedb_len);

	fb_get_options("sh_mobile_lcdc", &forced);
	if (forced && *forced) {
		/* Only primitive parsing so far */
		i = sscanf(forced, "%ux%u@%u",
			   &f_width, &f_height, &f_refresh);
		if (i < 2) {
			f_width = 0;
			f_height = 0;
		} else {
			/* The user wants us to use the EDID data */
			scanning = true;
		}
		dev_dbg(hdmi->dev, "Forced mode %ux%u@%uHz\n",
			f_width, f_height, f_refresh);
	}

	/* Walk monitor modes to find the best or the exact match */
	for (i = 0, mode = hdmi->monspec.modedb;
	     i < hdmi->monspec.modedb_len && scanning;
	     i++, mode++) {
		unsigned long rate_error;

		if (!f_width && !f_height) {
			/*
			 * A parameter string "video=sh_mobile_lcdc:0x0" means
			 * use the preferred EDID mode. If it is rejected by
			 * .fb_check_var(), keep looking, until an acceptable
			 * one is found.
			 */
			if ((mode->flag & FB_MODE_IS_FIRST) || preferred_bad)
				scanning = false;
			else
				continue;
		} else if (f_width != mode->xres || f_height != mode->yres) {
			/* No interest in unmatching modes */
			continue;
		}

		rate_error = sh_hdmi_rate_error(hdmi, mode, hdmi_rate, parent_rate);

		if (scanning) {
			if (f_refresh == mode->refresh || (!f_refresh && !rate_error))
				/*
				 * Exact match if either the refresh rate
				 * matches or it hasn't been specified and we've
				 * found a mode, for which we can configure the
				 * clock precisely
				 */
				scanning = false;
			else if (found && found_rate_error <= rate_error)
				/*
				 * We otherwise search for the closest matching
				 * clock rate - either if no refresh rate has
				 * been specified or we cannot find an exactly
				 * matching one
				 */
				continue;
		}

		/* Check if supported: sufficient fb memory, supported clock-rate */
		if (ch && ch->notify &&
		    ch->notify(ch, SH_MOBILE_LCDC_EVENT_DISPLAY_MODE, mode,
			       NULL)) {
			scanning = true;
			preferred_bad = true;
			continue;
		}

		found = mode;
		found_rate_error = rate_error;
		use_edid_mode = true;
	}

	/*
	 * TODO 1: if no default mode is present, postpone running the config
	 *	   until after the LCDC channel is initialized.
	 * TODO 2: consider registering the HDMI platform device from the LCDC
	 *	   driver.
	 */
	if (!found && hdmi->entity.def_mode.xres != 0) {
		found = &hdmi->entity.def_mode;
		found_rate_error = sh_hdmi_rate_error(hdmi, found, hdmi_rate,
						      parent_rate);
	}

	/* No cookie today */
	if (!found)
		return -ENXIO;

	/* Map the chosen geometry onto a CEA-861 VIC the chip pre-programs */
	if (found->xres == 640 && found->yres == 480 && found->refresh == 60)
		hdmi->preprogrammed_vic = 1;
	else if (found->xres == 720 && found->yres == 480 && found->refresh == 60)
		hdmi->preprogrammed_vic = 2;
	else if (found->xres == 720 && found->yres == 576 && found->refresh == 50)
		hdmi->preprogrammed_vic = 17;
	else if (found->xres == 1280 && found->yres == 720 && found->refresh == 60)
		hdmi->preprogrammed_vic = 4;
	else if (found->xres == 1920 && found->yres == 1080 && found->refresh == 24)
		hdmi->preprogrammed_vic = 32;
	else if (found->xres == 1920 && found->yres == 1080 && found->refresh == 50)
		hdmi->preprogrammed_vic = 31;
	else if (found->xres == 1920 && found->yres == 1080 && found->refresh == 60)
		hdmi->preprogrammed_vic = 16;
	else
		hdmi->preprogrammed_vic = 0;

	dev_dbg(hdmi->dev, "Using %s %s mode %ux%u@%uHz (%luHz), "
		"clock error %luHz\n", use_edid_mode ? "EDID" : "default",
		hdmi->preprogrammed_vic ? "VIC" : "external", found->xres,
		found->yres, found->refresh, PICOS2KHZ(found->pixclock) * 1000,
		found_rate_error);

	hdmi->mode = *found;
	sh_hdmi_external_video_param(hdmi);

	return 0;
}

/* Hot-plug / EDID interrupt: handles plug, unplug, EDID-done and EDID errors */
static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id)
{
	struct sh_hdmi *hdmi = dev_id;
	u8 status1, status2, mask1, mask2;

	/* mode_b and PLLA and PLLB reset */
	hdmi_bit_set(hdmi, 0xFC, 0x2C, HDMI_SYSTEM_CTRL);

	/* How long shall reset be held?
*/
	udelay(10);

	/* mode_b and PLLA and PLLB reset release */
	hdmi_bit_set(hdmi, 0xFC, 0x20, HDMI_SYSTEM_CTRL);

	status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1);
	status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2);

	mask1 = hdmi_read(hdmi, HDMI_INTERRUPT_MASK_1);
	mask2 = hdmi_read(hdmi, HDMI_INTERRUPT_MASK_2);

	/* Correct would be to ack only set bits, but the datasheet requires 0xff */
	hdmi_write(hdmi, 0xFF, HDMI_INTERRUPT_STATUS_1);
	hdmi_write(hdmi, 0xFF, HDMI_INTERRUPT_STATUS_2);

	if (printk_ratelimit())
		dev_dbg(hdmi->dev, "IRQ #%d: Status #1: 0x%x & 0x%x, #2: 0x%x & 0x%x\n",
			irq, status1, mask1, status2, mask2);

	if (!((status1 & mask1) | (status2 & mask2))) {
		/* None of our enabled sources fired */
		return IRQ_NONE;
	} else if (status1 & 0xc0) {
		/* Hot plug or MSENS level change */
		u8 msens;

		/* Datasheet specifies 10ms... */
		udelay(500);

		msens = hdmi_read(hdmi, HDMI_HOT_PLUG_MSENS_STATUS);
		dev_dbg(hdmi->dev, "MSENS 0x%x\n", msens);
		/* Check, if hot plug & MSENS pin status are both high */
		if ((msens & 0xC0) == 0xC0) {
			/* Display plug in */
			hdmi->edid_segment_nr = 0;
			hdmi->edid_block_addr = 0;
			hdmi->edid_blocks = 0;
			hdmi->hp_state = HDMI_HOTPLUG_CONNECTED;

			/* Set EDID word address  */
			hdmi_write(hdmi, 0x00, HDMI_EDID_WORD_ADDRESS);
			/* Enable EDID interrupt */
			hdmi_write(hdmi, 0xC6, HDMI_INTERRUPT_MASK_1);
			/* Set EDID segment pointer - starts reading EDID */
			hdmi_write(hdmi, 0x00, HDMI_EDID_SEGMENT_POINTER);
		} else if (!(status1 & 0x80)) {
			/* Display unplug, beware multiple interrupts */
			if (hdmi->hp_state != HDMI_HOTPLUG_DISCONNECTED) {
				hdmi->hp_state = HDMI_HOTPLUG_DISCONNECTED;
				schedule_delayed_work(&hdmi->edid_work, 0);
			}
			/* display_off will switch back to mode_a */
		}
	} else if (status1 & 2) {
		/* EDID error interrupt: retry */
		/* Set EDID word address */
		hdmi_write(hdmi, hdmi->edid_block_addr, HDMI_EDID_WORD_ADDRESS);
		/* Set EDID segment pointer */
		hdmi_write(hdmi, hdmi->edid_segment_nr, HDMI_EDID_SEGMENT_POINTER);
	} else if (status1 & 4) {
		/* EDID block complete: defer parsing to the work queue */
		/* Disable EDID interrupt */
		hdmi_write(hdmi, 0xC0, HDMI_INTERRUPT_MASK_1);
		schedule_delayed_work(&hdmi->edid_work, msecs_to_jiffies(10));
	}

	return IRQ_HANDLED;
}

/* LCDC callback: enable output if EDID is ready; report connect status */
static int sh_hdmi_display_on(struct sh_mobile_lcdc_entity *entity)
{
	struct sh_hdmi *hdmi = entity_to_sh_hdmi(entity);

	dev_dbg(hdmi->dev, "%s(%p): state %x\n", __func__, hdmi, hdmi->hp_state);

	/*
	 * hp_state can be set to
	 * HDMI_HOTPLUG_DISCONNECTED:	on monitor unplug
	 * HDMI_HOTPLUG_CONNECTED:	on monitor plug-in
	 * HDMI_HOTPLUG_EDID_DONE:	on EDID read completion
	 */
	if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) {
		/* PS mode d->e. All functions are active */
		hdmi_bit_set(hdmi, 0xFC, 0x80, HDMI_SYSTEM_CTRL);
		dev_dbg(hdmi->dev, "HDMI running\n");
	}

	return hdmi->hp_state == HDMI_HOTPLUG_DISCONNECTED
		? SH_MOBILE_LCDC_DISPLAY_DISCONNECTED
		: SH_MOBILE_LCDC_DISPLAY_CONNECTED;
}

/* LCDC callback: drop back to the lowest power-save mode */
static void sh_hdmi_display_off(struct sh_mobile_lcdc_entity *entity)
{
	struct sh_hdmi *hdmi = entity_to_sh_hdmi(entity);

	dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi);
	/* PS mode e->a */
	hdmi_bit_set(hdmi, 0xFC, 0x10, HDMI_SYSTEM_CTRL);
}

static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = {
	.display_on = sh_hdmi_display_on,
	.display_off = sh_hdmi_display_off,
};

/**
 * sh_hdmi_clk_configure() - set HDMI clock frequency and enable the clock
 * @hdmi:		driver context
 * @hdmi_rate:		HDMI clock frequency in Hz
 * @parent_rate:	if != 0 - set parent clock rate for optimal precision
 * return:		configured positive rate if successful
 *			0 if couldn't set the rate, but managed to enable the
 *			clock, negative error, if couldn't enable the clock
 */
static long sh_hdmi_clk_configure(struct sh_hdmi *hdmi, unsigned long hdmi_rate,
				  unsigned long parent_rate)
{
	int ret;

	if (parent_rate && clk_get_parent(hdmi->hdmi_clk)) {
		ret = clk_set_rate(clk_get_parent(hdmi->hdmi_clk), parent_rate);
		if (ret < 0) {
			/* Parent refused the rate: fall back to rounding on the leaf */
			dev_warn(hdmi->dev, "Cannot set parent rate %ld: %d\n", parent_rate, ret);
			hdmi_rate = clk_round_rate(hdmi->hdmi_clk, hdmi_rate);
		} else {
			dev_dbg(hdmi->dev, "HDMI set parent frequency %lu\n", parent_rate);
		}
	}

	ret =
clk_set_rate(hdmi->hdmi_clk, hdmi_rate); if (ret < 0) { dev_warn(hdmi->dev, "Cannot set rate %ld: %d\n", hdmi_rate, ret); hdmi_rate = 0; } else { dev_dbg(hdmi->dev, "HDMI set frequency %lu\n", hdmi_rate); } return hdmi_rate; } /* Hotplug interrupt occurred, read EDID */ static void sh_hdmi_edid_work_fn(struct work_struct *work) { struct sh_hdmi *hdmi = container_of(work, struct sh_hdmi, edid_work.work); struct sh_mobile_lcdc_chan *ch = hdmi->entity.lcdc; int ret; dev_dbg(hdmi->dev, "%s(%p): begin, hotplug status %d\n", __func__, hdmi, hdmi->hp_state); if (hdmi->hp_state == HDMI_HOTPLUG_CONNECTED) { unsigned long parent_rate = 0, hdmi_rate; ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate); if (ret < 0) goto out; hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE; /* Reconfigure the clock */ ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate); if (ret < 0) goto out; msleep(10); sh_hdmi_configure(hdmi); /* Switched to another (d) power-save mode */ msleep(10); if (ch && ch->notify) ch->notify(ch, SH_MOBILE_LCDC_EVENT_DISPLAY_CONNECT, &hdmi->mode, &hdmi->monspec); } else { hdmi->monspec.modedb_len = 0; fb_destroy_modedb(hdmi->monspec.modedb); hdmi->monspec.modedb = NULL; if (ch && ch->notify) ch->notify(ch, SH_MOBILE_LCDC_EVENT_DISPLAY_DISCONNECT, NULL, NULL); ret = 0; } out: if (ret < 0 && ret != -EAGAIN) hdmi->hp_state = HDMI_HOTPLUG_DISCONNECTED; dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi); } static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi) { hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_MODE); hdmi_htop1_write(hdmi, 0x0000000b, 0x0010); hdmi_htop1_write(hdmi, 0x00006710, HDMI_HTOP1_HTOP_DCL_FRC_MODE); hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1); hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2); hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1); hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2); hdmi_htop1_write(hdmi, 0x01020406, 
HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1); hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2); hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1); hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2); hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1); hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2); hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1); hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2); hdmi_htop1_write(hdmi, 0x00000008, HDMI_HTOP1_CURRENT); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP0_1); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP2_C); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PHY_TEST_MODE); hdmi_htop1_write(hdmi, 0x00000081, HDMI_HTOP1_TISIDRV); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PLLBW); hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN); hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN); hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR); hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET); hdmi_htop1_write(hdmi, 0x00000016, HDMI_HTOP1_CISRANGE); msleep(100); hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_ENABLE_SELECTOR); msleep(100); hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR); hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET); hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN); hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT); hdmi_htop1_write(hdmi, 
0x00000000, HDMI_HTOP1_CLK_TO_PHY); hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT2); hdmi_htop1_write(hdmi, 0x0000000a, HDMI_HTOP1_CLK_SET); } static int __init sh_hdmi_probe(struct platform_device *pdev) { struct sh_mobile_hdmi_info *pdata = dev_get_platdata(&pdev->dev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *htop1_res; int irq = platform_get_irq(pdev, 0), ret; struct sh_hdmi *hdmi; long rate; if (!res || !pdata || irq < 0) return -ENODEV; htop1_res = NULL; if (pdata->flags & HDMI_HAS_HTOP1) { htop1_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!htop1_res) { dev_err(&pdev->dev, "htop1 needs register base\n"); return -EINVAL; } } hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) { dev_err(&pdev->dev, "Cannot allocate device data\n"); return -ENOMEM; } hdmi->dev = &pdev->dev; hdmi->entity.owner = THIS_MODULE; hdmi->entity.ops = &sh_hdmi_ops; hdmi->irq = irq; hdmi->hdmi_clk = clk_get(&pdev->dev, "ick"); if (IS_ERR(hdmi->hdmi_clk)) { ret = PTR_ERR(hdmi->hdmi_clk); dev_err(&pdev->dev, "Unable to get clock: %d\n", ret); return ret; } /* select register access functions */ if (pdata->flags & HDMI_32BIT_REG) { hdmi->write = __hdmi_write32; hdmi->read = __hdmi_read32; } else { hdmi->write = __hdmi_write8; hdmi->read = __hdmi_read8; } /* An arbitrary relaxed pixclock just to get things started: from standard 480p */ rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037)); if (rate > 0) rate = sh_hdmi_clk_configure(hdmi, rate, 0); if (rate < 0) { ret = rate; goto erate; } ret = clk_prepare_enable(hdmi->hdmi_clk); if (ret < 0) { dev_err(hdmi->dev, "Cannot enable clock: %d\n", ret); goto erate; } dev_dbg(&pdev->dev, "Enabled HDMI clock at %luHz\n", rate); if (!request_mem_region(res->start, resource_size(res), dev_name(&pdev->dev))) { dev_err(&pdev->dev, "HDMI register region already claimed\n"); ret = -EBUSY; goto ereqreg; } hdmi->base = ioremap(res->start, resource_size(res)); if 
(!hdmi->base) { dev_err(&pdev->dev, "HDMI register region already claimed\n"); ret = -ENOMEM; goto emap; } platform_set_drvdata(pdev, &hdmi->entity); INIT_DELAYED_WORK(&hdmi->edid_work, sh_hdmi_edid_work_fn); pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); /* init interrupt polarity */ if (pdata->flags & HDMI_OUTPUT_PUSH_PULL) hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL); if (pdata->flags & HDMI_OUTPUT_POLARITY_HI) hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL); /* enable htop1 register if needed */ if (htop1_res) { hdmi->htop1 = ioremap(htop1_res->start, resource_size(htop1_res)); if (!hdmi->htop1) { dev_err(&pdev->dev, "control register region already claimed\n"); ret = -ENOMEM; goto emap_htop1; } sh_hdmi_htop1_init(hdmi); } /* Product and revision IDs are 0 in sh-mobile version */ dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n", hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID)); ret = request_irq(irq, sh_hdmi_hotplug, 0, dev_name(&pdev->dev), hdmi); if (ret < 0) { dev_err(&pdev->dev, "Unable to request irq: %d\n", ret); goto ereqirq; } ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_sh_hdmi, &sh_hdmi_dai, 1); if (ret < 0) { dev_err(&pdev->dev, "codec registration failed\n"); goto ecodec; } return 0; ecodec: free_irq(irq, hdmi); ereqirq: if (hdmi->htop1) iounmap(hdmi->htop1); emap_htop1: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); iounmap(hdmi->base); emap: release_mem_region(res->start, resource_size(res)); ereqreg: clk_disable_unprepare(hdmi->hdmi_clk); erate: clk_put(hdmi->hdmi_clk); return ret; } static int __exit sh_hdmi_remove(struct platform_device *pdev) { struct sh_hdmi *hdmi = entity_to_sh_hdmi(platform_get_drvdata(pdev)); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); snd_soc_unregister_codec(&pdev->dev); /* No new work will be scheduled, wait for running ISR */ free_irq(hdmi->irq, hdmi); /* Wait for already scheduled work */ 
cancel_delayed_work_sync(&hdmi->edid_work); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); clk_disable_unprepare(hdmi->hdmi_clk); clk_put(hdmi->hdmi_clk); if (hdmi->htop1) iounmap(hdmi->htop1); iounmap(hdmi->base); release_mem_region(res->start, resource_size(res)); return 0; } static int sh_hdmi_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct sh_hdmi *hdmi = entity_to_sh_hdmi(platform_get_drvdata(pdev)); disable_irq(hdmi->irq); /* Wait for already scheduled work */ cancel_delayed_work_sync(&hdmi->edid_work); return 0; } static int sh_hdmi_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct sh_mobile_hdmi_info *pdata = dev_get_platdata(dev); struct sh_hdmi *hdmi = entity_to_sh_hdmi(platform_get_drvdata(pdev)); /* Re-init interrupt polarity */ if (pdata->flags & HDMI_OUTPUT_PUSH_PULL) hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL); if (pdata->flags & HDMI_OUTPUT_POLARITY_HI) hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL); /* Re-init htop1 */ if (hdmi->htop1) sh_hdmi_htop1_init(hdmi); /* Now it's safe to enable interrupts again */ enable_irq(hdmi->irq); return 0; } static const struct dev_pm_ops sh_hdmi_pm_ops = { .suspend = sh_hdmi_suspend, .resume = sh_hdmi_resume, }; static struct platform_driver sh_hdmi_driver = { .remove = __exit_p(sh_hdmi_remove), .driver = { .name = "sh-mobile-hdmi", .pm = &sh_hdmi_pm_ops, }, }; module_platform_driver_probe(sh_hdmi_driver, sh_hdmi_probe); MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); MODULE_DESCRIPTION("SuperH / ARM-shmobile HDMI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
narantech/linux-rpi
arch/arm/mach-orion5x/lsmini-setup.c
1546
7366
/* * arch/arm/mach-orion5x/lsmini-setup.c * * Maintainer: Alexey Kopytko <alexey@kopytko.ru> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/leds.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/ata_platform.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * Linkstation Mini Info ****************************************************************************/ /* * 256K NOR flash Device bus boot chip select */ #define LSMINI_NOR_BOOT_BASE 0xf4000000 #define LSMINI_NOR_BOOT_SIZE SZ_256K /***************************************************************************** * 256KB NOR Flash on BOOT Device ****************************************************************************/ static struct physmap_flash_data lsmini_nor_flash_data = { .width = 1, }; static struct resource lsmini_nor_flash_resource = { .flags = IORESOURCE_MEM, .start = LSMINI_NOR_BOOT_BASE, .end = LSMINI_NOR_BOOT_BASE + LSMINI_NOR_BOOT_SIZE - 1, }; static struct platform_device lsmini_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &lsmini_nor_flash_data, }, .num_resources = 1, .resource = &lsmini_nor_flash_resource, }; /***************************************************************************** * Ethernet ****************************************************************************/ static struct mv643xx_eth_platform_data lsmini_eth_data = { .phy_addr = 8, }; /***************************************************************************** * RTC 5C372a on I2C bus 
****************************************************************************/ static struct i2c_board_info __initdata lsmini_i2c_rtc = { I2C_BOARD_INFO("rs5c372a", 0x32), }; /***************************************************************************** * LEDs attached to GPIO ****************************************************************************/ #define LSMINI_GPIO_LED_ALARM 2 #define LSMINI_GPIO_LED_INFO 3 #define LSMINI_GPIO_LED_FUNC 9 #define LSMINI_GPIO_LED_PWR 14 static struct gpio_led lsmini_led_pins[] = { { .name = "alarm:red", .gpio = LSMINI_GPIO_LED_ALARM, .active_low = 1, }, { .name = "info:amber", .gpio = LSMINI_GPIO_LED_INFO, .active_low = 1, }, { .name = "func:blue:top", .gpio = LSMINI_GPIO_LED_FUNC, .active_low = 1, }, { .name = "power:blue:bottom", .gpio = LSMINI_GPIO_LED_PWR, }, }; static struct gpio_led_platform_data lsmini_led_data = { .leds = lsmini_led_pins, .num_leds = ARRAY_SIZE(lsmini_led_pins), }; static struct platform_device lsmini_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &lsmini_led_data, }, }; /**************************************************************************** * GPIO Attached Keys ****************************************************************************/ #define LSMINI_GPIO_KEY_FUNC 15 #define LSMINI_GPIO_KEY_POWER 18 #define LSMINI_GPIO_KEY_AUTOPOWER 17 #define LSMINI_SW_POWER 0x00 #define LSMINI_SW_AUTOPOWER 0x01 static struct gpio_keys_button lsmini_buttons[] = { { .code = KEY_OPTION, .gpio = LSMINI_GPIO_KEY_FUNC, .desc = "Function Button", .active_low = 1, }, { .type = EV_SW, .code = LSMINI_SW_POWER, .gpio = LSMINI_GPIO_KEY_POWER, .desc = "Power-on Switch", .active_low = 1, }, { .type = EV_SW, .code = LSMINI_SW_AUTOPOWER, .gpio = LSMINI_GPIO_KEY_AUTOPOWER, .desc = "Power-auto Switch", .active_low = 1, }, }; static struct gpio_keys_platform_data lsmini_button_data = { .buttons = lsmini_buttons, .nbuttons = ARRAY_SIZE(lsmini_buttons), }; static struct platform_device lsmini_button_device = { 
.name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &lsmini_button_data, }, }; /***************************************************************************** * SATA ****************************************************************************/ static struct mv_sata_platform_data lsmini_sata_data = { .n_ports = 2, }; /***************************************************************************** * Linkstation Mini specific power off method: reboot ****************************************************************************/ /* * On the Linkstation Mini, the shutdown process is following: * - Userland monitors key events until the power switch goes to off position * - The board reboots * - U-boot starts and goes into an idle mode waiting for the user * to move the switch to ON position */ static void lsmini_power_off(void) { orion5x_restart(REBOOT_HARD, NULL); } /***************************************************************************** * General Setup ****************************************************************************/ #define LSMINI_GPIO_USB_POWER 16 #define LSMINI_GPIO_AUTO_POWER 17 #define LSMINI_GPIO_POWER 18 #define LSMINI_GPIO_HDD_POWER0 1 #define LSMINI_GPIO_HDD_POWER1 19 static unsigned int lsmini_mpp_modes[] __initdata = { MPP0_UNUSED, /* LED_RESERVE1 (unused) */ MPP1_GPIO, /* HDD_PWR */ MPP2_GPIO, /* LED_ALARM */ MPP3_GPIO, /* LED_INFO */ MPP4_UNUSED, MPP5_UNUSED, MPP6_UNUSED, MPP7_UNUSED, MPP8_UNUSED, MPP9_GPIO, /* LED_FUNC */ MPP10_UNUSED, MPP11_UNUSED, /* LED_ETH (dummy) */ MPP12_UNUSED, MPP13_UNUSED, MPP14_GPIO, /* LED_PWR */ MPP15_GPIO, /* FUNC */ MPP16_GPIO, /* USB_PWR */ MPP17_GPIO, /* AUTO_POWER */ MPP18_GPIO, /* POWER */ MPP19_GPIO, /* HDD_PWR1 */ 0, }; static void __init lsmini_init(void) { /* * Setup basic Orion functions. Need to be called early. */ orion5x_init(); orion5x_mpp_conf(lsmini_mpp_modes); /* * Configure peripherals. 
*/ orion5x_ehci0_init(); orion5x_ehci1_init(); orion5x_eth_init(&lsmini_eth_data); orion5x_i2c_init(); orion5x_sata_init(&lsmini_sata_data); orion5x_uart0_init(); orion5x_xor_init(); mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET, ORION_MBUS_DEVBUS_BOOT_ATTR, LSMINI_NOR_BOOT_BASE, LSMINI_NOR_BOOT_SIZE); platform_device_register(&lsmini_nor_flash); platform_device_register(&lsmini_button_device); platform_device_register(&lsmini_leds); i2c_register_board_info(0, &lsmini_i2c_rtc, 1); /* enable USB power */ gpio_set_value(LSMINI_GPIO_USB_POWER, 1); /* register power-off method */ pm_power_off = lsmini_power_off; pr_info("%s: finished\n", __func__); } #ifdef CONFIG_MACH_LINKSTATION_MINI MACHINE_START(LINKSTATION_MINI, "Buffalo Linkstation Mini") /* Maintainer: Alexey Kopytko <alexey@kopytko.ru> */ .atag_offset = 0x100, .init_machine = lsmini_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .init_time = orion5x_timer_init, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END #endif
gpl-2.0
forth32/kernel-Y900
drivers/isdn/hisax/w6692.c
2314
29382
/* $Id: w6692.c,v 1.18.2.4 2004/02/11 13:21:34 keil Exp $ * * Winbond W6692 specific routines * * Author Petr Novak * Copyright by Petr Novak <petr.novak@i.cz> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "w6692.h" #include "isdnl1.h" #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> /* table entry in the PCI devices list */ typedef struct { int vendor_id; int device_id; char *vendor_name; char *card_name; } PCI_ENTRY; static const PCI_ENTRY id_list[] = { {PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692, "Winbond", "W6692"}, {PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH, "Dynalink/AsusCom", "IS64PH"}, {0, 0, "U.S.Robotics", "ISDN PCI Card TA"} }; #define W6692_SV_USR 0x16ec #define W6692_SD_USR 0x3409 #define W6692_WINBOND 0 #define W6692_DYNALINK 1 #define W6692_USR 2 static const char *w6692_revision = "$Revision: 1.18.2.4 $"; #define DBUSY_TIMER_VALUE 80 static char *W6692Ver[] = {"W6692 V00", "W6692 V01", "W6692 V10", "W6692 V11"}; static void W6692Version(struct IsdnCardState *cs, char *s) { int val; val = cs->readW6692(cs, W_D_RBCH); printk(KERN_INFO "%s Winbond W6692 version (%x): %s\n", s, val, W6692Ver[(val >> 6) & 3]); } static void ph_command(struct IsdnCardState *cs, unsigned int command) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ph_command %x", command); cs->writeisac(cs, W_CIX, command); } static void W6692_new_ph(struct IsdnCardState *cs) { switch (cs->dc.w6692.ph_state) { case (W_L1CMD_RST): ph_command(cs, W_L1CMD_DRC); l1_msg(cs, HW_RESET | INDICATION, NULL); /* fallthru */ case (W_L1IND_CD): l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL); break; case (W_L1IND_DRD): l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL); break; case (W_L1IND_CE): l1_msg(cs, HW_POWERUP | CONFIRM, NULL); break; case (W_L1IND_LD): l1_msg(cs, HW_RSYNC | INDICATION, NULL); break; case 
(W_L1IND_ARD): l1_msg(cs, HW_INFO2 | INDICATION, NULL); break; case (W_L1IND_AI8): l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL); break; case (W_L1IND_AI10): l1_msg(cs, HW_INFO4_P10 | INDICATION, NULL); break; default: break; } } static void W6692_bh(struct work_struct *work) { struct IsdnCardState *cs = container_of(work, struct IsdnCardState, tqueue); struct PStack *stptr; if (test_and_clear_bit(D_CLEARBUSY, &cs->event)) { if (cs->debug) debugl1(cs, "D-Channel Busy cleared"); stptr = cs->stlist; while (stptr != NULL) { stptr->l1.l1l2(stptr, PH_PAUSE | CONFIRM, NULL); stptr = stptr->next; } } if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) W6692_new_ph(cs); if (test_and_clear_bit(D_RCVBUFREADY, &cs->event)) DChannel_proc_rcv(cs); if (test_and_clear_bit(D_XMTBUFREADY, &cs->event)) DChannel_proc_xmt(cs); /* if (test_and_clear_bit(D_RX_MON1, &cs->event)) arcofi_fsm(cs, ARCOFI_RX_END, NULL); if (test_and_clear_bit(D_TX_MON1, &cs->event)) arcofi_fsm(cs, ARCOFI_TX_END, NULL); */ } static void W6692_empty_fifo(struct IsdnCardState *cs, int count) { u_char *ptr; if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO)) debugl1(cs, "W6692_empty_fifo"); if ((cs->rcvidx + count) >= MAX_DFRAME_LEN_L1) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692_empty_fifo overrun %d", cs->rcvidx + count); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK); cs->rcvidx = 0; return; } ptr = cs->rcvbuf + cs->rcvidx; cs->rcvidx += count; cs->readW6692fifo(cs, ptr, count); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK); if (cs->debug & L1_DEB_ISAC_FIFO) { char *t = cs->dlog; t += sprintf(t, "W6692_empty_fifo cnt %d", count); QuickHex(t, ptr, count); debugl1(cs, cs->dlog); } } static void W6692_fill_fifo(struct IsdnCardState *cs) { int count, more; u_char *ptr; if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO)) debugl1(cs, "W6692_fill_fifo"); if (!cs->tx_skb) return; count = cs->tx_skb->len; if (count <= 0) return; more = 0; if (count > W_D_FIFO_THRESH) { more = !0; count = 
W_D_FIFO_THRESH; } ptr = cs->tx_skb->data; skb_pull(cs->tx_skb, count); cs->tx_cnt += count; cs->writeW6692fifo(cs, ptr, count); cs->writeW6692(cs, W_D_CMDR, more ? W_D_CMDR_XMS : (W_D_CMDR_XMS | W_D_CMDR_XME)); if (test_and_set_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) { debugl1(cs, "W6692_fill_fifo dbusytimer running"); del_timer(&cs->dbusytimer); } init_timer(&cs->dbusytimer); cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000); add_timer(&cs->dbusytimer); if (cs->debug & L1_DEB_ISAC_FIFO) { char *t = cs->dlog; t += sprintf(t, "W6692_fill_fifo cnt %d", count); QuickHex(t, ptr, count); debugl1(cs, cs->dlog); } } static void W6692B_empty_fifo(struct BCState *bcs, int count) { u_char *ptr; struct IsdnCardState *cs = bcs->cs; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "W6692B_empty_fifo"); if (bcs->hw.w6692.rcvidx + count > HSCX_BUFMAX) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692B_empty_fifo: incoming packet too large"); cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT); bcs->hw.w6692.rcvidx = 0; return; } ptr = bcs->hw.w6692.rcvbuf + bcs->hw.w6692.rcvidx; bcs->hw.w6692.rcvidx += count; READW6692BFIFO(cs, bcs->channel, ptr, count); cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT); if (cs->debug & L1_DEB_HSCX_FIFO) { char *t = bcs->blog; t += sprintf(t, "W6692B_empty_fifo %c cnt %d", bcs->channel + '1', count); QuickHex(t, ptr, count); debugl1(cs, bcs->blog); } } static void W6692B_fill_fifo(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int more, count; u_char *ptr; if (!bcs->tx_skb) return; if (bcs->tx_skb->len <= 0) return; more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0; if (bcs->tx_skb->len > W_B_FIFO_THRESH) { more = 1; count = W_B_FIFO_THRESH; } else count = bcs->tx_skb->len; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "W6692B_fill_fifo%s%d", (more ? 
" " : " last "), count); ptr = bcs->tx_skb->data; skb_pull(bcs->tx_skb, count); bcs->tx_cnt -= count; bcs->hw.w6692.count += count; WRITEW6692BFIFO(cs, bcs->channel, ptr, count); cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACT | W_B_CMDR_XMS | (more ? 0 : W_B_CMDR_XME)); if (cs->debug & L1_DEB_HSCX_FIFO) { char *t = bcs->blog; t += sprintf(t, "W6692B_fill_fifo %c cnt %d", bcs->channel + '1', count); QuickHex(t, ptr, count); debugl1(cs, bcs->blog); } } static void W6692B_interrupt(struct IsdnCardState *cs, u_char bchan) { u_char val; u_char r; struct BCState *bcs; struct sk_buff *skb; int count; bcs = (cs->bcs->channel == bchan) ? cs->bcs : (cs->bcs + 1); val = cs->BC_Read_Reg(cs, bchan, W_B_EXIR); debugl1(cs, "W6692B chan %d B_EXIR 0x%02X", bchan, val); if (!test_bit(BC_FLG_INIT, &bcs->Flag)) { debugl1(cs, "W6692B not INIT yet"); return; } if (val & W_B_EXI_RME) { /* RME */ r = cs->BC_Read_Reg(cs, bchan, W_B_STAR); if (r & (W_B_STAR_RDOV | W_B_STAR_CRCE | W_B_STAR_RMB)) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B STAR %x", r); if ((r & W_B_STAR_RDOV) && bcs->mode) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B RDOV mode=%d", bcs->mode); if (r & W_B_STAR_CRCE) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B CRC error"); cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT); } else { count = cs->BC_Read_Reg(cs, bchan, W_B_RBCL) & (W_B_FIFO_THRESH - 1); if (count == 0) count = W_B_FIFO_THRESH; W6692B_empty_fifo(bcs, count); if ((count = bcs->hw.w6692.rcvidx) > 0) { if (cs->debug & L1_DEB_HSCX_FIFO) debugl1(cs, "W6692 Bchan Frame %d", count); if (!(skb = dev_alloc_skb(count))) printk(KERN_WARNING "W6692: Bchan receive out of memory\n"); else { memcpy(skb_put(skb, count), bcs->hw.w6692.rcvbuf, count); skb_queue_tail(&bcs->rqueue, skb); } } } bcs->hw.w6692.rcvidx = 0; schedule_event(bcs, B_RCVBUFREADY); } if (val & W_B_EXI_RMR) { /* RMR */ W6692B_empty_fifo(bcs, W_B_FIFO_THRESH); r = cs->BC_Read_Reg(cs, bchan, 
W_B_STAR); if (r & W_B_STAR_RDOV) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B RDOV(RMR) mode=%d", bcs->mode); cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT); if (bcs->mode != L1_MODE_TRANS) bcs->hw.w6692.rcvidx = 0; } if (bcs->mode == L1_MODE_TRANS) { /* receive audio data */ if (!(skb = dev_alloc_skb(W_B_FIFO_THRESH))) printk(KERN_WARNING "HiSax: receive out of memory\n"); else { memcpy(skb_put(skb, W_B_FIFO_THRESH), bcs->hw.w6692.rcvbuf, W_B_FIFO_THRESH); skb_queue_tail(&bcs->rqueue, skb); } bcs->hw.w6692.rcvidx = 0; schedule_event(bcs, B_RCVBUFREADY); } } if (val & W_B_EXI_XDUN) { /* XDUN */ cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT); if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B EXIR %x Lost TX", val); if (bcs->mode == 1) W6692B_fill_fifo(bcs); else { /* Here we lost an TX interrupt, so * restart transmitting the whole frame. */ if (bcs->tx_skb) { skb_push(bcs->tx_skb, bcs->hw.w6692.count); bcs->tx_cnt += bcs->hw.w6692.count; bcs->hw.w6692.count = 0; } } return; } if (val & W_B_EXI_XFR) { /* XFR */ r = cs->BC_Read_Reg(cs, bchan, W_B_STAR); if (r & W_B_STAR_XDOW) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B STAR %x XDOW", r); cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT); if (bcs->tx_skb && (bcs->mode != 1)) { skb_push(bcs->tx_skb, bcs->hw.w6692.count); bcs->tx_cnt += bcs->hw.w6692.count; bcs->hw.w6692.count = 0; } } if (bcs->tx_skb) { if (bcs->tx_skb->len) { W6692B_fill_fifo(bcs); return; } else { if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) && (PACKET_NOACK != bcs->tx_skb->pkt_type)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += bcs->hw.w6692.count; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } dev_kfree_skb_irq(bcs->tx_skb); bcs->hw.w6692.count = 0; bcs->tx_skb = NULL; } } if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { bcs->hw.w6692.count = 0; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); 
W6692B_fill_fifo(bcs); } else { test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); schedule_event(bcs, B_XMTBUFREADY); } } } static irqreturn_t W6692_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val, exval, v1; struct sk_buff *skb; u_int count; u_long flags; int icnt = 5; spin_lock_irqsave(&cs->lock, flags); val = cs->readW6692(cs, W_ISTA); if (!val) { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; } StartW6692: if (cs->debug & L1_DEB_ISAC) debugl1(cs, "W6692 ISTA %x", val); if (val & W_INT_D_RME) { /* RME */ exval = cs->readW6692(cs, W_D_RSTA); if (exval & (W_D_RSTA_RDOV | W_D_RSTA_CRCE | W_D_RSTA_RMB)) { if (exval & W_D_RSTA_RDOV) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 RDOV"); if (exval & W_D_RSTA_CRCE) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 D-channel CRC error"); if (exval & W_D_RSTA_RMB) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 D-channel ABORT"); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK | W_D_CMDR_RRST); } else { count = cs->readW6692(cs, W_D_RBCL) & (W_D_FIFO_THRESH - 1); if (count == 0) count = W_D_FIFO_THRESH; W6692_empty_fifo(cs, count); if ((count = cs->rcvidx) > 0) { cs->rcvidx = 0; if (!(skb = alloc_skb(count, GFP_ATOMIC))) printk(KERN_WARNING "HiSax: D receive out of memory\n"); else { memcpy(skb_put(skb, count), cs->rcvbuf, count); skb_queue_tail(&cs->rq, skb); } } } cs->rcvidx = 0; schedule_event(cs, D_RCVBUFREADY); } if (val & W_INT_D_RMR) { /* RMR */ W6692_empty_fifo(cs, W_D_FIFO_THRESH); } if (val & W_INT_D_XFR) { /* XFR */ if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { if (cs->tx_skb->len) { W6692_fill_fifo(cs); goto afterXFR; } else { dev_kfree_skb_irq(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; } } if ((cs->tx_skb = skb_dequeue(&cs->sq))) { cs->tx_cnt = 0; W6692_fill_fifo(cs); } else schedule_event(cs, D_XMTBUFREADY); } afterXFR: if (val 
& (W_INT_XINT0 | W_INT_XINT1)) { /* XINT0/1 - never */ if (cs->debug & L1_DEB_ISAC) debugl1(cs, "W6692 spurious XINT!"); } if (val & W_INT_D_EXI) { /* EXI */ exval = cs->readW6692(cs, W_D_EXIR); if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 D_EXIR %02x", exval); if (exval & (W_D_EXI_XDUN | W_D_EXI_XCOL)) { /* Transmit underrun/collision */ debugl1(cs, "W6692 D-chan underrun/collision"); printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL\n"); if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { /* Restart frame */ skb_push(cs->tx_skb, cs->tx_cnt); cs->tx_cnt = 0; W6692_fill_fifo(cs); } else { printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL no skb\n"); debugl1(cs, "W6692 XDUN/XCOL no skb"); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST); } } if (exval & W_D_EXI_RDOV) { /* RDOV */ debugl1(cs, "W6692 D-channel RDOV"); printk(KERN_WARNING "HiSax: W6692 D-RDOV\n"); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST); } if (exval & W_D_EXI_TIN2) { /* TIN2 - never */ debugl1(cs, "W6692 spurious TIN2 interrupt"); } if (exval & W_D_EXI_MOC) { /* MOC - not supported */ debugl1(cs, "W6692 spurious MOC interrupt"); v1 = cs->readW6692(cs, W_MOSR); debugl1(cs, "W6692 MOSR %02x", v1); } if (exval & W_D_EXI_ISC) { /* ISC - Level1 change */ v1 = cs->readW6692(cs, W_CIR); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "W6692 ISC CIR=0x%02X", v1); if (v1 & W_CIR_ICC) { cs->dc.w6692.ph_state = v1 & W_CIR_COD_MASK; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ph_state_change %x", cs->dc.w6692.ph_state); schedule_event(cs, D_L1STATECHANGE); } if (v1 & W_CIR_SCC) { v1 = cs->readW6692(cs, W_SQR); debugl1(cs, "W6692 SCC SQR=0x%02X", v1); } } if (exval & W_D_EXI_WEXP) { debugl1(cs, "W6692 spurious WEXP interrupt!"); } if (exval & W_D_EXI_TEXP) { debugl1(cs, "W6692 spurious TEXP interrupt!"); } } if (val & W_INT_B1_EXI) { debugl1(cs, "W6692 B channel 1 interrupt"); W6692B_interrupt(cs, 
0); } if (val & W_INT_B2_EXI) { debugl1(cs, "W6692 B channel 2 interrupt"); W6692B_interrupt(cs, 1); } val = cs->readW6692(cs, W_ISTA); if (val && icnt) { icnt--; goto StartW6692; } if (!icnt) { printk(KERN_WARNING "W6692 IRQ LOOP\n"); cs->writeW6692(cs, W_IMASK, 0xff); } spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void W6692_l1hw(struct PStack *st, int pr, void *arg) { struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; struct sk_buff *skb = arg; u_long flags; int val; switch (pr) { case (PH_DATA | REQUEST): if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { skb_queue_tail(&cs->sq, skb); #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA Queued", 0); #endif } else { cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA", 0); #endif W6692_fill_fifo(cs); } spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { if (cs->debug & L1_DEB_WARN) debugl1(cs, " l2l1 tx_skb exist this shouldn't happen"); skb_queue_tail(&cs->sq, skb); spin_unlock_irqrestore(&cs->lock, flags); break; } if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA_PULLED", 0); #endif W6692_fill_fifo(cs); spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | REQUEST): #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) debugl1(cs, "-> PH_REQUEST_PULL"); #endif if (!cs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (HW_RESET | 
REQUEST): spin_lock_irqsave(&cs->lock, flags); if ((cs->dc.w6692.ph_state == W_L1IND_DRD)) { ph_command(cs, W_L1CMD_ECK); spin_unlock_irqrestore(&cs->lock, flags); } else { ph_command(cs, W_L1CMD_RST); cs->dc.w6692.ph_state = W_L1CMD_RST; spin_unlock_irqrestore(&cs->lock, flags); W6692_new_ph(cs); } break; case (HW_ENABLE | REQUEST): spin_lock_irqsave(&cs->lock, flags); ph_command(cs, W_L1CMD_ECK); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_INFO3 | REQUEST): spin_lock_irqsave(&cs->lock, flags); ph_command(cs, W_L1CMD_AR8); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_TESTLOOP | REQUEST): val = 0; if (1 & (long) arg) val |= 0x0c; if (2 & (long) arg) val |= 0x3; /* !!! not implemented yet */ break; case (HW_DEACTIVATE | RESPONSE): skb_queue_purge(&cs->rq); skb_queue_purge(&cs->sq); if (cs->tx_skb) { dev_kfree_skb_any(cs->tx_skb); cs->tx_skb = NULL; } if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); break; default: if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692_l1hw unknown %04x", pr); break; } } static void setstack_W6692(struct PStack *st, struct IsdnCardState *cs) { st->l1.l1hw = W6692_l1hw; } static void DC_Close_W6692(struct IsdnCardState *cs) { } static void dbusy_timer_handler(struct IsdnCardState *cs) { struct PStack *stptr; int rbch, star; u_long flags; spin_lock_irqsave(&cs->lock, flags); if (test_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) { rbch = cs->readW6692(cs, W_D_RBCH); star = cs->readW6692(cs, W_D_STAR); if (cs->debug) debugl1(cs, "D-Channel Busy D_RBCH %02x D_STAR %02x", rbch, star); if (star & W_D_STAR_XBZ) { /* D-Channel Busy */ test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags); stptr = cs->stlist; while (stptr != NULL) { stptr->l1.l1l2(stptr, PH_PAUSE | INDICATION, NULL); stptr = stptr->next; } } else { /* discard frame; reset transceiver */ test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags); if (cs->tx_skb) 
{ dev_kfree_skb_any(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; } else { printk(KERN_WARNING "HiSax: W6692 D-Channel Busy no skb\n"); debugl1(cs, "D-Channel Busy no skb"); } cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST); /* Transmitter reset */ spin_unlock_irqrestore(&cs->lock, flags); cs->irq_func(cs->irq, cs); return; } } spin_unlock_irqrestore(&cs->lock, flags); } static void W6692Bmode(struct BCState *bcs, int mode, int bchan) { struct IsdnCardState *cs = bcs->cs; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "w6692 %c mode %d ichan %d", '1' + bchan, mode, bchan); bcs->mode = mode; bcs->channel = bchan; bcs->hw.w6692.bchan = bchan; switch (mode) { case (L1_MODE_NULL): cs->BC_Write_Reg(cs, bchan, W_B_MODE, 0); break; case (L1_MODE_TRANS): cs->BC_Write_Reg(cs, bchan, W_B_MODE, W_B_MODE_MMS); break; case (L1_MODE_HDLC): cs->BC_Write_Reg(cs, bchan, W_B_MODE, W_B_MODE_ITF); cs->BC_Write_Reg(cs, bchan, W_B_ADM1, 0xff); cs->BC_Write_Reg(cs, bchan, W_B_ADM2, 0xff); break; } if (mode) cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RRST | W_B_CMDR_RACT | W_B_CMDR_XRST); cs->BC_Write_Reg(cs, bchan, W_B_EXIM, 0x00); } static void W6692_l2l1(struct PStack *st, int pr, void *arg) { struct sk_buff *skb = arg; struct BCState *bcs = st->l1.bcs; u_long flags; switch (pr) { case (PH_DATA | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { skb_queue_tail(&bcs->squeue, skb); } else { bcs->tx_skb = skb; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->hw.w6692.count = 0; bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | INDICATION): if (bcs->tx_skb) { printk(KERN_WARNING "W6692_l2l1: this shouldn't happen\n"); break; } spin_lock_irqsave(&bcs->cs->lock, flags); test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->tx_skb = skb; bcs->hw.w6692.count = 0; bcs->cs->BC_Send_Data(bcs); spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | REQUEST): if (!bcs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, 
&st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (PH_ACTIVATE | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag); W6692Bmode(bcs, st->l1.mode, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | REQUEST): l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | CONFIRM): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag); test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); W6692Bmode(bcs, 0, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL); break; } } static void close_w6692state(struct BCState *bcs) { W6692Bmode(bcs, 0, bcs->channel); if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) { kfree(bcs->hw.w6692.rcvbuf); bcs->hw.w6692.rcvbuf = NULL; kfree(bcs->blog); bcs->blog = NULL; skb_queue_purge(&bcs->rqueue); skb_queue_purge(&bcs->squeue); if (bcs->tx_skb) { dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } } } static int open_w6692state(struct IsdnCardState *cs, struct BCState *bcs) { if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) { if (!(bcs->hw.w6692.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for w6692.rcvbuf\n"); test_and_clear_bit(BC_FLG_INIT, &bcs->Flag); return (1); } if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for bcs->blog\n"); test_and_clear_bit(BC_FLG_INIT, &bcs->Flag); kfree(bcs->hw.w6692.rcvbuf); bcs->hw.w6692.rcvbuf = NULL; return (2); } skb_queue_head_init(&bcs->rqueue); skb_queue_head_init(&bcs->squeue); } bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); bcs->event = 0; bcs->hw.w6692.rcvidx = 0; bcs->tx_cnt = 0; return (0); } static int setstack_w6692(struct PStack *st, struct BCState *bcs) { bcs->channel = st->l1.bc; if 
(open_w6692state(st->l1.hardware, bcs)) return (-1); st->l1.bcs = bcs; st->l2.l2l1 = W6692_l2l1; setstack_manager(st); bcs->st = st; setstack_l1_B(st); return (0); } static void resetW6692(struct IsdnCardState *cs) { cs->writeW6692(cs, W_D_CTL, W_D_CTL_SRST); mdelay(10); cs->writeW6692(cs, W_D_CTL, 0x00); mdelay(10); cs->writeW6692(cs, W_IMASK, 0xff); cs->writeW6692(cs, W_D_SAM, 0xff); cs->writeW6692(cs, W_D_TAM, 0xff); cs->writeW6692(cs, W_D_EXIM, 0x00); cs->writeW6692(cs, W_D_MODE, W_D_MODE_RACT); cs->writeW6692(cs, W_IMASK, 0x18); if (cs->subtyp == W6692_USR) { /* seems that USR implemented some power control features * Pin 79 is connected to the oscilator circuit so we * have to handle it here */ cs->writeW6692(cs, W_PCTL, 0x80); cs->writeW6692(cs, W_XDATA, 0x00); } } static void initW6692(struct IsdnCardState *cs, int part) { if (part & 1) { cs->setstack_d = setstack_W6692; cs->DC_Close = DC_Close_W6692; cs->dbusytimer.function = (void *) dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); resetW6692(cs); ph_command(cs, W_L1CMD_RST); cs->dc.w6692.ph_state = W_L1CMD_RST; W6692_new_ph(cs); ph_command(cs, W_L1CMD_ECK); cs->bcs[0].BC_SetStack = setstack_w6692; cs->bcs[1].BC_SetStack = setstack_w6692; cs->bcs[0].BC_Close = close_w6692state; cs->bcs[1].BC_Close = close_w6692state; W6692Bmode(cs->bcs, 0, 0); W6692Bmode(cs->bcs + 1, 0, 0); } if (part & 2) { /* Reenable all IRQ */ cs->writeW6692(cs, W_IMASK, 0x18); cs->writeW6692(cs, W_D_EXIM, 0x00); cs->BC_Write_Reg(cs, 0, W_B_EXIM, 0x00); cs->BC_Write_Reg(cs, 1, W_B_EXIM, 0x00); /* Reset D-chan receiver and transmitter */ cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST | W_D_CMDR_XRST); } } /* Interface functions */ static u_char ReadW6692(struct IsdnCardState *cs, u_char offset) { return (inb(cs->hw.w6692.iobase + offset)); } static void WriteW6692(struct IsdnCardState *cs, u_char offset, u_char value) { outb(value, cs->hw.w6692.iobase + offset); } static void ReadISACfifo(struct 
IsdnCardState *cs, u_char *data, int size) { insb(cs->hw.w6692.iobase + W_D_RFIFO, data, size); } static void WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size) { outsb(cs->hw.w6692.iobase + W_D_XFIFO, data, size); } static u_char ReadW6692B(struct IsdnCardState *cs, int bchan, u_char offset) { return (inb(cs->hw.w6692.iobase + (bchan ? 0x40 : 0) + offset)); } static void WriteW6692B(struct IsdnCardState *cs, int bchan, u_char offset, u_char value) { outb(value, cs->hw.w6692.iobase + (bchan ? 0x40 : 0) + offset); } static int w6692_card_msg(struct IsdnCardState *cs, int mt, void *arg) { switch (mt) { case CARD_RESET: resetW6692(cs); return (0); case CARD_RELEASE: cs->writeW6692(cs, W_IMASK, 0xff); release_region(cs->hw.w6692.iobase, 256); if (cs->subtyp == W6692_USR) { cs->writeW6692(cs, W_XDATA, 0x04); } return (0); case CARD_INIT: initW6692(cs, 3); return (0); case CARD_TEST: return (0); } return (0); } static int id_idx; static struct pci_dev *dev_w6692 = NULL; int setup_w6692(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; u_char found = 0; u_char pci_irq = 0; u_int pci_ioaddr = 0; strcpy(tmp, w6692_revision); printk(KERN_INFO "HiSax: W6692 driver Rev. 
%s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_W6692) return (0); while (id_list[id_idx].vendor_id) { dev_w6692 = hisax_find_pci_device(id_list[id_idx].vendor_id, id_list[id_idx].device_id, dev_w6692); if (dev_w6692) { if (pci_enable_device(dev_w6692)) continue; cs->subtyp = id_idx; break; } id_idx++; } if (dev_w6692) { found = 1; pci_irq = dev_w6692->irq; /* I think address 0 is allways the configuration area */ /* and address 1 is the real IO space KKe 03.09.99 */ pci_ioaddr = pci_resource_start(dev_w6692, 1); /* USR ISDN PCI card TA need some special handling */ if (cs->subtyp == W6692_WINBOND) { if ((W6692_SV_USR == dev_w6692->subsystem_vendor) && (W6692_SD_USR == dev_w6692->subsystem_device)) { cs->subtyp = W6692_USR; } } } if (!found) { printk(KERN_WARNING "W6692: No PCI card found\n"); return (0); } cs->irq = pci_irq; if (!cs->irq) { printk(KERN_WARNING "W6692: No IRQ for PCI card found\n"); return (0); } if (!pci_ioaddr) { printk(KERN_WARNING "W6692: NO I/O Base Address found\n"); return (0); } cs->hw.w6692.iobase = pci_ioaddr; printk(KERN_INFO "Found: %s %s, I/O base: 0x%x, irq: %d\n", id_list[cs->subtyp].vendor_name, id_list[cs->subtyp].card_name, pci_ioaddr, pci_irq); if (!request_region(cs->hw.w6692.iobase, 256, id_list[cs->subtyp].card_name)) { printk(KERN_WARNING "HiSax: %s I/O ports %x-%x already in use\n", id_list[cs->subtyp].card_name, cs->hw.w6692.iobase, cs->hw.w6692.iobase + 255); return (0); } printk(KERN_INFO "HiSax: %s config irq:%d I/O:%x\n", id_list[cs->subtyp].card_name, cs->irq, cs->hw.w6692.iobase); INIT_WORK(&cs->tqueue, W6692_bh); cs->readW6692 = &ReadW6692; cs->writeW6692 = &WriteW6692; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->BC_Read_Reg = &ReadW6692B; cs->BC_Write_Reg = &WriteW6692B; cs->BC_Send_Data = &W6692B_fill_fifo; cs->cardmsg = &w6692_card_msg; cs->irq_func = &W6692_interrupt; cs->irq_flags |= IRQF_SHARED; W6692Version(cs, "W6692:"); printk(KERN_INFO "W6692 ISTA=0x%X\n", ReadW6692(cs, 
W_ISTA)); printk(KERN_INFO "W6692 IMASK=0x%X\n", ReadW6692(cs, W_IMASK)); printk(KERN_INFO "W6692 D_EXIR=0x%X\n", ReadW6692(cs, W_D_EXIR)); printk(KERN_INFO "W6692 D_EXIM=0x%X\n", ReadW6692(cs, W_D_EXIM)); printk(KERN_INFO "W6692 D_RSTA=0x%X\n", ReadW6692(cs, W_D_RSTA)); return (1); }
gpl-2.0
erorcun/android_kernel_oneplus_msm8974-3.10
drivers/staging/rtl8187se/r8180_rtl8225z2.c
2314
22222
/* * This is part of the rtl8180-sa2400 driver * released under the GPL (See file COPYING for details). * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it> * * This files contains programming code for the rtl8225 * radio frontend. * * *Many* thanks to Realtek Corp. for their great support! */ #include "r8180_hw.h" #include "r8180_rtl8225.h" #include "r8180_93cx6.h" #include "ieee80211/dot11d.h" static void write_rtl8225(struct net_device *dev, u8 adr, u16 data) { int i; u16 out, select; u8 bit; u32 bangdata = (data << 4) | (adr & 0xf); out = read_nic_word(dev, RFPinsOutput) & 0xfff3; write_nic_word(dev, RFPinsEnable, (read_nic_word(dev, RFPinsEnable) | 0x7)); select = read_nic_word(dev, RFPinsSelect); write_nic_word(dev, RFPinsSelect, select | 0x7 | SW_CONTROL_GPIO); force_pci_posting(dev); udelay(10); write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN); force_pci_posting(dev); udelay(2); write_nic_word(dev, RFPinsOutput, out); force_pci_posting(dev); udelay(10); for (i = 15; i >= 0; i--) { bit = (bangdata & (1 << i)) >> i; write_nic_word(dev, RFPinsOutput, bit | out); write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK); write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK); i--; bit = (bangdata & (1 << i)) >> i; write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK); write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK); write_nic_word(dev, RFPinsOutput, bit | out); } write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN); force_pci_posting(dev); udelay(10); write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN); write_nic_word(dev, RFPinsSelect, select | SW_CONTROL_GPIO); rtl8185_rf_pins_enable(dev); } static const u8 rtl8225_agc[] = { 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96, 0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e, 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 
0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, }; static const u32 rtl8225_chan[] = { 0, 0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380, 0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A, }; static const u8 rtl8225z2_gain_bg[] = { 0x23, 0x15, 0xa5, /* -82-1dBm */ 0x23, 0x15, 0xb5, /* -82-2dBm */ 0x23, 0x15, 0xc5, /* -82-3dBm */ 0x33, 0x15, 0xc5, /* -78dBm */ 0x43, 0x15, 0xc5, /* -74dBm */ 0x53, 0x15, 0xc5, /* -70dBm */ 0x63, 0x15, 0xc5, /* -66dBm */ }; static const u8 rtl8225z2_gain_a[] = { 0x13, 0x27, 0x5a, /* -82dBm */ 0x23, 0x23, 0x58, /* -82dBm */ 0x33, 0x1f, 0x56, /* -82dBm */ 0x43, 0x1b, 0x54, /* -78dBm */ 0x53, 0x17, 0x51, /* -74dBm */ 0x63, 0x24, 0x4f, /* -70dBm */ 0x73, 0x0f, 0x4c, /* -66dBm */ }; static const u16 rtl8225z2_rxgain[] = { 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409, 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644, 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688, 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745, 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789, 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793, 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d, 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9, 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3, 0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb }; void rtl8225z2_set_gain(struct 
net_device *dev, short gain) { const u8 *rtl8225_gain; struct r8180_priv *priv = ieee80211_priv(dev); u8 mode = priv->ieee80211->mode; if (mode == IEEE_B || mode == IEEE_G) rtl8225_gain = rtl8225z2_gain_bg; else rtl8225_gain = rtl8225z2_gain_a; write_phy_ofdm(dev, 0x0b, rtl8225_gain[gain * 3]); write_phy_ofdm(dev, 0x1b, rtl8225_gain[gain * 3 + 1]); write_phy_ofdm(dev, 0x1d, rtl8225_gain[gain * 3 + 2]); write_phy_ofdm(dev, 0x21, 0x37); } static u32 read_rtl8225(struct net_device *dev, u8 adr) { u32 data2Write = ((u32)(adr & 0x1f)) << 27; u32 dataRead; u32 mask; u16 oval, oval2, oval3, tmp; int i; short bit, rw; u8 wLength = 6; u8 rLength = 12; u8 low2high = 0; oval = read_nic_word(dev, RFPinsOutput); oval2 = read_nic_word(dev, RFPinsEnable); oval3 = read_nic_word(dev, RFPinsSelect); write_nic_word(dev, RFPinsEnable, (oval2|0xf)); write_nic_word(dev, RFPinsSelect, (oval3|0xf)); dataRead = 0; oval &= ~0xf; write_nic_word(dev, RFPinsOutput, oval | BB_HOST_BANG_EN); udelay(4); write_nic_word(dev, RFPinsOutput, oval); udelay(5); rw = 0; mask = (low2high) ? 0x01 : (((u32)0x01)<<(32-1)); for (i = 0; i < wLength/2; i++) { bit = ((data2Write&mask) != 0) ? 1 : 0; write_nic_word(dev, RFPinsOutput, bit | oval | rw); udelay(1); write_nic_word(dev, RFPinsOutput, bit | oval | BB_HOST_BANG_CLK | rw); udelay(2); write_nic_word(dev, RFPinsOutput, bit | oval | BB_HOST_BANG_CLK | rw); udelay(2); mask = (low2high) ? (mask<<1) : (mask>>1); if (i == 2) { rw = BB_HOST_BANG_RW; write_nic_word(dev, RFPinsOutput, bit | oval | BB_HOST_BANG_CLK | rw); udelay(2); write_nic_word(dev, RFPinsOutput, bit | oval | rw); udelay(2); break; } bit = ((data2Write&mask) != 0) ? 1 : 0; write_nic_word(dev, RFPinsOutput, oval | bit | rw | BB_HOST_BANG_CLK); udelay(2); write_nic_word(dev, RFPinsOutput, oval | bit | rw | BB_HOST_BANG_CLK); udelay(2); write_nic_word(dev, RFPinsOutput, oval | bit | rw); udelay(1); mask = (low2high) ? 
(mask<<1) : (mask>>1); } write_nic_word(dev, RFPinsOutput, rw|oval); udelay(2); mask = (low2high) ? 0x01 : (((u32)0x01) << (12-1)); /* * We must set data pin to HW controlled, otherwise RF can't driver it * and value RF register won't be able to read back properly. */ write_nic_word(dev, RFPinsEnable, (oval2 & (~0x01))); for (i = 0; i < rLength; i++) { write_nic_word(dev, RFPinsOutput, rw|oval); udelay(1); write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK); udelay(2); write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK); udelay(2); write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK); udelay(2); tmp = read_nic_word(dev, RFPinsInput); dataRead |= (tmp & BB_HOST_BANG_CLK ? mask : 0); write_nic_word(dev, RFPinsOutput, (rw|oval)); udelay(2); mask = (low2high) ? (mask<<1) : (mask>>1); } write_nic_word(dev, RFPinsOutput, BB_HOST_BANG_EN | BB_HOST_BANG_RW | oval); udelay(2); write_nic_word(dev, RFPinsEnable, oval2); write_nic_word(dev, RFPinsSelect, oval3); /* Set To SW Switch */ write_nic_word(dev, RFPinsOutput, 0x3a0); return dataRead; } void rtl8225z2_rf_close(struct net_device *dev) { RF_WriteReg(dev, 0x4, 0x1f); force_pci_posting(dev); mdelay(1); rtl8180_set_anaparam(dev, RTL8225z2_ANAPARAM_OFF); rtl8185_set_anaparam2(dev, RTL8225z2_ANAPARAM2_OFF); } /* * Map dBm into Tx power index according to current HW model, for example, * RF and PA, and current wireless mode. */ s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode, s32 PowerInDbm) { bool bUseDefault = true; s8 TxPwrIdx = 0; /* * OFDM Power in dBm = Index * 0.5 + 0 * CCK Power in dBm = Index * 0.25 + 13 */ s32 tmp = 0; if (WirelessMode == WIRELESS_MODE_G) { bUseDefault = false; tmp = (2 * PowerInDbm); if (tmp < 0) TxPwrIdx = 0; else if (tmp > 40) /* 40 means 20 dBm. */ TxPwrIdx = 40; else TxPwrIdx = (s8)tmp; } else if (WirelessMode == WIRELESS_MODE_B) { bUseDefault = false; tmp = (4 * PowerInDbm) - 52; if (tmp < 0) TxPwrIdx = 0; else if (tmp > 28) /* 28 means 20 dBm. 
*/ TxPwrIdx = 28; else TxPwrIdx = (s8)tmp; } /* * TRUE if we want to use a default implementation. * We shall set it to FALSE when we have exact translation formula * for target IC. 070622, by rcnjko. */ if (bUseDefault) { if (PowerInDbm < 0) TxPwrIdx = 0; else if (PowerInDbm > 35) TxPwrIdx = 35; else TxPwrIdx = (u8)PowerInDbm; } return TxPwrIdx; } void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch) { struct r8180_priv *priv = ieee80211_priv(dev); u8 max_cck_power_level; u8 max_ofdm_power_level; u8 min_ofdm_power_level; char cck_power_level = (char)(0xff & priv->chtxpwr[ch]); char ofdm_power_level = (char)(0xff & priv->chtxpwr_ofdm[ch]); if (IS_DOT11D_ENABLE(priv->ieee80211) && IS_DOT11D_STATE_DONE(priv->ieee80211)) { u8 MaxTxPwrInDbm = DOT11D_GetMaxTxPwrInDbm(priv->ieee80211, ch); u8 CckMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_B, MaxTxPwrInDbm); u8 OfdmMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_G, MaxTxPwrInDbm); if (cck_power_level > CckMaxPwrIdx) cck_power_level = CckMaxPwrIdx; if (ofdm_power_level > OfdmMaxPwrIdx) ofdm_power_level = OfdmMaxPwrIdx; } max_cck_power_level = 15; max_ofdm_power_level = 25; min_ofdm_power_level = 10; if (cck_power_level > 35) cck_power_level = 35; write_nic_byte(dev, CCK_TXAGC, cck_power_level); force_pci_posting(dev); mdelay(1); if (ofdm_power_level > 35) ofdm_power_level = 35; if (priv->up == 0) { write_phy_ofdm(dev, 2, 0x42); write_phy_ofdm(dev, 5, 0x00); write_phy_ofdm(dev, 6, 0x40); write_phy_ofdm(dev, 7, 0x00); write_phy_ofdm(dev, 8, 0x40); } write_nic_byte(dev, OFDM_TXAGC, ofdm_power_level); if (ofdm_power_level <= 11) { write_phy_ofdm(dev, 0x07, 0x5c); write_phy_ofdm(dev, 0x09, 0x5c); } if (ofdm_power_level <= 17) { write_phy_ofdm(dev, 0x07, 0x54); write_phy_ofdm(dev, 0x09, 0x54); } else { write_phy_ofdm(dev, 0x07, 0x50); write_phy_ofdm(dev, 0x09, 0x50); } force_pci_posting(dev); mdelay(1); } void rtl8225z2_rf_set_chan(struct net_device *dev, short ch) { rtl8225z2_SetTXPowerLevel(dev, ch); 
RF_WriteReg(dev, 0x7, rtl8225_chan[ch]); if ((RF_ReadReg(dev, 0x7) & 0x0F80) != rtl8225_chan[ch]) RF_WriteReg(dev, 0x7, rtl8225_chan[ch]); mdelay(1); force_pci_posting(dev); mdelay(10); } static void rtl8225_host_pci_init(struct net_device *dev) { write_nic_word(dev, RFPinsOutput, 0x480); rtl8185_rf_pins_enable(dev); write_nic_word(dev, RFPinsSelect, 0x88 | SW_CONTROL_GPIO); write_nic_byte(dev, GP_ENABLE, 0); force_pci_posting(dev); mdelay(200); /* bit 6 is for RF on/off detection */ write_nic_word(dev, GP_ENABLE, 0xff & (~(1 << 6))); } void rtl8225z2_rf_init(struct net_device *dev) { struct r8180_priv *priv = ieee80211_priv(dev); int i; short channel = 1; u16 brsr; u32 data; priv->chan = channel; rtl8225_host_pci_init(dev); write_nic_dword(dev, RF_TIMING, 0x000a8008); brsr = read_nic_word(dev, BRSR); write_nic_word(dev, BRSR, 0xffff); write_nic_dword(dev, RF_PARA, 0x100044); rtl8180_set_mode(dev, EPROM_CMD_CONFIG); write_nic_byte(dev, CONFIG3, 0x44); rtl8180_set_mode(dev, EPROM_CMD_NORMAL); rtl8185_rf_pins_enable(dev); write_rtl8225(dev, 0x0, 0x2bf); mdelay(1); write_rtl8225(dev, 0x1, 0xee0); mdelay(1); write_rtl8225(dev, 0x2, 0x44d); mdelay(1); write_rtl8225(dev, 0x3, 0x441); mdelay(1); write_rtl8225(dev, 0x4, 0x8c3); mdelay(1); write_rtl8225(dev, 0x5, 0xc72); mdelay(1); write_rtl8225(dev, 0x6, 0xe6); mdelay(1); write_rtl8225(dev, 0x7, rtl8225_chan[channel]); mdelay(1); write_rtl8225(dev, 0x8, 0x3f); mdelay(1); write_rtl8225(dev, 0x9, 0x335); mdelay(1); write_rtl8225(dev, 0xa, 0x9d4); mdelay(1); write_rtl8225(dev, 0xb, 0x7bb); mdelay(1); write_rtl8225(dev, 0xc, 0x850); mdelay(1); write_rtl8225(dev, 0xd, 0xcdf); mdelay(1); write_rtl8225(dev, 0xe, 0x2b); mdelay(1); write_rtl8225(dev, 0xf, 0x114); mdelay(100); write_rtl8225(dev, 0x0, 0x1b7); for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) { write_rtl8225(dev, 0x1, i + 1); write_rtl8225(dev, 0x2, rtl8225z2_rxgain[i]); } write_rtl8225(dev, 0x3, 0x80); write_rtl8225(dev, 0x5, 0x4); write_rtl8225(dev, 0x0, 0xb7); 
write_rtl8225(dev, 0x2, 0xc4d); /* FIXME!! rtl8187 we have to check if calibrarion * is successful and eventually cal. again (repeat * the two write on reg 2) */ data = read_rtl8225(dev, 6); if (!(data & 0x00000080)) { write_rtl8225(dev, 0x02, 0x0c4d); force_pci_posting(dev); mdelay(200); write_rtl8225(dev, 0x02, 0x044d); force_pci_posting(dev); mdelay(100); data = read_rtl8225(dev, 6); if (!(data & 0x00000080)) DMESGW("RF Calibration Failed!!!!\n"); } mdelay(200); write_rtl8225(dev, 0x0, 0x2bf); for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) { write_phy_ofdm(dev, 0xb, rtl8225_agc[i]); mdelay(1); /* enable writing AGC table */ write_phy_ofdm(dev, 0xa, i + 0x80); mdelay(1); } force_pci_posting(dev); mdelay(1); write_phy_ofdm(dev, 0x00, 0x01); mdelay(1); write_phy_ofdm(dev, 0x01, 0x02); mdelay(1); write_phy_ofdm(dev, 0x02, 0x62); mdelay(1); write_phy_ofdm(dev, 0x03, 0x00); mdelay(1); write_phy_ofdm(dev, 0x04, 0x00); mdelay(1); write_phy_ofdm(dev, 0x05, 0x00); mdelay(1); write_phy_ofdm(dev, 0x06, 0x40); mdelay(1); write_phy_ofdm(dev, 0x07, 0x00); mdelay(1); write_phy_ofdm(dev, 0x08, 0x40); mdelay(1); write_phy_ofdm(dev, 0x09, 0xfe); mdelay(1); write_phy_ofdm(dev, 0x0a, 0x08); mdelay(1); write_phy_ofdm(dev, 0x0b, 0x80); mdelay(1); write_phy_ofdm(dev, 0x0c, 0x01); mdelay(1); write_phy_ofdm(dev, 0x0d, 0x43); write_phy_ofdm(dev, 0x0e, 0xd3); mdelay(1); write_phy_ofdm(dev, 0x0f, 0x38); mdelay(1); write_phy_ofdm(dev, 0x10, 0x84); mdelay(1); write_phy_ofdm(dev, 0x11, 0x07); mdelay(1); write_phy_ofdm(dev, 0x12, 0x20); mdelay(1); write_phy_ofdm(dev, 0x13, 0x20); mdelay(1); write_phy_ofdm(dev, 0x14, 0x00); mdelay(1); write_phy_ofdm(dev, 0x15, 0x40); mdelay(1); write_phy_ofdm(dev, 0x16, 0x00); mdelay(1); write_phy_ofdm(dev, 0x17, 0x40); mdelay(1); write_phy_ofdm(dev, 0x18, 0xef); mdelay(1); write_phy_ofdm(dev, 0x19, 0x19); mdelay(1); write_phy_ofdm(dev, 0x1a, 0x20); mdelay(1); write_phy_ofdm(dev, 0x1b, 0x15); mdelay(1); write_phy_ofdm(dev, 0x1c, 0x04); mdelay(1); 
write_phy_ofdm(dev, 0x1d, 0xc5); mdelay(1); write_phy_ofdm(dev, 0x1e, 0x95); mdelay(1); write_phy_ofdm(dev, 0x1f, 0x75); mdelay(1); write_phy_ofdm(dev, 0x20, 0x1f); mdelay(1); write_phy_ofdm(dev, 0x21, 0x17); mdelay(1); write_phy_ofdm(dev, 0x22, 0x16); mdelay(1); write_phy_ofdm(dev, 0x23, 0x80); mdelay(1); /* FIXME maybe not needed */ write_phy_ofdm(dev, 0x24, 0x46); mdelay(1); write_phy_ofdm(dev, 0x25, 0x00); mdelay(1); write_phy_ofdm(dev, 0x26, 0x90); mdelay(1); write_phy_ofdm(dev, 0x27, 0x88); mdelay(1); rtl8225z2_set_gain(dev, 4); write_phy_cck(dev, 0x0, 0x98); mdelay(1); write_phy_cck(dev, 0x3, 0x20); mdelay(1); write_phy_cck(dev, 0x4, 0x7e); mdelay(1); write_phy_cck(dev, 0x5, 0x12); mdelay(1); write_phy_cck(dev, 0x6, 0xfc); mdelay(1); write_phy_cck(dev, 0x7, 0x78); mdelay(1); write_phy_cck(dev, 0x8, 0x2e); mdelay(1); write_phy_cck(dev, 0x10, 0x93); mdelay(1); write_phy_cck(dev, 0x11, 0x88); mdelay(1); write_phy_cck(dev, 0x12, 0x47); mdelay(1); write_phy_cck(dev, 0x13, 0xd0); write_phy_cck(dev, 0x19, 0x00); write_phy_cck(dev, 0x1a, 0xa0); write_phy_cck(dev, 0x1b, 0x08); write_phy_cck(dev, 0x40, 0x86); /* CCK Carrier Sense Threshold */ write_phy_cck(dev, 0x41, 0x8d); mdelay(1); write_phy_cck(dev, 0x42, 0x15); mdelay(1); write_phy_cck(dev, 0x43, 0x18); mdelay(1); write_phy_cck(dev, 0x44, 0x36); mdelay(1); write_phy_cck(dev, 0x45, 0x35); mdelay(1); write_phy_cck(dev, 0x46, 0x2e); mdelay(1); write_phy_cck(dev, 0x47, 0x25); mdelay(1); write_phy_cck(dev, 0x48, 0x1c); mdelay(1); write_phy_cck(dev, 0x49, 0x12); mdelay(1); write_phy_cck(dev, 0x4a, 0x09); mdelay(1); write_phy_cck(dev, 0x4b, 0x04); mdelay(1); write_phy_cck(dev, 0x4c, 0x05); mdelay(1); write_nic_byte(dev, 0x5b, 0x0d); mdelay(1); rtl8225z2_SetTXPowerLevel(dev, channel); /* RX antenna default to A */ write_phy_cck(dev, 0x11, 0x9b); mdelay(1); /* B: 0xDB */ write_phy_ofdm(dev, 0x26, 0x90); mdelay(1); /* B: 0x10 */ rtl8185_tx_antenna(dev, 0x03); /* B: 0x00 */ /* switch to high-speed 3-wire * last digit. 
2 for both cck and ofdm */ write_nic_dword(dev, 0x94, 0x15c00002); rtl8185_rf_pins_enable(dev); rtl8225z2_rf_set_chan(dev, priv->chan); } void rtl8225z2_rf_set_mode(struct net_device *dev) { struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->mode == IEEE_A) { write_rtl8225(dev, 0x5, 0x1865); write_nic_dword(dev, RF_PARA, 0x10084); write_nic_dword(dev, RF_TIMING, 0xa8008); write_phy_ofdm(dev, 0x0, 0x0); write_phy_ofdm(dev, 0xa, 0x6); write_phy_ofdm(dev, 0xb, 0x99); write_phy_ofdm(dev, 0xf, 0x20); write_phy_ofdm(dev, 0x11, 0x7); rtl8225z2_set_gain(dev, 4); write_phy_ofdm(dev, 0x15, 0x40); write_phy_ofdm(dev, 0x17, 0x40); write_nic_dword(dev, 0x94, 0x10000000); } else { write_rtl8225(dev, 0x5, 0x1864); write_nic_dword(dev, RF_PARA, 0x10044); write_nic_dword(dev, RF_TIMING, 0xa8008); write_phy_ofdm(dev, 0x0, 0x1); write_phy_ofdm(dev, 0xa, 0x6); write_phy_ofdm(dev, 0xb, 0x99); write_phy_ofdm(dev, 0xf, 0x20); write_phy_ofdm(dev, 0x11, 0x7); rtl8225z2_set_gain(dev, 4); write_phy_ofdm(dev, 0x15, 0x40); write_phy_ofdm(dev, 0x17, 0x40); write_nic_dword(dev, 0x94, 0x04000002); } } #define MAX_DOZE_WAITING_TIMES_85B 20 #define MAX_POLLING_24F_TIMES_87SE 10 #define LPS_MAX_SLEEP_WAITING_TIMES_87SE 5 bool SetZebraRFPowerState8185(struct net_device *dev, RT_RF_POWER_STATE eRFPowerState) { struct r8180_priv *priv = ieee80211_priv(dev); u8 btCR9346, btConfig3; bool bActionAllowed = true, bTurnOffBB = true; u8 u1bTmp; int i; bool bResult = true; u8 QueueID; if (priv->SetRFPowerStateInProgress == true) return false; priv->SetRFPowerStateInProgress = true; btCR9346 = read_nic_byte(dev, CR9346); write_nic_byte(dev, CR9346, (btCR9346 | 0xC0)); btConfig3 = read_nic_byte(dev, CONFIG3); write_nic_byte(dev, CONFIG3, (btConfig3 | CONFIG3_PARM_En)); switch (eRFPowerState) { case eRfOn: write_nic_word(dev, 0x37C, 0x00EC); /* turn on AFE */ write_nic_byte(dev, 0x54, 0x00); write_nic_byte(dev, 0x62, 0x00); /* turn on RF */ RF_WriteReg(dev, 0x0, 0x009f); udelay(500); 
RF_WriteReg(dev, 0x4, 0x0972); udelay(500); /* turn on RF again */ RF_WriteReg(dev, 0x0, 0x009f); udelay(500); RF_WriteReg(dev, 0x4, 0x0972); udelay(500); /* turn on BB */ write_phy_ofdm(dev, 0x10, 0x40); write_phy_ofdm(dev, 0x12, 0x40); /* Avoid power down at init time. */ write_nic_byte(dev, CONFIG4, priv->RFProgType); u1bTmp = read_nic_byte(dev, 0x24E); write_nic_byte(dev, 0x24E, (u1bTmp & (~(BIT5 | BIT6)))); break; case eRfSleep: for (QueueID = 0, i = 0; QueueID < 6;) { if (get_curr_tx_free_desc(dev, QueueID) == priv->txringcount) { QueueID++; continue; } else { priv->TxPollingTimes++; if (priv->TxPollingTimes >= LPS_MAX_SLEEP_WAITING_TIMES_87SE) { bActionAllowed = false; break; } else udelay(10); } } if (bActionAllowed) { /* turn off BB RXIQ matrix to cut off rx signal */ write_phy_ofdm(dev, 0x10, 0x00); write_phy_ofdm(dev, 0x12, 0x00); /* turn off RF */ RF_WriteReg(dev, 0x4, 0x0000); RF_WriteReg(dev, 0x0, 0x0000); /* turn off AFE except PLL */ write_nic_byte(dev, 0x62, 0xff); write_nic_byte(dev, 0x54, 0xec); mdelay(1); { int i = 0; while (true) { u8 tmp24F = read_nic_byte(dev, 0x24f); if ((tmp24F == 0x01) || (tmp24F == 0x09)) { bTurnOffBB = true; break; } else { udelay(10); i++; priv->TxPollingTimes++; if (priv->TxPollingTimes >= LPS_MAX_SLEEP_WAITING_TIMES_87SE) { bTurnOffBB = false; break; } else udelay(10); } } } if (bTurnOffBB) { /* turn off BB */ u1bTmp = read_nic_byte(dev, 0x24E); write_nic_byte(dev, 0x24E, (u1bTmp | BIT5 | BIT6)); /* turn off AFE PLL */ write_nic_byte(dev, 0x54, 0xFC); write_nic_word(dev, 0x37C, 0x00FC); } } break; case eRfOff: for (QueueID = 0, i = 0; QueueID < 6;) { if (get_curr_tx_free_desc(dev, QueueID) == priv->txringcount) { QueueID++; continue; } else { udelay(10); i++; } if (i >= MAX_DOZE_WAITING_TIMES_85B) break; } /* turn off BB RXIQ matrix to cut off rx signal */ write_phy_ofdm(dev, 0x10, 0x00); write_phy_ofdm(dev, 0x12, 0x00); /* turn off RF */ RF_WriteReg(dev, 0x4, 0x0000); RF_WriteReg(dev, 0x0, 0x0000); /* turn off AFE 
except PLL */ write_nic_byte(dev, 0x62, 0xff); write_nic_byte(dev, 0x54, 0xec); mdelay(1); { int i = 0; while (true) { u8 tmp24F = read_nic_byte(dev, 0x24f); if ((tmp24F == 0x01) || (tmp24F == 0x09)) { bTurnOffBB = true; break; } else { bTurnOffBB = false; udelay(10); i++; } if (i > MAX_POLLING_24F_TIMES_87SE) break; } } if (bTurnOffBB) { /* turn off BB */ u1bTmp = read_nic_byte(dev, 0x24E); write_nic_byte(dev, 0x24E, (u1bTmp | BIT5 | BIT6)); /* turn off AFE PLL (80M) */ write_nic_byte(dev, 0x54, 0xFC); write_nic_word(dev, 0x37C, 0x00FC); } break; } btConfig3 &= ~(CONFIG3_PARM_En); write_nic_byte(dev, CONFIG3, btConfig3); btCR9346 &= ~(0xC0); write_nic_byte(dev, CR9346, btCR9346); if (bResult && bActionAllowed) priv->eRFPowerState = eRFPowerState; priv->SetRFPowerStateInProgress = false; return bResult && bActionAllowed; } void rtl8225z4_rf_sleep(struct net_device *dev) { MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS); } void rtl8225z4_rf_wakeup(struct net_device *dev) { MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS); }
gpl-2.0
aopp/android_kernel_nvidia_shieldtablet
drivers/input/keyboard/nomadik-ske-keypad.c
2314
10870
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson * * License terms:GNU General Public License (GPL) version 2 * * Keypad controller driver for the SKE (Scroll Key Encoder) module used in * the Nomadik 8815 and Ux500 platforms. */ #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/platform_data/keypad-nomadik-ske.h> /* SKE_CR bits */ #define SKE_KPMLT (0x1 << 6) #define SKE_KPCN (0x7 << 3) #define SKE_KPASEN (0x1 << 2) #define SKE_KPASON (0x1 << 7) /* SKE_IMSC bits */ #define SKE_KPIMA (0x1 << 2) /* SKE_ICR bits */ #define SKE_KPICS (0x1 << 3) #define SKE_KPICA (0x1 << 2) /* SKE_RIS bits */ #define SKE_KPRISA (0x1 << 2) #define SKE_KEYPAD_ROW_SHIFT 3 #define SKE_KPD_NUM_ROWS 8 #define SKE_KPD_NUM_COLS 8 /* keypad auto scan registers */ #define SKE_ASR0 0x20 #define SKE_ASR1 0x24 #define SKE_ASR2 0x28 #define SKE_ASR3 0x2C #define SKE_NUM_ASRX_REGISTERS (4) #define KEY_PRESSED_DELAY 10 /** * struct ske_keypad - data structure used by keypad driver * @irq: irq no * @reg_base: ske regsiters base address * @input: pointer to input device object * @board: keypad platform device * @keymap: matrix scan code table for keycodes * @clk: clock structure pointer */ struct ske_keypad { int irq; void __iomem *reg_base; struct input_dev *input; const struct ske_keypad_platform_data *board; unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS]; struct clk *clk; struct clk *pclk; spinlock_t ske_keypad_lock; }; static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr, u8 mask, u8 data) { u32 ret; spin_lock(&keypad->ske_keypad_lock); ret = readl(keypad->reg_base + addr); ret &= ~mask; ret |= data; writel(ret, keypad->reg_base + addr); 
spin_unlock(&keypad->ske_keypad_lock); } /* * ske_keypad_chip_init: init keypad controller configuration * * Enable Multi key press detection, auto scan mode */ static int __init ske_keypad_chip_init(struct ske_keypad *keypad) { u32 value; int timeout = keypad->board->debounce_ms; /* check SKE_RIS to be 0 */ while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--) cpu_relax(); if (!timeout) return -EINVAL; /* * set debounce value * keypad dbounce is configured in DBCR[15:8] * dbounce value in steps of 32/32.768 ms */ spin_lock(&keypad->ske_keypad_lock); value = readl(keypad->reg_base + SKE_DBCR); value = value & 0xff; value |= ((keypad->board->debounce_ms * 32000)/32768) << 8; writel(value, keypad->reg_base + SKE_DBCR); spin_unlock(&keypad->ske_keypad_lock); /* enable multi key detection */ ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT); /* * set up the number of columns * KPCN[5:3] defines no. of keypad columns to be auto scanned */ value = (keypad->board->kcol - 1) << 3; ske_keypad_set_bits(keypad, SKE_CR, SKE_KPCN, value); /* clear keypad interrupt for auto(and pending SW) scans */ ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA | SKE_KPICS); /* un-mask keypad interrupts */ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); /* enable automatic scan */ ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPASEN); return 0; } static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col) { int row = 0, code, pos; struct input_dev *input = keypad->input; u32 ske_ris; int key_pressed; int num_of_rows; /* find out the row */ num_of_rows = hweight8(status); do { pos = __ffs(status); row = pos; status &= ~(1 << pos); code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT); ske_ris = readl(keypad->reg_base + SKE_RIS); key_pressed = ske_ris & SKE_KPRISA; input_event(input, EV_MSC, MSC_SCAN, code); input_report_key(input, keypad->keymap[code], key_pressed); input_sync(input); num_of_rows--; } while (num_of_rows); } static void 
ske_keypad_read_data(struct ske_keypad *keypad) { u8 status; int col = 0; int ske_asr, i; /* * Read the auto scan registers * * Each SKE_ASRx (x=0 to x=3) contains two row values. * lower byte contains row value for column 2*x, * upper byte contains row value for column 2*x + 1 */ for (i = 0; i < SKE_NUM_ASRX_REGISTERS; i++) { ske_asr = readl(keypad->reg_base + SKE_ASR0 + (4 * i)); if (!ske_asr) continue; /* now that ASRx is zero, find out the coloumn x and row y */ status = ske_asr & 0xff; if (status) { col = i * 2; ske_keypad_report(keypad, status, col); } status = (ske_asr & 0xff00) >> 8; if (status) { col = (i * 2) + 1; ske_keypad_report(keypad, status, col); } } } static irqreturn_t ske_keypad_irq(int irq, void *dev_id) { struct ske_keypad *keypad = dev_id; int timeout = keypad->board->debounce_ms; /* disable auto scan interrupt; mask the interrupt generated */ ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0); ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA); while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --timeout) cpu_relax(); /* SKEx registers are stable and can be read */ ske_keypad_read_data(keypad); /* wait until raw interrupt is clear */ while ((readl(keypad->reg_base + SKE_RIS)) && --timeout) msleep(KEY_PRESSED_DELAY); /* enable auto scan interrupts */ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); return IRQ_HANDLED; } static int __init ske_keypad_probe(struct platform_device *pdev) { const struct ske_keypad_platform_data *plat = pdev->dev.platform_data; struct ske_keypad *keypad; struct input_dev *input; struct resource *res; int irq; int error; if (!plat) { dev_err(&pdev->dev, "invalid keypad platform data\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to get keypad irq\n"); return -EINVAL; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "missing platform resources\n"); return -EINVAL; } keypad = kzalloc(sizeof(struct ske_keypad), 
GFP_KERNEL); input = input_allocate_device(); if (!keypad || !input) { dev_err(&pdev->dev, "failed to allocate keypad memory\n"); error = -ENOMEM; goto err_free_mem; } keypad->irq = irq; keypad->board = plat; keypad->input = input; spin_lock_init(&keypad->ske_keypad_lock); if (!request_mem_region(res->start, resource_size(res), pdev->name)) { dev_err(&pdev->dev, "failed to request I/O memory\n"); error = -EBUSY; goto err_free_mem; } keypad->reg_base = ioremap(res->start, resource_size(res)); if (!keypad->reg_base) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -ENXIO; goto err_free_mem_region; } keypad->pclk = clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(keypad->pclk)) { dev_err(&pdev->dev, "failed to get pclk\n"); error = PTR_ERR(keypad->pclk); goto err_iounmap; } keypad->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(keypad->clk)) { dev_err(&pdev->dev, "failed to get clk\n"); error = PTR_ERR(keypad->clk); goto err_pclk; } input->id.bustype = BUS_HOST; input->name = "ux500-ske-keypad"; input->dev.parent = &pdev->dev; error = matrix_keypad_build_keymap(plat->keymap_data, NULL, SKE_KPD_NUM_ROWS, SKE_KPD_NUM_COLS, keypad->keymap, input); if (error) { dev_err(&pdev->dev, "Failed to build keymap\n"); goto err_clk; } input_set_capability(input, EV_MSC, MSC_SCAN); if (!plat->no_autorepeat) __set_bit(EV_REP, input->evbit); error = clk_prepare_enable(keypad->pclk); if (error) { dev_err(&pdev->dev, "Failed to prepare/enable pclk\n"); goto err_clk; } error = clk_prepare_enable(keypad->clk); if (error) { dev_err(&pdev->dev, "Failed to prepare/enable clk\n"); goto err_pclk_disable; } /* go through board initialization helpers */ if (keypad->board->init) keypad->board->init(); error = ske_keypad_chip_init(keypad); if (error) { dev_err(&pdev->dev, "unable to init keypad hardware\n"); goto err_clk_disable; } error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq, IRQF_ONESHOT, "ske-keypad", keypad); if (error) { dev_err(&pdev->dev, "allocate irq %d failed\n", 
keypad->irq); goto err_clk_disable; } error = input_register_device(input); if (error) { dev_err(&pdev->dev, "unable to register input device: %d\n", error); goto err_free_irq; } if (plat->wakeup_enable) device_init_wakeup(&pdev->dev, true); platform_set_drvdata(pdev, keypad); return 0; err_free_irq: free_irq(keypad->irq, keypad); err_clk_disable: clk_disable_unprepare(keypad->clk); err_pclk_disable: clk_disable_unprepare(keypad->pclk); err_clk: clk_put(keypad->clk); err_pclk: clk_put(keypad->pclk); err_iounmap: iounmap(keypad->reg_base); err_free_mem_region: release_mem_region(res->start, resource_size(res)); err_free_mem: input_free_device(input); kfree(keypad); return error; } static int ske_keypad_remove(struct platform_device *pdev) { struct ske_keypad *keypad = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); free_irq(keypad->irq, keypad); input_unregister_device(keypad->input); clk_disable_unprepare(keypad->clk); clk_put(keypad->clk); if (keypad->board->exit) keypad->board->exit(); iounmap(keypad->reg_base); release_mem_region(res->start, resource_size(res)); kfree(keypad); return 0; } #ifdef CONFIG_PM_SLEEP static int ske_keypad_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ske_keypad *keypad = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); if (device_may_wakeup(dev)) enable_irq_wake(irq); else ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0); return 0; } static int ske_keypad_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ske_keypad *keypad = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); if (device_may_wakeup(dev)) disable_irq_wake(irq); else ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); return 0; } #endif static SIMPLE_DEV_PM_OPS(ske_keypad_dev_pm_ops, ske_keypad_suspend, ske_keypad_resume); static struct platform_driver ske_keypad_driver = { .driver = { .name = 
"nmk-ske-keypad", .owner = THIS_MODULE, .pm = &ske_keypad_dev_pm_ops, }, .remove = ske_keypad_remove, }; module_platform_driver_probe(ske_keypad_driver, ske_keypad_probe); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>"); MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver"); MODULE_ALIAS("platform:nomadik-ske-keypad");
gpl-2.0
drod2169/Linux-3.10.x
drivers/spi/spi-pxa2xx-pxadma.c
2570
13772
/* * PXA2xx SPI private DMA support. * * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/pxa2xx_ssp.h> #include <linux/spi/spi.h> #include <linux/spi/pxa2xx_spi.h> #include "spi-pxa2xx.h" #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) bool pxa2xx_spi_dma_is_possible(size_t len) { /* Try to map dma buffer and do a dma transfer if successful, but * only if the length is non-zero and less than MAX_DMA_LEN. * * Zero-length non-descriptor DMA is illegal on PXA2xx; force use * of PIO instead. Care is needed above because the transfer may * have have been passed with buffers that are already dma mapped. * A zero-length transfer in PIO mode will not try to write/read * to/from the buffers * * REVISIT large transfers are exactly where we most want to be * using DMA. If this happens much, split those transfers into * multiple DMA segments rather than forcing PIO. 
*/ return len > 0 && len <= MAX_DMA_LEN; } int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data) { struct spi_message *msg = drv_data->cur_msg; struct device *dev = &msg->spi->dev; if (!drv_data->cur_chip->enable_dma) return 0; if (msg->is_dma_mapped) return drv_data->rx_dma && drv_data->tx_dma; if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) return 0; /* Modify setup if rx buffer is null */ if (drv_data->rx == NULL) { *drv_data->null_dma_buf = 0; drv_data->rx = drv_data->null_dma_buf; drv_data->rx_map_len = 4; } else drv_data->rx_map_len = drv_data->len; /* Modify setup if tx buffer is null */ if (drv_data->tx == NULL) { *drv_data->null_dma_buf = 0; drv_data->tx = drv_data->null_dma_buf; drv_data->tx_map_len = 4; } else drv_data->tx_map_len = drv_data->len; /* Stream map the tx buffer. Always do DMA_TO_DEVICE first * so we flush the cache *before* invalidating it, in case * the tx and rx buffers overlap. */ drv_data->tx_dma = dma_map_single(dev, drv_data->tx, drv_data->tx_map_len, DMA_TO_DEVICE); if (dma_mapping_error(dev, drv_data->tx_dma)) return 0; /* Stream map the rx buffer */ drv_data->rx_dma = dma_map_single(dev, drv_data->rx, drv_data->rx_map_len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, drv_data->rx_dma)) { dma_unmap_single(dev, drv_data->tx_dma, drv_data->tx_map_len, DMA_TO_DEVICE); return 0; } return 1; } static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data) { struct device *dev; if (!drv_data->dma_mapped) return; if (!drv_data->cur_msg->is_dma_mapped) { dev = &drv_data->cur_msg->spi->dev; dma_unmap_single(dev, drv_data->rx_dma, drv_data->rx_map_len, DMA_FROM_DEVICE); dma_unmap_single(dev, drv_data->tx_dma, drv_data->tx_map_len, DMA_TO_DEVICE); } drv_data->dma_mapped = 0; } static int wait_ssp_rx_stall(void const __iomem *ioaddr) { unsigned long limit = loops_per_jiffy << 1; while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) cpu_relax(); return limit; } static int wait_dma_channel_stop(int channel) { 
unsigned long limit = loops_per_jiffy << 1; while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) cpu_relax(); return limit; } static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, const char *msg) { void __iomem *reg = drv_data->ioaddr; /* Stop and reset */ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; write_SSSR_CS(drv_data, drv_data->clear_sr); write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); pxa2xx_spi_flush(drv_data); write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); pxa2xx_spi_unmap_dma_buffers(drv_data); dev_err(&drv_data->pdev->dev, "%s\n", msg); drv_data->cur_msg->state = ERROR_STATE; tasklet_schedule(&drv_data->pump_transfers); } static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; struct spi_message *msg = drv_data->cur_msg; /* Clear and disable interrupts on SSP and DMA channels*/ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); write_SSSR_CS(drv_data, drv_data->clear_sr); DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; if (wait_dma_channel_stop(drv_data->rx_channel) == 0) dev_err(&drv_data->pdev->dev, "dma_handler: dma rx channel stop failed\n"); if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) dev_err(&drv_data->pdev->dev, "dma_transfer: ssp rx stall failed\n"); pxa2xx_spi_unmap_dma_buffers(drv_data); /* update the buffer pointer for the amount completed in dma */ drv_data->rx += drv_data->len - (DCMD(drv_data->rx_channel) & DCMD_LENGTH); /* read trailing data from fifo, it does not matter how many * bytes are in the fifo just read until buffer is full * or fifo is empty, which ever occurs first */ drv_data->read(drv_data); /* return count of what was actually read */ msg->actual_length += drv_data->len - (drv_data->rx_end - drv_data->rx); /* Transfer delays and chip select release are * handled in pump_transfers or 
giveback */ /* Move to next transfer */ msg->state = pxa2xx_spi_next_transfer(drv_data); /* Schedule transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); } void pxa2xx_spi_dma_handler(int channel, void *data) { struct driver_data *drv_data = data; u32 irq_status = DCSR(channel) & DMA_INT_MASK; if (irq_status & DCSR_BUSERR) { if (channel == drv_data->tx_channel) pxa2xx_spi_dma_error_stop(drv_data, "dma_handler: bad bus address on tx channel"); else pxa2xx_spi_dma_error_stop(drv_data, "dma_handler: bad bus address on rx channel"); return; } /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */ if ((channel == drv_data->tx_channel) && (irq_status & DCSR_ENDINTR) && (drv_data->ssp_type == PXA25x_SSP)) { /* Wait for rx to stall */ if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) dev_err(&drv_data->pdev->dev, "dma_handler: ssp rx stall failed\n"); /* finish this transfer, start the next */ pxa2xx_spi_dma_transfer_complete(drv_data); } } irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) { u32 irq_status; void __iomem *reg = drv_data->ioaddr; irq_status = read_SSSR(reg) & drv_data->mask_sr; if (irq_status & SSSR_ROR) { pxa2xx_spi_dma_error_stop(drv_data, "dma_transfer: fifo overrun"); return IRQ_HANDLED; } /* Check for false positive timeout */ if ((irq_status & SSSR_TINT) && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { write_SSSR(SSSR_TINT, reg); return IRQ_HANDLED; } if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { /* Clear and disable timeout interrupt, do the rest in * dma_transfer_complete */ if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); /* finish this transfer, start the next */ pxa2xx_spi_dma_transfer_complete(drv_data); return IRQ_HANDLED; } /* Opps problem detected */ return IRQ_NONE; } int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst) { u32 dma_width; switch (drv_data->n_bytes) { case 1: dma_width = DCMD_WIDTH1; break; case 2: dma_width = DCMD_WIDTH2; break; default: dma_width = 
DCMD_WIDTH4; break; } /* Setup rx DMA Channel */ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; DTADR(drv_data->rx_channel) = drv_data->rx_dma; if (drv_data->rx == drv_data->null_dma_buf) /* No target address increment */ DCMD(drv_data->rx_channel) = DCMD_FLOWSRC | dma_width | dma_burst | drv_data->len; else DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR | DCMD_FLOWSRC | dma_width | dma_burst | drv_data->len; /* Setup tx DMA Channel */ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; DSADR(drv_data->tx_channel) = drv_data->tx_dma; DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; if (drv_data->tx == drv_data->null_dma_buf) /* No source address increment */ DCMD(drv_data->tx_channel) = DCMD_FLOWTRG | dma_width | dma_burst | drv_data->len; else DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR | DCMD_FLOWTRG | dma_width | dma_burst | drv_data->len; /* Enable dma end irqs on SSP to detect end of transfer */ if (drv_data->ssp_type == PXA25x_SSP) DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; return 0; } void pxa2xx_spi_dma_start(struct driver_data *drv_data) { DCSR(drv_data->rx_channel) |= DCSR_RUN; DCSR(drv_data->tx_channel) |= DCSR_RUN; } int pxa2xx_spi_dma_setup(struct driver_data *drv_data) { struct device *dev = &drv_data->pdev->dev; struct ssp_device *ssp = drv_data->ssp; /* Get two DMA channels (rx and tx) */ drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", DMA_PRIO_HIGH, pxa2xx_spi_dma_handler, drv_data); if (drv_data->rx_channel < 0) { dev_err(dev, "problem (%d) requesting rx channel\n", drv_data->rx_channel); return -ENODEV; } drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", DMA_PRIO_MEDIUM, pxa2xx_spi_dma_handler, drv_data); if (drv_data->tx_channel < 0) { dev_err(dev, "problem (%d) requesting tx channel\n", drv_data->tx_channel); pxa_free_dma(drv_data->rx_channel); return -ENODEV; } DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | 
drv_data->tx_channel; return 0; } void pxa2xx_spi_dma_release(struct driver_data *drv_data) { struct ssp_device *ssp = drv_data->ssp; DRCMR(ssp->drcmr_rx) = 0; DRCMR(ssp->drcmr_tx) = 0; if (drv_data->tx_channel != 0) pxa_free_dma(drv_data->tx_channel); if (drv_data->rx_channel != 0) pxa_free_dma(drv_data->rx_channel); } void pxa2xx_spi_dma_resume(struct driver_data *drv_data) { if (drv_data->rx_channel != -1) DRCMR(drv_data->ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; if (drv_data->tx_channel != -1) DRCMR(drv_data->ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; } int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, struct spi_device *spi, u8 bits_per_word, u32 *burst_code, u32 *threshold) { struct pxa2xx_spi_chip *chip_info = (struct pxa2xx_spi_chip *)spi->controller_data; int bytes_per_word; int burst_bytes; int thresh_words; int req_burst_size; int retval = 0; /* Set the threshold (in registers) to equal the same amount of data * as represented by burst size (in bytes). The computation below * is (burst_size rounded up to nearest 8 byte, word or long word) * divided by (bytes/register); the tx threshold is the inverse of * the rx, so that there will always be enough data in the rx fifo * to satisfy a burst, and there will always be enough space in the * tx fifo to accept a burst (a tx burst will overwrite the fifo if * there is not enough space), there must always remain enough empty * space in the rx fifo for any data loaded to the tx fifo. * Whenever burst_size (in bytes) equals bits/word, the fifo threshold * will be 8, or half the fifo; * The threshold can only be set to 2, 4 or 8, but not 16, because * to burst 16 to the tx fifo, the fifo would have to be empty; * however, the minimum fifo trigger level is 1, and the tx will * request service when the fifo is at this level, with only 15 spaces. 
*/ /* find bytes/word */ if (bits_per_word <= 8) bytes_per_word = 1; else if (bits_per_word <= 16) bytes_per_word = 2; else bytes_per_word = 4; /* use struct pxa2xx_spi_chip->dma_burst_size if available */ if (chip_info) req_burst_size = chip_info->dma_burst_size; else { switch (chip->dma_burst_size) { default: /* if the default burst size is not set, * do it now */ chip->dma_burst_size = DCMD_BURST8; case DCMD_BURST8: req_burst_size = 8; break; case DCMD_BURST16: req_burst_size = 16; break; case DCMD_BURST32: req_burst_size = 32; break; } } if (req_burst_size <= 8) { *burst_code = DCMD_BURST8; burst_bytes = 8; } else if (req_burst_size <= 16) { if (bytes_per_word == 1) { /* don't burst more than 1/2 the fifo */ *burst_code = DCMD_BURST8; burst_bytes = 8; retval = 1; } else { *burst_code = DCMD_BURST16; burst_bytes = 16; } } else { if (bytes_per_word == 1) { /* don't burst more than 1/2 the fifo */ *burst_code = DCMD_BURST8; burst_bytes = 8; retval = 1; } else if (bytes_per_word == 2) { /* don't burst more than 1/2 the fifo */ *burst_code = DCMD_BURST16; burst_bytes = 16; retval = 1; } else { *burst_code = DCMD_BURST32; burst_bytes = 32; } } thresh_words = burst_bytes / bytes_per_word; /* thresh_words will be between 2 and 8 */ *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); return retval; }
gpl-2.0
PDi-Communication-Systems-Inc/kernel-imx
drivers/uio/uio_pci_generic.c
3082
5900
/* uio_pci_generic - generic UIO driver for PCI 2.3 devices * * Copyright (C) 2009 Red Hat, Inc. * Author: Michael S. Tsirkin <mst@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2. * * Since the driver does not declare any device ids, you must allocate * id and bind the device to the driver yourself. For example: * * # echo "8086 10f5" > /sys/bus/pci/drivers/uio_pci_generic/new_id * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/uio_pci_generic/bind * # ls -l /sys/bus/pci/devices/0000:00:19.0/driver * .../0000:00:19.0/driver -> ../../../bus/pci/drivers/uio_pci_generic * * Driver won't bind to devices which do not support the Interrupt Disable Bit * in the command register. All devices compliant to PCI 2.3 (circa 2002) and * all compliant PCI Express devices should support this bit. */ #include <linux/device.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/uio_driver.h> #include <linux/spinlock.h> #define DRIVER_VERSION "0.01.0" #define DRIVER_AUTHOR "Michael S. Tsirkin <mst@redhat.com>" #define DRIVER_DESC "Generic UIO driver for PCI 2.3 devices" struct uio_pci_generic_dev { struct uio_info info; struct pci_dev *pdev; spinlock_t lock; /* guards command register accesses */ }; static inline struct uio_pci_generic_dev * to_uio_pci_generic_dev(struct uio_info *info) { return container_of(info, struct uio_pci_generic_dev, info); } /* Interrupt handler. Read/modify/write the command register to disable * the interrupt. */ static irqreturn_t irqhandler(int irq, struct uio_info *info) { struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info); struct pci_dev *pdev = gdev->pdev; irqreturn_t ret = IRQ_NONE; u32 cmd_status_dword; u16 origcmd, newcmd, status; /* We do a single dword read to retrieve both command and status. * Document assumptions that make this possible. 
*/ BUILD_BUG_ON(PCI_COMMAND % 4); BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS); spin_lock_irq(&gdev->lock); pci_block_user_cfg_access(pdev); /* Read both command and status registers in a single 32-bit operation. * Note: we could cache the value for command and move the status read * out of the lock if there was a way to get notified of user changes * to command register through sysfs. Should be good for shared irqs. */ pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword); origcmd = cmd_status_dword; status = cmd_status_dword >> 16; /* Check interrupt status register to see whether our device * triggered the interrupt. */ if (!(status & PCI_STATUS_INTERRUPT)) goto done; /* We triggered the interrupt, disable it. */ newcmd = origcmd | PCI_COMMAND_INTX_DISABLE; if (newcmd != origcmd) pci_write_config_word(pdev, PCI_COMMAND, newcmd); /* UIO core will signal the user process. */ ret = IRQ_HANDLED; done: pci_unblock_user_cfg_access(pdev); spin_unlock_irq(&gdev->lock); return ret; } /* Verify that the device supports Interrupt Disable bit in command register, * per PCI 2.3, by flipping this bit and reading it back: this bit was readonly * in PCI 2.2. */ static int __devinit verify_pci_2_3(struct pci_dev *pdev) { u16 orig, new; int err = 0; pci_block_user_cfg_access(pdev); pci_read_config_word(pdev, PCI_COMMAND, &orig); pci_write_config_word(pdev, PCI_COMMAND, orig ^ PCI_COMMAND_INTX_DISABLE); pci_read_config_word(pdev, PCI_COMMAND, &new); /* There's no way to protect against * hardware bugs or detect them reliably, but as long as we know * what the value should be, let's go ahead and check it. */ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) { err = -EBUSY; dev_err(&pdev->dev, "Command changed from 0x%x to 0x%x: " "driver or HW bug?\n", orig, new); goto err; } if (!((new ^ orig) & PCI_COMMAND_INTX_DISABLE)) { dev_warn(&pdev->dev, "Device does not support " "disabling interrupts: unable to bind.\n"); err = -ENODEV; goto err; } /* Now restore the original value. 
*/ pci_write_config_word(pdev, PCI_COMMAND, orig); err: pci_unblock_user_cfg_access(pdev); return err; } static int __devinit probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct uio_pci_generic_dev *gdev; int err; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "%s: pci_enable_device failed: %d\n", __func__, err); return err; } if (!pdev->irq) { dev_warn(&pdev->dev, "No IRQ assigned to device: " "no support for interrupts?\n"); pci_disable_device(pdev); return -ENODEV; } err = verify_pci_2_3(pdev); if (err) goto err_verify; gdev = kzalloc(sizeof(struct uio_pci_generic_dev), GFP_KERNEL); if (!gdev) { err = -ENOMEM; goto err_alloc; } gdev->info.name = "uio_pci_generic"; gdev->info.version = DRIVER_VERSION; gdev->info.irq = pdev->irq; gdev->info.irq_flags = IRQF_SHARED; gdev->info.handler = irqhandler; gdev->pdev = pdev; spin_lock_init(&gdev->lock); if (uio_register_device(&pdev->dev, &gdev->info)) goto err_register; pci_set_drvdata(pdev, gdev); return 0; err_register: kfree(gdev); err_alloc: err_verify: pci_disable_device(pdev); return err; } static void remove(struct pci_dev *pdev) { struct uio_pci_generic_dev *gdev = pci_get_drvdata(pdev); uio_unregister_device(&gdev->info); pci_disable_device(pdev); kfree(gdev); } static struct pci_driver driver = { .name = "uio_pci_generic", .id_table = NULL, /* only dynamic id's */ .probe = probe, .remove = remove, }; static int __init init(void) { pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); return pci_register_driver(&driver); } static void __exit cleanup(void) { pci_unregister_driver(&driver); } module_init(init); module_exit(cleanup); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC);
gpl-2.0
akshay4/android_old_kernel_htc_pico
drivers/uio/uio_pci_generic.c
3082
5900
/* uio_pci_generic - generic UIO driver for PCI 2.3 devices
 *
 * Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Since the driver does not declare any device ids, you must allocate
 * id and bind the device to the driver yourself.  For example:
 *
 * # echo "8086 10f5" > /sys/bus/pci/drivers/uio_pci_generic/new_id
 * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
 * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/uio_pci_generic/bind
 * # ls -l /sys/bus/pci/devices/0000:00:19.0/driver
 * .../0000:00:19.0/driver -> ../../../bus/pci/drivers/uio_pci_generic
 *
 * Driver won't bind to devices which do not support the Interrupt Disable Bit
 * in the command register. All devices compliant to PCI 2.3 (circa 2002) and
 * all compliant PCI Express devices should support this bit.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>

#define DRIVER_VERSION	"0.01.0"
#define DRIVER_AUTHOR	"Michael S. Tsirkin <mst@redhat.com>"
#define DRIVER_DESC	"Generic UIO driver for PCI 2.3 devices"

/* Per-device state: UIO bookkeeping plus the owning PCI device. */
struct uio_pci_generic_dev {
	struct uio_info info;
	struct pci_dev *pdev;
	spinlock_t lock; /* guards command register accesses */
};

static inline struct uio_pci_generic_dev *
to_uio_pci_generic_dev(struct uio_info *info)
{
	return container_of(info, struct uio_pci_generic_dev, info);
}

/* Interrupt handler. Read/modify/write the command register to disable
 * the interrupt. */
static irqreturn_t irqhandler(int irq, struct uio_info *info)
{
	struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
	struct pci_dev *pdev = gdev->pdev;
	irqreturn_t ret = IRQ_NONE;
	u32 cmd_status_dword;
	u16 origcmd, newcmd, status;

	/* We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible. */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	spin_lock_irq(&gdev->lock);
	pci_block_user_cfg_access(pdev);

	/* Read both command and status registers in a single 32-bit operation.
	 * Note: we could cache the value for command and move the status read
	 * out of the lock if there was a way to get notified of user changes
	 * to command register through sysfs. Should be good for shared irqs. */
	pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword);
	origcmd = cmd_status_dword;
	status = cmd_status_dword >> 16;

	/* Check interrupt status register to see whether our device
	 * triggered the interrupt. */
	if (!(status & PCI_STATUS_INTERRUPT))
		goto done;

	/* We triggered the interrupt, disable it. */
	newcmd = origcmd | PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		pci_write_config_word(pdev, PCI_COMMAND, newcmd);

	/* UIO core will signal the user process. */
	ret = IRQ_HANDLED;
done:

	pci_unblock_user_cfg_access(pdev);
	spin_unlock_irq(&gdev->lock);
	return ret;
}

/* Verify that the device supports Interrupt Disable bit in command register,
 * per PCI 2.3, by flipping this bit and reading it back: this bit was readonly
 * in PCI 2.2.
 * Returns 0 on success, -EBUSY on a suspected HW/driver bug, -ENODEV when the
 * bit is not implemented. */
static int __devinit verify_pci_2_3(struct pci_dev *pdev)
{
	u16 orig, new;
	int err = 0;

	pci_block_user_cfg_access(pdev);
	pci_read_config_word(pdev, PCI_COMMAND, &orig);
	pci_write_config_word(pdev, PCI_COMMAND,
			      orig ^ PCI_COMMAND_INTX_DISABLE);
	pci_read_config_word(pdev, PCI_COMMAND, &new);
	/* There's no way to protect against
	 * hardware bugs or detect them reliably, but as long as we know
	 * what the value should be, let's go ahead and check it. */
	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
		err = -EBUSY;
		dev_err(&pdev->dev, "Command changed from 0x%x to 0x%x: "
			"driver or HW bug?\n", orig, new);
		goto err;
	}
	if (!((new ^ orig) & PCI_COMMAND_INTX_DISABLE)) {
		dev_warn(&pdev->dev, "Device does not support "
			 "disabling interrupts: unable to bind.\n");
		err = -ENODEV;
		goto err;
	}
	/* Now restore the original value. */
	pci_write_config_word(pdev, PCI_COMMAND, orig);
err:
	pci_unblock_user_cfg_access(pdev);
	return err;
}

static int __devinit probe(struct pci_dev *pdev,
			   const struct pci_device_id *id)
{
	struct uio_pci_generic_dev *gdev;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "%s: pci_enable_device failed: %d\n",
			__func__, err);
		return err;
	}

	if (!pdev->irq) {
		dev_warn(&pdev->dev, "No IRQ assigned to device: "
			 "no support for interrupts?\n");
		pci_disable_device(pdev);
		return -ENODEV;
	}

	err = verify_pci_2_3(pdev);
	if (err)
		goto err_verify;

	gdev = kzalloc(sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
	if (!gdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	gdev->info.name = "uio_pci_generic";
	gdev->info.version = DRIVER_VERSION;
	gdev->info.irq = pdev->irq;
	gdev->info.irq_flags = IRQF_SHARED;
	gdev->info.handler = irqhandler;
	gdev->pdev = pdev;
	spin_lock_init(&gdev->lock);

	/*
	 * BUGFIX: propagate the registration error.  The original code only
	 * tested the return value but left 'err' at 0, so a failed
	 * uio_register_device() made probe() report success even though
	 * 'gdev' had already been freed and the device disabled.
	 */
	err = uio_register_device(&pdev->dev, &gdev->info);
	if (err)
		goto err_register;
	pci_set_drvdata(pdev, gdev);

	return 0;
err_register:
	kfree(gdev);
err_alloc:
err_verify:
	pci_disable_device(pdev);
	return err;
}

static void remove(struct pci_dev *pdev)
{
	struct uio_pci_generic_dev *gdev = pci_get_drvdata(pdev);

	uio_unregister_device(&gdev->info);
	pci_disable_device(pdev);
	kfree(gdev);
}

static struct pci_driver driver = {
	.name = "uio_pci_generic",
	.id_table = NULL, /* only dynamic id's */
	.probe = probe,
	.remove = remove,
};

static int __init init(void)
{
	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
	return pci_register_driver(&driver);
}

static void __exit cleanup(void)
{
	pci_unregister_driver(&driver);
}

module_init(init);
module_exit(cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
gpl-2.0
NewbyJE/android_kernel_samsung_msm8660-common
drivers/i2c/busses/i2c-pnx.c
6410
19749
/* * Provides I2C support for Philips PNX010x/PNX4008 boards. * * Authors: Dennis Kovalev <dkovalev@ru.mvista.com> * Vitaly Wool <vwool@ru.mvista.com> * * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/timer.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/i2c-pnx.h> #include <linux/io.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/slab.h> #include <mach/hardware.h> #include <mach/i2c.h> #define I2C_PNX_TIMEOUT 10 /* msec */ #define I2C_PNX_SPEED_KHZ 100 #define I2C_PNX_REGION_SIZE 0x100 static inline int wait_timeout(long timeout, struct i2c_pnx_algo_data *data) { while (timeout > 0 && (ioread32(I2C_REG_STS(data)) & mstatus_active)) { mdelay(1); timeout--; } return (timeout <= 0); } static inline int wait_reset(long timeout, struct i2c_pnx_algo_data *data) { while (timeout > 0 && (ioread32(I2C_REG_CTL(data)) & mcntrl_reset)) { mdelay(1); timeout--; } return (timeout <= 0); } static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) { struct timer_list *timer = &alg_data->mif.timer; unsigned long expires = msecs_to_jiffies(I2C_PNX_TIMEOUT); if (expires <= 1) expires = 2; del_timer_sync(timer); dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n", jiffies, expires); timer->expires = jiffies + expires; timer->data = (unsigned long)alg_data; add_timer(timer); } /** * i2c_pnx_start - start a device * @slave_addr: slave address * @adap: pointer to adapter structure * * Generate a START signal in the desired mode. 
*/ static int i2c_pnx_start(unsigned char slave_addr, struct i2c_pnx_algo_data *alg_data) { dev_dbg(&alg_data->adapter.dev, "%s(): addr 0x%x mode %d\n", __func__, slave_addr, alg_data->mif.mode); /* Check for 7 bit slave addresses only */ if (slave_addr & ~0x7f) { dev_err(&alg_data->adapter.dev, "%s: Invalid slave address %x. Only 7-bit addresses are supported\n", alg_data->adapter.name, slave_addr); return -EINVAL; } /* First, make sure bus is idle */ if (wait_timeout(I2C_PNX_TIMEOUT, alg_data)) { /* Somebody else is monopolizing the bus */ dev_err(&alg_data->adapter.dev, "%s: Bus busy. Slave addr = %02x, cntrl = %x, stat = %x\n", alg_data->adapter.name, slave_addr, ioread32(I2C_REG_CTL(alg_data)), ioread32(I2C_REG_STS(alg_data))); return -EBUSY; } else if (ioread32(I2C_REG_STS(alg_data)) & mstatus_afi) { /* Sorry, we lost the bus */ dev_err(&alg_data->adapter.dev, "%s: Arbitration failure. Slave addr = %02x\n", alg_data->adapter.name, slave_addr); return -EIO; } /* * OK, I2C is enabled and we have the bus. * Clear the current TDI and AFI status flags. */ iowrite32(ioread32(I2C_REG_STS(alg_data)) | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): sending %#x\n", __func__, (slave_addr << 1) | start_bit | alg_data->mif.mode); /* Write the slave address, START bit and R/W bit */ iowrite32((slave_addr << 1) | start_bit | alg_data->mif.mode, I2C_REG_TX(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): exit\n", __func__); return 0; } /** * i2c_pnx_stop - stop a device * @adap: pointer to I2C adapter structure * * Generate a STOP signal to terminate the master transaction. 
*/ static void i2c_pnx_stop(struct i2c_pnx_algo_data *alg_data) { /* Only 1 msec max timeout due to interrupt context */ long timeout = 1000; dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); /* Write a STOP bit to TX FIFO */ iowrite32(0xff | stop_bit, I2C_REG_TX(alg_data)); /* Wait until the STOP is seen. */ while (timeout > 0 && (ioread32(I2C_REG_STS(alg_data)) & mstatus_active)) { /* may be called from interrupt context */ udelay(1); timeout--; } dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); } /** * i2c_pnx_master_xmit - transmit data to slave * @adap: pointer to I2C adapter structure * * Sends one byte of data to the slave */ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) { u32 val; dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); if (alg_data->mif.len > 0) { /* We still have something to talk about... */ val = *alg_data->mif.buf++; if (alg_data->mif.len == 1) val |= stop_bit; alg_data->mif.len--; iowrite32(val, I2C_REG_TX(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): xmit %#x [%d]\n", __func__, val, alg_data->mif.len + 1); if (alg_data->mif.len == 0) { if (alg_data->last) { /* Wait until the STOP is seen. */ if (wait_timeout(I2C_PNX_TIMEOUT, alg_data)) dev_err(&alg_data->adapter.dev, "The bus is still active after timeout\n"); } /* Disable master interrupts */ iowrite32(ioread32(I2C_REG_CTL(alg_data)) & ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); del_timer_sync(&alg_data->mif.timer); dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine.\n", __func__); complete(&alg_data->mif.complete); } } else if (alg_data->mif.len == 0) { /* zero-sized transfer */ i2c_pnx_stop(alg_data); /* Disable master interrupts. 
*/ iowrite32(ioread32(I2C_REG_CTL(alg_data)) & ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); /* Stop timer. */ del_timer_sync(&alg_data->mif.timer); dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine after zero-xfer.\n", __func__); complete(&alg_data->mif.complete); } dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); return 0; } /** * i2c_pnx_master_rcv - receive data from slave * @adap: pointer to I2C adapter structure * * Reads one byte data from the slave */ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) { unsigned int val = 0; u32 ctl = 0; dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); /* Check, whether there is already data, * or we didn't 'ask' for it yet. */ if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) { dev_dbg(&alg_data->adapter.dev, "%s(): Write dummy data to fill Rx-fifo...\n", __func__); if (alg_data->mif.len == 1) { /* Last byte, do not acknowledge next rcv. */ val |= stop_bit; /* * Enable interrupt RFDAIE (data in Rx fifo), * and disable DRMIE (need data for Tx) */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl |= mcntrl_rffie | mcntrl_daie; ctl &= ~mcntrl_drmie; iowrite32(ctl, I2C_REG_CTL(alg_data)); } /* * Now we'll 'ask' for data: * For each byte we want to receive, we must * write a (dummy) byte to the Tx-FIFO. */ iowrite32(val, I2C_REG_TX(alg_data)); return 0; } /* Handle data. */ if (alg_data->mif.len > 0) { val = ioread32(I2C_REG_RX(alg_data)); *alg_data->mif.buf++ = (u8) (val & 0xff); dev_dbg(&alg_data->adapter.dev, "%s(): rcv 0x%x [%d]\n", __func__, val, alg_data->mif.len); alg_data->mif.len--; if (alg_data->mif.len == 0) { if (alg_data->last) /* Wait until the STOP is seen. 
*/ if (wait_timeout(I2C_PNX_TIMEOUT, alg_data)) dev_err(&alg_data->adapter.dev, "The bus is still active after timeout\n"); /* Disable master interrupts */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie | mcntrl_daie); iowrite32(ctl, I2C_REG_CTL(alg_data)); /* Kill timer. */ del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } } dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); return 0; } static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) { struct i2c_pnx_algo_data *alg_data = dev_id; u32 stat, ctl; dev_dbg(&alg_data->adapter.dev, "%s(): mstat = %x mctrl = %x, mode = %d\n", __func__, ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data)), alg_data->mif.mode); stat = ioread32(I2C_REG_STS(alg_data)); /* let's see what kind of event this is */ if (stat & mstatus_afi) { /* We lost arbitration in the midst of a transfer */ alg_data->mif.ret = -EIO; /* Disable master interrupts. */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); /* Stop timer, to prevent timeout. */ del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else if (stat & mstatus_nai) { /* Slave did not acknowledge, generate a STOP */ dev_dbg(&alg_data->adapter.dev, "%s(): Slave did not acknowledge, generating a STOP.\n", __func__); i2c_pnx_stop(alg_data); /* Disable master interrupts. */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); /* Our return value. */ alg_data->mif.ret = -EIO; /* Stop timer, to prevent timeout. */ del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else { /* * Two options: * - Master Tx needs data. 
* - There is data in the Rx-fifo * The latter is only the case if we have requested for data, * via a dummy write. (See 'i2c_pnx_master_rcv'.) * We therefore check, as a sanity check, whether that interrupt * has been enabled. */ if ((stat & mstatus_drmi) || !(stat & mstatus_rfe)) { if (alg_data->mif.mode == I2C_SMBUS_WRITE) { i2c_pnx_master_xmit(alg_data); } else if (alg_data->mif.mode == I2C_SMBUS_READ) { i2c_pnx_master_rcv(alg_data); } } } /* Clear TDI and AFI bits */ stat = ioread32(I2C_REG_STS(alg_data)); iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x ctrl = %x.\n", __func__, ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data))); return IRQ_HANDLED; } static void i2c_pnx_timeout(unsigned long data) { struct i2c_pnx_algo_data *alg_data = (struct i2c_pnx_algo_data *)data; u32 ctl; dev_err(&alg_data->adapter.dev, "Master timed out. stat = %04x, cntrl = %04x. Resetting master...\n", ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data))); /* Reset master and disable interrupts */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); ctl |= mcntrl_reset; iowrite32(ctl, I2C_REG_CTL(alg_data)); wait_reset(I2C_PNX_TIMEOUT, alg_data); alg_data->mif.ret = -EIO; complete(&alg_data->mif.complete); } static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data) { u32 stat; if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_active) { dev_err(&alg_data->adapter.dev, "%s: Bus is still active after xfer. Reset it...\n", alg_data->adapter.name); iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset, I2C_REG_CTL(alg_data)); wait_reset(I2C_PNX_TIMEOUT, alg_data); } else if (!(stat & mstatus_rfe) || !(stat & mstatus_tfe)) { /* If there is data in the fifo's after transfer, * flush fifo's by reset. 
*/ iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset, I2C_REG_CTL(alg_data)); wait_reset(I2C_PNX_TIMEOUT, alg_data); } else if (stat & mstatus_nai) { iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset, I2C_REG_CTL(alg_data)); wait_reset(I2C_PNX_TIMEOUT, alg_data); } } /** * i2c_pnx_xfer - generic transfer entry point * @adap: pointer to I2C adapter structure * @msgs: array of messages * @num: number of messages * * Initiates the transfer */ static int i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct i2c_msg *pmsg; int rc = 0, completed = 0, i; struct i2c_pnx_algo_data *alg_data = adap->algo_data; u32 stat = ioread32(I2C_REG_STS(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): entering: %d messages, stat = %04x.\n", __func__, num, ioread32(I2C_REG_STS(alg_data))); bus_reset_if_active(alg_data); /* Process transactions in a loop. */ for (i = 0; rc >= 0 && i < num; i++) { u8 addr; pmsg = &msgs[i]; addr = pmsg->addr; if (pmsg->flags & I2C_M_TEN) { dev_err(&alg_data->adapter.dev, "%s: 10 bits addr not supported!\n", alg_data->adapter.name); rc = -EINVAL; break; } alg_data->mif.buf = pmsg->buf; alg_data->mif.len = pmsg->len; alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ? I2C_SMBUS_READ : I2C_SMBUS_WRITE; alg_data->mif.ret = 0; alg_data->last = (i == num - 1); dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n", __func__, alg_data->mif.mode, alg_data->mif.len); i2c_pnx_arm_timer(alg_data); /* initialize the completion var */ init_completion(&alg_data->mif.complete); /* Enable master interrupt */ iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_afie | mcntrl_naie | mcntrl_drmie, I2C_REG_CTL(alg_data)); /* Put start-code and slave-address on the bus. 
*/ rc = i2c_pnx_start(addr, alg_data); if (rc < 0) break; /* Wait for completion */ wait_for_completion(&alg_data->mif.complete); if (!(rc = alg_data->mif.ret)) completed++; dev_dbg(&alg_data->adapter.dev, "%s(): Complete, return code = %d.\n", __func__, rc); /* Clear TDI and AFI bits in case they are set. */ if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) { dev_dbg(&alg_data->adapter.dev, "%s: TDI still set... clearing now.\n", alg_data->adapter.name); iowrite32(stat, I2C_REG_STS(alg_data)); } if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_afi) { dev_dbg(&alg_data->adapter.dev, "%s: AFI still set... clearing now.\n", alg_data->adapter.name); iowrite32(stat, I2C_REG_STS(alg_data)); } } bus_reset_if_active(alg_data); /* Cleanup to be sure... */ alg_data->mif.buf = NULL; alg_data->mif.len = 0; dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x\n", __func__, ioread32(I2C_REG_STS(alg_data))); if (completed != num) return ((rc < 0) ? rc : -EREMOTEIO); return num; } static u32 i2c_pnx_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm pnx_algorithm = { .master_xfer = i2c_pnx_xfer, .functionality = i2c_pnx_func, }; #ifdef CONFIG_PM static int i2c_pnx_controller_suspend(struct platform_device *pdev, pm_message_t state) { struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); clk_disable(alg_data->clk); return 0; } static int i2c_pnx_controller_resume(struct platform_device *pdev) { struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); return clk_enable(alg_data->clk); } #else #define i2c_pnx_controller_suspend NULL #define i2c_pnx_controller_resume NULL #endif static int __devinit i2c_pnx_probe(struct platform_device *pdev) { unsigned long tmp; int ret = 0; struct i2c_pnx_algo_data *alg_data; unsigned long freq; struct i2c_pnx_data *i2c_pnx = pdev->dev.platform_data; if (!i2c_pnx || !i2c_pnx->name) { dev_err(&pdev->dev, "%s: no platform data supplied\n", __func__); ret 
= -EINVAL; goto out; } alg_data = kzalloc(sizeof(*alg_data), GFP_KERNEL); if (!alg_data) { ret = -ENOMEM; goto err_kzalloc; } platform_set_drvdata(pdev, alg_data); strlcpy(alg_data->adapter.name, i2c_pnx->name, sizeof(alg_data->adapter.name)); alg_data->adapter.dev.parent = &pdev->dev; alg_data->adapter.algo = &pnx_algorithm; alg_data->adapter.algo_data = alg_data; alg_data->adapter.nr = pdev->id; alg_data->i2c_pnx = i2c_pnx; alg_data->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(alg_data->clk)) { ret = PTR_ERR(alg_data->clk); goto out_drvdata; } init_timer(&alg_data->mif.timer); alg_data->mif.timer.function = i2c_pnx_timeout; alg_data->mif.timer.data = (unsigned long)alg_data; /* Register I/O resource */ if (!request_mem_region(i2c_pnx->base, I2C_PNX_REGION_SIZE, pdev->name)) { dev_err(&pdev->dev, "I/O region 0x%08x for I2C already in use.\n", i2c_pnx->base); ret = -ENODEV; goto out_clkget; } alg_data->ioaddr = ioremap(i2c_pnx->base, I2C_PNX_REGION_SIZE); if (!alg_data->ioaddr) { dev_err(&pdev->dev, "Couldn't ioremap I2C I/O region\n"); ret = -ENOMEM; goto out_release; } ret = clk_enable(alg_data->clk); if (ret) goto out_unmap; freq = clk_get_rate(alg_data->clk); /* * Clock Divisor High This value is the number of system clocks * the serial clock (SCL) will be high. * For example, if the system clock period is 50 ns and the maximum * desired serial period is 10000 ns (100 kHz), then CLKHI would be * set to 0.5*(f_sys/f_i2c)-2=0.5*(20e6/100e3)-2=98. The actual value * programmed into CLKHI will vary from this slightly due to * variations in the output pad's rise and fall times as well as * the deglitching filter length. 
*/ tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2; if (tmp > 0x3FF) tmp = 0x3FF; iowrite32(tmp, I2C_REG_CKH(alg_data)); iowrite32(tmp, I2C_REG_CKL(alg_data)); iowrite32(mcntrl_reset, I2C_REG_CTL(alg_data)); if (wait_reset(I2C_PNX_TIMEOUT, alg_data)) { ret = -ENODEV; goto out_clock; } init_completion(&alg_data->mif.complete); ret = request_irq(i2c_pnx->irq, i2c_pnx_interrupt, 0, pdev->name, alg_data); if (ret) goto out_clock; /* Register this adapter with the I2C subsystem */ ret = i2c_add_numbered_adapter(&alg_data->adapter); if (ret < 0) { dev_err(&pdev->dev, "I2C: Failed to add bus\n"); goto out_irq; } dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n", alg_data->adapter.name, i2c_pnx->base, i2c_pnx->irq); return 0; out_irq: free_irq(i2c_pnx->irq, alg_data); out_clock: clk_disable(alg_data->clk); out_unmap: iounmap(alg_data->ioaddr); out_release: release_mem_region(i2c_pnx->base, I2C_PNX_REGION_SIZE); out_clkget: clk_put(alg_data->clk); out_drvdata: kfree(alg_data); err_kzalloc: platform_set_drvdata(pdev, NULL); out: return ret; } static int __devexit i2c_pnx_remove(struct platform_device *pdev) { struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); struct i2c_pnx_data *i2c_pnx = alg_data->i2c_pnx; free_irq(i2c_pnx->irq, alg_data); i2c_del_adapter(&alg_data->adapter); clk_disable(alg_data->clk); iounmap(alg_data->ioaddr); release_mem_region(i2c_pnx->base, I2C_PNX_REGION_SIZE); clk_put(alg_data->clk); kfree(alg_data); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver i2c_pnx_driver = { .driver = { .name = "pnx-i2c", .owner = THIS_MODULE, }, .probe = i2c_pnx_probe, .remove = __devexit_p(i2c_pnx_remove), .suspend = i2c_pnx_controller_suspend, .resume = i2c_pnx_controller_resume, }; static int __init i2c_adap_pnx_init(void) { return platform_driver_register(&i2c_pnx_driver); } static void __exit i2c_adap_pnx_exit(void) { platform_driver_unregister(&i2c_pnx_driver); } MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev 
<source@mvista.com>"); MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pnx-i2c"); /* We need to make sure I2C is initialized before USB */ subsys_initcall(i2c_adap_pnx_init); module_exit(i2c_adap_pnx_exit);
gpl-2.0
Loller79/Solid_Kernel-STOCK-KK
arch/powerpc/lib/sstep.c
6666
38272
/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}

/*
 * Determine whether a conditional branch instruction would branch.
 */
/* BO field (bits 21-25): bit 2 clear => decrement and test CTR;
 * bit 4 clear => test the CR bit selected by BI against BO bit 3. */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}

/* Kernel-mode accesses are always allowed; user-mode addresses are
 * validated against the user address-space limit. */
static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}

/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if (instr & 0x04000000)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if ((instr & 3) == 1)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64 */

/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr,
					struct pt_regs *regs, int do_update)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra) {
		ea += regs->gpr[ra];
		if (do_update)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}

/* Byte-reverse a 16-bit quantity. */
static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

/* Byte-reverse a 32-bit quantity. */
static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
/* Byte-reverse a 64-bit quantity via two 32-bit reversals. */
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

/* Naturally aligned read of 1/2/4(/8) bytes into *dest; 0 or -EFAULT. */
static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}

/* Unaligned read: assemble the value big-endian-wise from the largest
 * naturally aligned pieces available at each step. */
static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
	*dest = x;
	return 0;
}

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}

/* Naturally aligned write of 1/2/4(/8) bytes; 0 or -EFAULT. */
static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		/* NOTE(review): 'ea' advances by 1 here while nb decreases
		 * by c, unlike read_mem_unaligned's 'ea += c' — looks like an
		 * off-by-stride bug for multi-piece writes; confirm against
		 * upstream sstep.c before relying on unaligned stores. */
		++ea;
	}
	return 0;
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}

#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&val[0], ea, nb, regs);
		ptr += sizeof(unsigned long) - nb;
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&val[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}

/* Store counterpart of do_fp_load: func extracts the FP register into
 * the bounce buffer, which is then written out piecewise. */
static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		ptr += sizeof(unsigned long) - nb;
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
	}
	return err;
}
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* VSX 16-byte load: word-aligned goes straight to the asm helper,
 * otherwise bounce through a buffer using unaligned reads. */
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

/* VSX 16-byte store counterpart of do_vsx_load. */
static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */

/* Store-conditional via inline asm; captures CR (for the success bit)
 * and returns -EFAULT through 'err' on a faulting address. */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1: " op " %2,0,%3\n"			\
		" mfcr %1\n"				\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3: li %0,%4\n"				\
		" b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

/* Load-reserved via inline asm with exception-table fixup. */
#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1: "op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3: li %0,%3\n"				\
		" b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

/* Cache-management instruction (dcbz etc.) with fault fixup. */
#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1: "op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3: li %0,%3\n"				\
		" b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

/* Set CR0 from the sign of gpr[rd] (LT/GT/EQ) plus XER.SO, honouring
 * 32-bit mode truncation. */
static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}

/* gpr[rd] = val1 + val2 + carry_in, updating XER.CA per the mode width. */
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}

/* Signed compare: write LT/GT/EQ plus XER.SO into CR field crfld. */
static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

/* Unsigned compare: write LT/GT/EQ plus XER.SO into CR field crfld. */
static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))

/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
* Returns 1 if the step was emulated, 0 if not, * or -1 if the instruction is one that should not be stepped, * such as an rfid, or a mtmsrd that would clear MSR_RI. */ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) { unsigned int opcode, ra, rb, rd, spr, u; unsigned long int imm; unsigned long int val, val2; unsigned long int ea; unsigned int cr, mb, me, sh; int err; unsigned long old_ra; long ival; opcode = instr >> 26; switch (opcode) { case 16: /* bc */ imm = (signed short)(instr & 0xfffc); if ((instr & 2) == 0) imm += regs->nip; regs->nip += 4; regs->nip = truncate_if_32bit(regs->msr, regs->nip); if (instr & 1) regs->link = regs->nip; if (branch_taken(instr, regs)) regs->nip = imm; return 1; #ifdef CONFIG_PPC64 case 17: /* sc */ /* * N.B. this uses knowledge about how the syscall * entry code works. If that is changed, this will * need to be changed also. */ if (regs->gpr[0] == 0x1ebe && cpu_has_feature(CPU_FTR_REAL_LE)) { regs->msr ^= MSR_LE; goto instr_done; } regs->gpr[9] = regs->gpr[13]; regs->gpr[10] = MSR_KERNEL; regs->gpr[11] = regs->nip + 4; regs->gpr[12] = regs->msr & MSR_MASK; regs->gpr[13] = (unsigned long) get_paca(); regs->nip = (unsigned long) &system_call_common; regs->msr = MSR_KERNEL; return 1; #endif case 18: /* b */ imm = instr & 0x03fffffc; if (imm & 0x02000000) imm -= 0x04000000; if ((instr & 2) == 0) imm += regs->nip; if (instr & 1) regs->link = truncate_if_32bit(regs->msr, regs->nip + 4); imm = truncate_if_32bit(regs->msr, imm); regs->nip = imm; return 1; case 19: switch ((instr >> 1) & 0x3ff) { case 16: /* bclr */ case 528: /* bcctr */ imm = (instr & 0x400)? 
regs->ctr: regs->link; regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); imm = truncate_if_32bit(regs->msr, imm); if (instr & 1) regs->link = regs->nip; if (branch_taken(instr, regs)) regs->nip = imm; return 1; case 18: /* rfid, scary */ return -1; case 150: /* isync */ isync(); goto instr_done; case 33: /* crnor */ case 129: /* crandc */ case 193: /* crxor */ case 225: /* crnand */ case 257: /* crand */ case 289: /* creqv */ case 417: /* crorc */ case 449: /* cror */ ra = (instr >> 16) & 0x1f; rb = (instr >> 11) & 0x1f; rd = (instr >> 21) & 0x1f; ra = (regs->ccr >> (31 - ra)) & 1; rb = (regs->ccr >> (31 - rb)) & 1; val = (instr >> (6 + ra * 2 + rb)) & 1; regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) | (val << (31 - rd)); goto instr_done; } break; case 31: switch ((instr >> 1) & 0x3ff) { case 598: /* sync */ #ifdef __powerpc64__ switch ((instr >> 21) & 3) { case 1: /* lwsync */ asm volatile("lwsync" : : : "memory"); goto instr_done; case 2: /* ptesync */ asm volatile("ptesync" : : : "memory"); goto instr_done; } #endif mb(); goto instr_done; case 854: /* eieio */ eieio(); goto instr_done; } break; } /* Following cases refer to regs->gpr[], so we need all regs */ if (!FULL_REGS(regs)) return 0; rd = (instr >> 21) & 0x1f; ra = (instr >> 16) & 0x1f; rb = (instr >> 11) & 0x1f; switch (opcode) { case 7: /* mulli */ regs->gpr[rd] = regs->gpr[ra] * (short) instr; goto instr_done; case 8: /* subfic */ imm = (short) instr; add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1); goto instr_done; case 10: /* cmpli */ imm = (unsigned short) instr; val = regs->gpr[ra]; #ifdef __powerpc64__ if ((rd & 1) == 0) val = (unsigned int) val; #endif do_cmp_unsigned(regs, val, imm, rd >> 2); goto instr_done; case 11: /* cmpi */ imm = (short) instr; val = regs->gpr[ra]; #ifdef __powerpc64__ if ((rd & 1) == 0) val = (int) val; #endif do_cmp_signed(regs, val, imm, rd >> 2); goto instr_done; case 12: /* addic */ imm = (short) instr; add_with_carry(regs, rd, regs->gpr[ra], imm, 0); goto 
instr_done; case 13: /* addic. */ imm = (short) instr; add_with_carry(regs, rd, regs->gpr[ra], imm, 0); set_cr0(regs, rd); goto instr_done; case 14: /* addi */ imm = (short) instr; if (ra) imm += regs->gpr[ra]; regs->gpr[rd] = imm; goto instr_done; case 15: /* addis */ imm = ((short) instr) << 16; if (ra) imm += regs->gpr[ra]; regs->gpr[rd] = imm; goto instr_done; case 20: /* rlwimi */ mb = (instr >> 6) & 0x1f; me = (instr >> 1) & 0x1f; val = DATA32(regs->gpr[rd]); imm = MASK32(mb, me); regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm); goto logical_done; case 21: /* rlwinm */ mb = (instr >> 6) & 0x1f; me = (instr >> 1) & 0x1f; val = DATA32(regs->gpr[rd]); regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me); goto logical_done; case 23: /* rlwnm */ mb = (instr >> 6) & 0x1f; me = (instr >> 1) & 0x1f; rb = regs->gpr[rb] & 0x1f; val = DATA32(regs->gpr[rd]); regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me); goto logical_done; case 24: /* ori */ imm = (unsigned short) instr; regs->gpr[ra] = regs->gpr[rd] | imm; goto instr_done; case 25: /* oris */ imm = (unsigned short) instr; regs->gpr[ra] = regs->gpr[rd] | (imm << 16); goto instr_done; case 26: /* xori */ imm = (unsigned short) instr; regs->gpr[ra] = regs->gpr[rd] ^ imm; goto instr_done; case 27: /* xoris */ imm = (unsigned short) instr; regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16); goto instr_done; case 28: /* andi. */ imm = (unsigned short) instr; regs->gpr[ra] = regs->gpr[rd] & imm; set_cr0(regs, ra); goto instr_done; case 29: /* andis. 
*/ imm = (unsigned short) instr; regs->gpr[ra] = regs->gpr[rd] & (imm << 16); set_cr0(regs, ra); goto instr_done; #ifdef __powerpc64__ case 30: /* rld* */ mb = ((instr >> 6) & 0x1f) | (instr & 0x20); val = regs->gpr[rd]; if ((instr & 0x10) == 0) { sh = rb | ((instr & 2) << 4); val = ROTATE(val, sh); switch ((instr >> 2) & 3) { case 0: /* rldicl */ regs->gpr[ra] = val & MASK64_L(mb); goto logical_done; case 1: /* rldicr */ regs->gpr[ra] = val & MASK64_R(mb); goto logical_done; case 2: /* rldic */ regs->gpr[ra] = val & MASK64(mb, 63 - sh); goto logical_done; case 3: /* rldimi */ imm = MASK64(mb, 63 - sh); regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (val & imm); goto logical_done; } } else { sh = regs->gpr[rb] & 0x3f; val = ROTATE(val, sh); switch ((instr >> 1) & 7) { case 0: /* rldcl */ regs->gpr[ra] = val & MASK64_L(mb); goto logical_done; case 1: /* rldcr */ regs->gpr[ra] = val & MASK64_R(mb); goto logical_done; } } #endif case 31: switch ((instr >> 1) & 0x3ff) { case 83: /* mfmsr */ if (regs->msr & MSR_PR) break; regs->gpr[rd] = regs->msr & MSR_MASK; goto instr_done; case 146: /* mtmsr */ if (regs->msr & MSR_PR) break; imm = regs->gpr[rd]; if ((imm & MSR_RI) == 0) /* can't step mtmsr that would clear MSR_RI */ return -1; regs->msr = imm; goto instr_done; #ifdef CONFIG_PPC64 case 178: /* mtmsrd */ /* only MSR_EE and MSR_RI get changed if bit 15 set */ /* mtmsrd doesn't change MSR_HV and MSR_ME */ if (regs->msr & MSR_PR) break; imm = (instr & 0x10000)? 
0x8002: 0xefffffffffffefffUL; imm = (regs->msr & MSR_MASK & ~imm) | (regs->gpr[rd] & imm); if ((imm & MSR_RI) == 0) /* can't step mtmsrd that would clear MSR_RI */ return -1; regs->msr = imm; goto instr_done; #endif case 19: /* mfcr */ regs->gpr[rd] = regs->ccr; regs->gpr[rd] &= 0xffffffffUL; goto instr_done; case 144: /* mtcrf */ imm = 0xf0000000UL; val = regs->gpr[rd]; for (sh = 0; sh < 8; ++sh) { if (instr & (0x80000 >> sh)) regs->ccr = (regs->ccr & ~imm) | (val & imm); imm >>= 4; } goto instr_done; case 339: /* mfspr */ spr = (instr >> 11) & 0x3ff; switch (spr) { case 0x20: /* mfxer */ regs->gpr[rd] = regs->xer; regs->gpr[rd] &= 0xffffffffUL; goto instr_done; case 0x100: /* mflr */ regs->gpr[rd] = regs->link; goto instr_done; case 0x120: /* mfctr */ regs->gpr[rd] = regs->ctr; goto instr_done; } break; case 467: /* mtspr */ spr = (instr >> 11) & 0x3ff; switch (spr) { case 0x20: /* mtxer */ regs->xer = (regs->gpr[rd] & 0xffffffffUL); goto instr_done; case 0x100: /* mtlr */ regs->link = regs->gpr[rd]; goto instr_done; case 0x120: /* mtctr */ regs->ctr = regs->gpr[rd]; goto instr_done; } break; /* * Compare instructions */ case 0: /* cmp */ val = regs->gpr[ra]; val2 = regs->gpr[rb]; #ifdef __powerpc64__ if ((rd & 1) == 0) { /* word (32-bit) compare */ val = (int) val; val2 = (int) val2; } #endif do_cmp_signed(regs, val, val2, rd >> 2); goto instr_done; case 32: /* cmpl */ val = regs->gpr[ra]; val2 = regs->gpr[rb]; #ifdef __powerpc64__ if ((rd & 1) == 0) { /* word (32-bit) compare */ val = (unsigned int) val; val2 = (unsigned int) val2; } #endif do_cmp_unsigned(regs, val, val2, rd >> 2); goto instr_done; /* * Arithmetic instructions */ case 8: /* subfc */ add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb], 1); goto arith_done; #ifdef __powerpc64__ case 9: /* mulhdu */ asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; #endif case 10: /* addc */ add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb], 0); goto 
arith_done; case 11: /* mulhwu */ asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; case 40: /* subf */ regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra]; goto arith_done; #ifdef __powerpc64__ case 73: /* mulhd */ asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; #endif case 75: /* mulhw */ asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) : "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); goto arith_done; case 104: /* neg */ regs->gpr[rd] = -regs->gpr[ra]; goto arith_done; case 136: /* subfe */ add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb], regs->xer & XER_CA); goto arith_done; case 138: /* adde */ add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb], regs->xer & XER_CA); goto arith_done; case 200: /* subfze */ add_with_carry(regs, rd, ~regs->gpr[ra], 0L, regs->xer & XER_CA); goto arith_done; case 202: /* addze */ add_with_carry(regs, rd, regs->gpr[ra], 0L, regs->xer & XER_CA); goto arith_done; case 232: /* subfme */ add_with_carry(regs, rd, ~regs->gpr[ra], -1L, regs->xer & XER_CA); goto arith_done; #ifdef __powerpc64__ case 233: /* mulld */ regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb]; goto arith_done; #endif case 234: /* addme */ add_with_carry(regs, rd, regs->gpr[ra], -1L, regs->xer & XER_CA); goto arith_done; case 235: /* mullw */ regs->gpr[rd] = (unsigned int) regs->gpr[ra] * (unsigned int) regs->gpr[rb]; goto arith_done; case 266: /* add */ regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb]; goto arith_done; #ifdef __powerpc64__ case 457: /* divdu */ regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb]; goto arith_done; #endif case 459: /* divwu */ regs->gpr[rd] = (unsigned int) regs->gpr[ra] / (unsigned int) regs->gpr[rb]; goto arith_done; #ifdef __powerpc64__ case 489: /* divd */ regs->gpr[rd] = (long int) regs->gpr[ra] / (long int) regs->gpr[rb]; goto arith_done; #endif case 491: /* divw */ regs->gpr[rd] = (int) regs->gpr[ra] / (int) regs->gpr[rb]; goto 
arith_done; /* * Logical instructions */ case 26: /* cntlzw */ asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) : "r" (regs->gpr[rd])); goto logical_done; #ifdef __powerpc64__ case 58: /* cntlzd */ asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) : "r" (regs->gpr[rd])); goto logical_done; #endif case 28: /* and */ regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb]; goto logical_done; case 60: /* andc */ regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb]; goto logical_done; case 124: /* nor */ regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]); goto logical_done; case 284: /* xor */ regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]); goto logical_done; case 316: /* xor */ regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb]; goto logical_done; case 412: /* orc */ regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb]; goto logical_done; case 444: /* or */ regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb]; goto logical_done; case 476: /* nand */ regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]); goto logical_done; case 922: /* extsh */ regs->gpr[ra] = (signed short) regs->gpr[rd]; goto logical_done; case 954: /* extsb */ regs->gpr[ra] = (signed char) regs->gpr[rd]; goto logical_done; #ifdef __powerpc64__ case 986: /* extsw */ regs->gpr[ra] = (signed int) regs->gpr[rd]; goto logical_done; #endif /* * Shift instructions */ case 24: /* slw */ sh = regs->gpr[rb] & 0x3f; if (sh < 32) regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL; else regs->gpr[ra] = 0; goto logical_done; case 536: /* srw */ sh = regs->gpr[rb] & 0x3f; if (sh < 32) regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh; else regs->gpr[ra] = 0; goto logical_done; case 792: /* sraw */ sh = regs->gpr[rb] & 0x3f; ival = (signed int) regs->gpr[rd]; regs->gpr[ra] = ival >> (sh < 32 ? 
sh : 31); if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0)) regs->xer |= XER_CA; else regs->xer &= ~XER_CA; goto logical_done; case 824: /* srawi */ sh = rb; ival = (signed int) regs->gpr[rd]; regs->gpr[ra] = ival >> sh; if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) regs->xer |= XER_CA; else regs->xer &= ~XER_CA; goto logical_done; #ifdef __powerpc64__ case 27: /* sld */ sh = regs->gpr[rd] & 0x7f; if (sh < 64) regs->gpr[ra] = regs->gpr[rd] << sh; else regs->gpr[ra] = 0; goto logical_done; case 539: /* srd */ sh = regs->gpr[rb] & 0x7f; if (sh < 64) regs->gpr[ra] = regs->gpr[rd] >> sh; else regs->gpr[ra] = 0; goto logical_done; case 794: /* srad */ sh = regs->gpr[rb] & 0x7f; ival = (signed long int) regs->gpr[rd]; regs->gpr[ra] = ival >> (sh < 64 ? sh : 63); if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0)) regs->xer |= XER_CA; else regs->xer &= ~XER_CA; goto logical_done; case 826: /* sradi with sh_5 = 0 */ case 827: /* sradi with sh_5 = 1 */ sh = rb | ((instr & 2) << 4); ival = (signed long int) regs->gpr[rd]; regs->gpr[ra] = ival >> sh; if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) regs->xer |= XER_CA; else regs->xer &= ~XER_CA; goto logical_done; #endif /* __powerpc64__ */ /* * Cache instructions */ case 54: /* dcbst */ ea = xform_ea(instr, regs, 0); if (!address_ok(regs, ea, 8)) return 0; err = 0; __cacheop_user_asmx(ea, err, "dcbst"); if (err) return 0; goto instr_done; case 86: /* dcbf */ ea = xform_ea(instr, regs, 0); if (!address_ok(regs, ea, 8)) return 0; err = 0; __cacheop_user_asmx(ea, err, "dcbf"); if (err) return 0; goto instr_done; case 246: /* dcbtst */ if (rd == 0) { ea = xform_ea(instr, regs, 0); prefetchw((void *) ea); } goto instr_done; case 278: /* dcbt */ if (rd == 0) { ea = xform_ea(instr, regs, 0); prefetch((void *) ea); } goto instr_done; } break; } /* * Following cases are for loads and stores, so bail out * if we're in little-endian mode. 
*/ if (regs->msr & MSR_LE) return 0; /* * Save register RA in case it's an update form load or store * and the access faults. */ old_ra = regs->gpr[ra]; switch (opcode) { case 31: u = instr & 0x40; switch ((instr >> 1) & 0x3ff) { case 20: /* lwarx */ ea = xform_ea(instr, regs, 0); if (ea & 3) break; /* can't handle misaligned */ err = -EFAULT; if (!address_ok(regs, ea, 4)) goto ldst_done; err = 0; __get_user_asmx(val, ea, err, "lwarx"); if (!err) regs->gpr[rd] = val; goto ldst_done; case 150: /* stwcx. */ ea = xform_ea(instr, regs, 0); if (ea & 3) break; /* can't handle misaligned */ err = -EFAULT; if (!address_ok(regs, ea, 4)) goto ldst_done; err = 0; __put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr); if (!err) regs->ccr = (regs->ccr & 0x0fffffff) | (cr & 0xe0000000) | ((regs->xer >> 3) & 0x10000000); goto ldst_done; #ifdef __powerpc64__ case 84: /* ldarx */ ea = xform_ea(instr, regs, 0); if (ea & 7) break; /* can't handle misaligned */ err = -EFAULT; if (!address_ok(regs, ea, 8)) goto ldst_done; err = 0; __get_user_asmx(val, ea, err, "ldarx"); if (!err) regs->gpr[rd] = val; goto ldst_done; case 214: /* stdcx. 
*/ ea = xform_ea(instr, regs, 0); if (ea & 7) break; /* can't handle misaligned */ err = -EFAULT; if (!address_ok(regs, ea, 8)) goto ldst_done; err = 0; __put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr); if (!err) regs->ccr = (regs->ccr & 0x0fffffff) | (cr & 0xe0000000) | ((regs->xer >> 3) & 0x10000000); goto ldst_done; case 21: /* ldx */ case 53: /* ldux */ err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u), 8, regs); goto ldst_done; #endif case 23: /* lwzx */ case 55: /* lwzux */ err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u), 4, regs); goto ldst_done; case 87: /* lbzx */ case 119: /* lbzux */ err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u), 1, regs); goto ldst_done; #ifdef CONFIG_ALTIVEC case 103: /* lvx */ case 359: /* lvxl */ if (!(regs->msr & MSR_VEC)) break; ea = xform_ea(instr, regs, 0); err = do_vec_load(rd, do_lvx, ea, regs); goto ldst_done; case 231: /* stvx */ case 487: /* stvxl */ if (!(regs->msr & MSR_VEC)) break; ea = xform_ea(instr, regs, 0); err = do_vec_store(rd, do_stvx, ea, regs); goto ldst_done; #endif /* CONFIG_ALTIVEC */ #ifdef __powerpc64__ case 149: /* stdx */ case 181: /* stdux */ val = regs->gpr[rd]; err = write_mem(val, xform_ea(instr, regs, u), 8, regs); goto ldst_done; #endif case 151: /* stwx */ case 183: /* stwux */ val = regs->gpr[rd]; err = write_mem(val, xform_ea(instr, regs, u), 4, regs); goto ldst_done; case 215: /* stbx */ case 247: /* stbux */ val = regs->gpr[rd]; err = write_mem(val, xform_ea(instr, regs, u), 1, regs); goto ldst_done; case 279: /* lhzx */ case 311: /* lhzux */ err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u), 2, regs); goto ldst_done; #ifdef __powerpc64__ case 341: /* lwax */ case 373: /* lwaux */ err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u), 4, regs); if (!err) regs->gpr[rd] = (signed int) regs->gpr[rd]; goto ldst_done; #endif case 343: /* lhax */ case 375: /* lhaux */ err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u), 2, regs); if (!err) regs->gpr[rd] = 
(signed short) regs->gpr[rd]; goto ldst_done; case 407: /* sthx */ case 439: /* sthux */ val = regs->gpr[rd]; err = write_mem(val, xform_ea(instr, regs, u), 2, regs); goto ldst_done; #ifdef __powerpc64__ case 532: /* ldbrx */ err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs); if (!err) regs->gpr[rd] = byterev_8(val); goto ldst_done; #endif case 534: /* lwbrx */ err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs); if (!err) regs->gpr[rd] = byterev_4(val); goto ldst_done; #ifdef CONFIG_PPC_CPU case 535: /* lfsx */ case 567: /* lfsux */ if (!(regs->msr & MSR_FP)) break; ea = xform_ea(instr, regs, u); err = do_fp_load(rd, do_lfs, ea, 4, regs); goto ldst_done; case 599: /* lfdx */ case 631: /* lfdux */ if (!(regs->msr & MSR_FP)) break; ea = xform_ea(instr, regs, u); err = do_fp_load(rd, do_lfd, ea, 8, regs); goto ldst_done; case 663: /* stfsx */ case 695: /* stfsux */ if (!(regs->msr & MSR_FP)) break; ea = xform_ea(instr, regs, u); err = do_fp_store(rd, do_stfs, ea, 4, regs); goto ldst_done; case 727: /* stfdx */ case 759: /* stfdux */ if (!(regs->msr & MSR_FP)) break; ea = xform_ea(instr, regs, u); err = do_fp_store(rd, do_stfd, ea, 8, regs); goto ldst_done; #endif #ifdef __powerpc64__ case 660: /* stdbrx */ val = byterev_8(regs->gpr[rd]); err = write_mem(val, xform_ea(instr, regs, 0), 8, regs); goto ldst_done; #endif case 662: /* stwbrx */ val = byterev_4(regs->gpr[rd]); err = write_mem(val, xform_ea(instr, regs, 0), 4, regs); goto ldst_done; case 790: /* lhbrx */ err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs); if (!err) regs->gpr[rd] = byterev_2(val); goto ldst_done; case 918: /* sthbrx */ val = byterev_2(regs->gpr[rd]); err = write_mem(val, xform_ea(instr, regs, 0), 2, regs); goto ldst_done; #ifdef CONFIG_VSX case 844: /* lxvd2x */ case 876: /* lxvd2ux */ if (!(regs->msr & MSR_VSX)) break; rd |= (instr & 1) << 5; ea = xform_ea(instr, regs, u); err = do_vsx_load(rd, do_lxvd2x, ea, regs); goto ldst_done; case 972: /* stxvd2x */ case 1004: /* 
stxvd2ux */ if (!(regs->msr & MSR_VSX)) break; rd |= (instr & 1) << 5; ea = xform_ea(instr, regs, u); err = do_vsx_store(rd, do_stxvd2x, ea, regs); goto ldst_done; #endif /* CONFIG_VSX */ } break; case 32: /* lwz */ case 33: /* lwzu */ err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs); goto ldst_done; case 34: /* lbz */ case 35: /* lbzu */ err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs); goto ldst_done; case 36: /* stw */ case 37: /* stwu */ val = regs->gpr[rd]; err = write_mem(val, dform_ea(instr, regs), 4, regs); goto ldst_done; case 38: /* stb */ case 39: /* stbu */ val = regs->gpr[rd]; err = write_mem(val, dform_ea(instr, regs), 1, regs); goto ldst_done; case 40: /* lhz */ case 41: /* lhzu */ err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs); goto ldst_done; case 42: /* lha */ case 43: /* lhau */ err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs); if (!err) regs->gpr[rd] = (signed short) regs->gpr[rd]; goto ldst_done; case 44: /* sth */ case 45: /* sthu */ val = regs->gpr[rd]; err = write_mem(val, dform_ea(instr, regs), 2, regs); goto ldst_done; case 46: /* lmw */ ra = (instr >> 16) & 0x1f; if (ra >= rd) break; /* invalid form, ra in range to load */ ea = dform_ea(instr, regs); do { err = read_mem(&regs->gpr[rd], ea, 4, regs); if (err) return 0; ea += 4; } while (++rd < 32); goto instr_done; case 47: /* stmw */ ea = dform_ea(instr, regs); do { err = write_mem(regs->gpr[rd], ea, 4, regs); if (err) return 0; ea += 4; } while (++rd < 32); goto instr_done; #ifdef CONFIG_PPC_FPU case 48: /* lfs */ case 49: /* lfsu */ if (!(regs->msr & MSR_FP)) break; ea = dform_ea(instr, regs); err = do_fp_load(rd, do_lfs, ea, 4, regs); goto ldst_done; case 50: /* lfd */ case 51: /* lfdu */ if (!(regs->msr & MSR_FP)) break; ea = dform_ea(instr, regs); err = do_fp_load(rd, do_lfd, ea, 8, regs); goto ldst_done; case 52: /* stfs */ case 53: /* stfsu */ if (!(regs->msr & MSR_FP)) break; ea = dform_ea(instr, regs); err = 
do_fp_store(rd, do_stfs, ea, 4, regs); goto ldst_done; case 54: /* stfd */ case 55: /* stfdu */ if (!(regs->msr & MSR_FP)) break; ea = dform_ea(instr, regs); err = do_fp_store(rd, do_stfd, ea, 8, regs); goto ldst_done; #endif #ifdef __powerpc64__ case 58: /* ld[u], lwa */ switch (instr & 3) { case 0: /* ld */ err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs), 8, regs); goto ldst_done; case 1: /* ldu */ err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs), 8, regs); goto ldst_done; case 2: /* lwa */ err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs), 4, regs); if (!err) regs->gpr[rd] = (signed int) regs->gpr[rd]; goto ldst_done; } break; case 62: /* std[u] */ val = regs->gpr[rd]; switch (instr & 3) { case 0: /* std */ err = write_mem(val, dsform_ea(instr, regs), 8, regs); goto ldst_done; case 1: /* stdu */ err = write_mem(val, dsform_ea(instr, regs), 8, regs); goto ldst_done; } break; #endif /* __powerpc64__ */ } err = -EINVAL; ldst_done: if (err) { regs->gpr[ra] = old_ra; return 0; /* invoke DSI if -EFAULT? */ } instr_done: regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); return 1; logical_done: if (instr & 1) set_cr0(regs, ra); goto instr_done; arith_done: if (instr & 1) set_cr0(regs, rd); goto instr_done; }
gpl-2.0
divergence-kernel/d2lte-kernel
arch/tile/kernel/init_task.c
7690
1802
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/init_task.h> #include <linux/mqueue.h> #include <linux/module.h> #include <linux/start_kernel.h> #include <linux/uaccess.h> static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); /* * Initial thread structure. * * We need to make sure that this is THREAD_SIZE aligned due to the * way process stacks are handled. This is done by having a special * "init_task" linker map entry.. */ union thread_union init_thread_union __init_task_data = { INIT_THREAD_INFO(init_task) }; /* * Initial task structure. * * All other task structs will be allocated on slabs in fork.c */ struct task_struct init_task = INIT_TASK(init_task); EXPORT_SYMBOL(init_task); /* * per-CPU stack and boot info. */ DEFINE_PER_CPU(unsigned long, boot_sp) = (unsigned long)init_stack + THREAD_SIZE; #ifdef CONFIG_SMP DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel; #else /* * The variable must be __initdata since it references __init code. * With CONFIG_SMP it is per-cpu data, which is exempt from validation. */ unsigned long __initdata boot_pc = (unsigned long)start_kernel; #endif
gpl-2.0
librae8226/linux-3.0
arch/mips/pmc-sierra/yosemite/ht.c
8458
12179
/* * Copyright 2003 PMC-Sierra * Author: Manish Lachwani (lachwani@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <asm/pci.h> #include <asm/io.h> #include <linux/init.h> #include <asm/titan_dep.h> #ifdef CONFIG_HYPERTRANSPORT /* * This function check if the Hypertransport Link Initialization completed. 
If * it did, then proceed further with scanning bus #2 */ static __inline__ int check_titan_htlink(void) { u32 val; val = *(volatile uint32_t *)(RM9000x2_HTLINK_REG); if (val & 0x00000020) /* HT Link Initialization completed */ return 1; else return 0; } static int titan_ht_config_read_dword(struct pci_dev *device, int offset, u32* val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); RM9K_READ(data_reg, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_read_word(struct pci_dev *device, int offset, u16* val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; if ((offset & 0x3) == 0) offset = 0x2; else offset = 0x0; RM9K_WRITE(address_reg, address); RM9K_READ_16(data_reg + offset, val); return PCIBIOS_SUCCESSFUL; } u32 longswap(unsigned long l) { unsigned char b1, b2, b3, b4; b1 = l&255; b2 = (l>>8)&255; b3 = (l>>16)&255; b4 = (l>>24)&255; return ((b1<<24) + (b2<<16) + (b3<<8) + b4); } static int titan_ht_config_read_byte(struct pci_dev *device, int offset, u8* val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; int offset1; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change 
the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); if ((offset & 0x3) == 0) { offset1 = 0x3; } if ((offset & 0x3) == 1) { offset1 = 0x2; } if ((offset & 0x3) == 2) { offset1 = 0x1; } if ((offset & 0x3) == 3) { offset1 = 0x0; } RM9K_READ_8(data_reg + offset1, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write_dword(struct pci_dev *device, int offset, u8 val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); RM9K_WRITE(data_reg, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write_word(struct pci_dev *device, int offset, u8 val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; if ((offset & 0x3) == 0) offset = 0x2; else offset = 0x0; RM9K_WRITE(address_reg, address); RM9K_WRITE_16(data_reg + offset, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write_byte(struct pci_dev *device, int offset, u8 val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; int offset1; bus = 
device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); if ((offset & 0x3) == 0) { offset1 = 0x3; } if ((offset & 0x3) == 1) { offset1 = 0x2; } if ((offset & 0x3) == 2) { offset1 = 0x1; } if ((offset & 0x3) == 3) { offset1 = 0x0; } RM9K_WRITE_8(data_reg + offset1, val); return PCIBIOS_SUCCESSFUL; } static void titan_pcibios_set_master(struct pci_dev *dev) { u16 cmd; int bus = dev->bus->number; if (check_titan_htlink()) titan_ht_config_read_word(dev, PCI_COMMAND, &cmd); cmd |= PCI_COMMAND_MASTER; if (check_titan_htlink()) titan_ht_config_write_word(dev, PCI_COMMAND, cmd); } int pcibios_enable_resources(struct pci_dev *dev) { u16 cmd, old_cmd; u8 tmp1; int idx; struct resource *r; int bus = dev->bus->number; if (check_titan_htlink()) titan_ht_config_read_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; for (idx = 0; idx < 6; idx++) { r = &dev->resource[idx]; if (!r->start && r->end) { printk(KERN_ERR "PCI: Device %s not available because of " "resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { if (check_titan_htlink()) titan_ht_config_write_word(dev, PCI_COMMAND, cmd); } if (check_titan_htlink()) titan_ht_config_read_byte(dev, PCI_CACHE_LINE_SIZE, &tmp1); if (tmp1 != 8) { printk(KERN_WARNING "PCI setting cache line size to 8 from " "%d\n", tmp1); } if (check_titan_htlink()) titan_ht_config_write_byte(dev, PCI_CACHE_LINE_SIZE, 8); if (check_titan_htlink()) titan_ht_config_read_byte(dev, PCI_LATENCY_TIMER, &tmp1); if (tmp1 < 32 || tmp1 == 0xff) { printk(KERN_WARNING "PCI setting latency timer to 32 
from %d\n", tmp1); } if (check_titan_htlink()) titan_ht_config_write_byte(dev, PCI_LATENCY_TIMER, 32); return 0; } int pcibios_enable_device(struct pci_dev *dev, int mask) { return pcibios_enable_resources(dev); } resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { /* We need to avoid collisions with `mirrored' VGA ports and other strange ISA hardware, so we always want the addresses kilobyte aligned. */ if (size > 0x100) { printk(KERN_ERR "PCI: I/O Region %s/%d too large" " (%ld bytes)\n", pci_name(dev), dev->resource - res, size); } start = (start + 1024 - 1) & ~(1024 - 1); } return start; } struct pci_ops titan_pci_ops = { titan_ht_config_read_byte, titan_ht_config_read_word, titan_ht_config_read_dword, titan_ht_config_write_byte, titan_ht_config_write_word, titan_ht_config_write_dword }; void __init pcibios_fixup_bus(struct pci_bus *c) { titan_ht_pcibios_fixup_bus(c); } void __init pcibios_init(void) { /* Reset PCI I/O and PCI MEM values */ /* XXX Need to add the proper values here */ ioport_resource.start = 0xe0000000; ioport_resource.end = 0xe0000000 + 0x20000000 - 1; iomem_resource.start = 0xc0000000; iomem_resource.end = 0xc0000000 + 0x20000000 - 1; /* XXX Need to add bus values */ pci_scan_bus(2, &titan_pci_ops, NULL); pci_scan_bus(3, &titan_pci_ops, NULL); } /* * for parsing "pci=" kernel boot arguments. */ char *pcibios_setup(char *str) { printk(KERN_INFO "rr: pcibios_setup\n"); /* Nothing to do for now. */ return str; } unsigned __init int pcibios_assign_all_busses(void) { /* We want to use the PCI bus detection done by PMON */ return 0; } #endif /* CONFIG_HYPERTRANSPORT */
gpl-2.0
Krabappel2548/MSM8660_CM9.0_Pure_ICS_kernel
arch/mips/pmc-sierra/yosemite/ht.c
8458
12179
/* * Copyright 2003 PMC-Sierra * Author: Manish Lachwani (lachwani@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <asm/pci.h> #include <asm/io.h> #include <linux/init.h> #include <asm/titan_dep.h> #ifdef CONFIG_HYPERTRANSPORT /* * This function check if the Hypertransport Link Initialization completed. 
If * it did, then proceed further with scanning bus #2 */ static __inline__ int check_titan_htlink(void) { u32 val; val = *(volatile uint32_t *)(RM9000x2_HTLINK_REG); if (val & 0x00000020) /* HT Link Initialization completed */ return 1; else return 0; } static int titan_ht_config_read_dword(struct pci_dev *device, int offset, u32* val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); RM9K_READ(data_reg, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_read_word(struct pci_dev *device, int offset, u16* val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; if ((offset & 0x3) == 0) offset = 0x2; else offset = 0x0; RM9K_WRITE(address_reg, address); RM9K_READ_16(data_reg + offset, val); return PCIBIOS_SUCCESSFUL; } u32 longswap(unsigned long l) { unsigned char b1, b2, b3, b4; b1 = l&255; b2 = (l>>8)&255; b3 = (l>>16)&255; b4 = (l>>24)&255; return ((b1<<24) + (b2<<16) + (b3<<8) + b4); } static int titan_ht_config_read_byte(struct pci_dev *device, int offset, u8* val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; int offset1; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change 
the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); if ((offset & 0x3) == 0) { offset1 = 0x3; } if ((offset & 0x3) == 1) { offset1 = 0x2; } if ((offset & 0x3) == 2) { offset1 = 0x1; } if ((offset & 0x3) == 3) { offset1 = 0x0; } RM9K_READ_8(data_reg + offset1, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write_dword(struct pci_dev *device, int offset, u8 val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); RM9K_WRITE(data_reg, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write_word(struct pci_dev *device, int offset, u8 val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; if ((offset & 0x3) == 0) offset = 0x2; else offset = 0x0; RM9K_WRITE(address_reg, address); RM9K_WRITE_16(data_reg + offset, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write_byte(struct pci_dev *device, int offset, u8 val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; int offset1; bus = 
device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); if ((offset & 0x3) == 0) { offset1 = 0x3; } if ((offset & 0x3) == 1) { offset1 = 0x2; } if ((offset & 0x3) == 2) { offset1 = 0x1; } if ((offset & 0x3) == 3) { offset1 = 0x0; } RM9K_WRITE_8(data_reg + offset1, val); return PCIBIOS_SUCCESSFUL; } static void titan_pcibios_set_master(struct pci_dev *dev) { u16 cmd; int bus = dev->bus->number; if (check_titan_htlink()) titan_ht_config_read_word(dev, PCI_COMMAND, &cmd); cmd |= PCI_COMMAND_MASTER; if (check_titan_htlink()) titan_ht_config_write_word(dev, PCI_COMMAND, cmd); } int pcibios_enable_resources(struct pci_dev *dev) { u16 cmd, old_cmd; u8 tmp1; int idx; struct resource *r; int bus = dev->bus->number; if (check_titan_htlink()) titan_ht_config_read_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; for (idx = 0; idx < 6; idx++) { r = &dev->resource[idx]; if (!r->start && r->end) { printk(KERN_ERR "PCI: Device %s not available because of " "resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { if (check_titan_htlink()) titan_ht_config_write_word(dev, PCI_COMMAND, cmd); } if (check_titan_htlink()) titan_ht_config_read_byte(dev, PCI_CACHE_LINE_SIZE, &tmp1); if (tmp1 != 8) { printk(KERN_WARNING "PCI setting cache line size to 8 from " "%d\n", tmp1); } if (check_titan_htlink()) titan_ht_config_write_byte(dev, PCI_CACHE_LINE_SIZE, 8); if (check_titan_htlink()) titan_ht_config_read_byte(dev, PCI_LATENCY_TIMER, &tmp1); if (tmp1 < 32 || tmp1 == 0xff) { printk(KERN_WARNING "PCI setting latency timer to 32 
from %d\n", tmp1); } if (check_titan_htlink()) titan_ht_config_write_byte(dev, PCI_LATENCY_TIMER, 32); return 0; } int pcibios_enable_device(struct pci_dev *dev, int mask) { return pcibios_enable_resources(dev); } resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { /* We need to avoid collisions with `mirrored' VGA ports and other strange ISA hardware, so we always want the addresses kilobyte aligned. */ if (size > 0x100) { printk(KERN_ERR "PCI: I/O Region %s/%d too large" " (%ld bytes)\n", pci_name(dev), dev->resource - res, size); } start = (start + 1024 - 1) & ~(1024 - 1); } return start; } struct pci_ops titan_pci_ops = { titan_ht_config_read_byte, titan_ht_config_read_word, titan_ht_config_read_dword, titan_ht_config_write_byte, titan_ht_config_write_word, titan_ht_config_write_dword }; void __init pcibios_fixup_bus(struct pci_bus *c) { titan_ht_pcibios_fixup_bus(c); } void __init pcibios_init(void) { /* Reset PCI I/O and PCI MEM values */ /* XXX Need to add the proper values here */ ioport_resource.start = 0xe0000000; ioport_resource.end = 0xe0000000 + 0x20000000 - 1; iomem_resource.start = 0xc0000000; iomem_resource.end = 0xc0000000 + 0x20000000 - 1; /* XXX Need to add bus values */ pci_scan_bus(2, &titan_pci_ops, NULL); pci_scan_bus(3, &titan_pci_ops, NULL); } /* * for parsing "pci=" kernel boot arguments. */ char *pcibios_setup(char *str) { printk(KERN_INFO "rr: pcibios_setup\n"); /* Nothing to do for now. */ return str; } unsigned __init int pcibios_assign_all_busses(void) { /* We want to use the PCI bus detection done by PMON */ return 0; } #endif /* CONFIG_HYPERTRANSPORT */
gpl-2.0
flar2/m8-Sense-5.0.1
arch/m68k/amiga/pcmcia.c
14602
2561
/* ** asm-m68k/pcmcia.c -- Amiga Linux PCMCIA support ** most information was found by disassembling card.resource ** I'm still looking for an official doc ! ** ** Copyright 1997 by Alain Malek ** ** This file is subject to the terms and conditions of the GNU General Public ** License. See the file COPYING in the main directory of this archive ** for more details. ** ** Created: 12/10/97 by Alain Malek */ #include <linux/types.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/module.h> #include <asm/amigayle.h> #include <asm/amipcmcia.h> /* gayle config byte for program voltage and access speed */ static unsigned char cfg_byte = GAYLE_CFG_0V|GAYLE_CFG_150NS; void pcmcia_reset(void) { unsigned long reset_start_time = jiffies; unsigned char b; gayle_reset = 0x00; while (time_before(jiffies, reset_start_time + 1*HZ/100)); b = gayle_reset; } EXPORT_SYMBOL(pcmcia_reset); /* copy a tuple, including tuple header. return nb bytes copied */ /* be careful as this may trigger a GAYLE_IRQ_WR interrupt ! 
*/ int pcmcia_copy_tuple(unsigned char tuple_id, void *tuple, int max_len) { unsigned char id, *dest; int cnt, pos, len; dest = tuple; pos = 0; id = gayle_attribute[pos]; while((id != CISTPL_END) && (pos < 0x10000)) { len = (int)gayle_attribute[pos+2] + 2; if (id == tuple_id) { len = (len > max_len)?max_len:len; for (cnt = 0; cnt < len; cnt++) { *dest++ = gayle_attribute[pos+(cnt<<1)]; } return len; } pos += len<<1; id = gayle_attribute[pos]; } return 0; } EXPORT_SYMBOL(pcmcia_copy_tuple); void pcmcia_program_voltage(int voltage) { unsigned char v; switch (voltage) { case PCMCIA_0V: v = GAYLE_CFG_0V; break; case PCMCIA_5V: v = GAYLE_CFG_5V; break; case PCMCIA_12V: v = GAYLE_CFG_12V; break; default: v = GAYLE_CFG_0V; } cfg_byte = (cfg_byte & 0xfc) | v; gayle.config = cfg_byte; } EXPORT_SYMBOL(pcmcia_program_voltage); void pcmcia_access_speed(int speed) { unsigned char s; if (speed <= PCMCIA_SPEED_100NS) s = GAYLE_CFG_100NS; else if (speed <= PCMCIA_SPEED_150NS) s = GAYLE_CFG_150NS; else if (speed <= PCMCIA_SPEED_250NS) s = GAYLE_CFG_250NS; else s = GAYLE_CFG_720NS; cfg_byte = (cfg_byte & 0xf3) | s; gayle.config = cfg_byte; } EXPORT_SYMBOL(pcmcia_access_speed); void pcmcia_write_enable(void) { gayle.cardstatus = GAYLE_CS_WR|GAYLE_CS_DA; } EXPORT_SYMBOL(pcmcia_write_enable); void pcmcia_write_disable(void) { gayle.cardstatus = 0; } EXPORT_SYMBOL(pcmcia_write_disable);
gpl-2.0
matianfu/kunlun-kernel
drivers/video/cfbfillrect.c
14858
8940
/*
 * Generic fillrect for frame buffers with packed pixels of any depth.
 *
 * Copyright (C) 2000 James Simmons (jsimmons@linux-fbdev.org)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive for
 * more details.
 *
 * NOTES:
 *
 * Also need to add code to deal with cards endians that are different than
 * the native cpu endians. I also need to deal with MSB position in the word.
 *
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>
#include "fb_draw.h"

/* All framebuffer access is done one native long at a time. */
#if BITS_PER_LONG == 32
# define FB_WRITEL fb_writel
# define FB_READL fb_readl
#else
# define FB_WRITEL fb_writeq
# define FB_READL fb_readq
#endif

/*
 * Aligned pattern fill using 32/64-bit memory accesses
 *
 * @p:        target framebuffer (for foreign-endian mask helpers)
 * @dst:      long-aligned base of the destination span
 * @dst_idx:  bit offset of the first pixel within *dst
 * @pat:      fill pattern, already expanded to a full long; "aligned"
 *            means the pattern repeats exactly per long, so it needs
 *            no rotation between words
 * @n:        span length in bits
 * @bits:     BITS_PER_LONG
 * @bswapmask: byte-swap mask for foreign-endian framebuffers
 */
static void bitfill_aligned(struct fb_info *p, unsigned long __iomem *dst,
	int dst_idx, unsigned long pat, unsigned n, int bits, u32 bswapmask)
{
	unsigned long first, last;

	if (!n)
		return;

	/* Masks selecting the partial leading and trailing longs. */
	first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
	last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		/* read-modify-write only the masked bits */
		FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
	} else {
		// Multiple destination words

		// Leading bits
		if (first != ~0UL) {
			FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
			dst++;
			n -= bits - dst_idx;
		}

		// Main chunk -- full longs, unrolled 8x
		n /= bits;
		while (n >= 8) {
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			FB_WRITEL(pat, dst++);
			n -= 8;
		}
		while (n--)
			FB_WRITEL(pat, dst++);

		// Trailing bits
		if (last)
			FB_WRITEL(comp(pat, FB_READL(dst), last), dst);
	}
}

/*
 * Unaligned generic pattern fill using 32/64-bit memory accesses
 * The pattern must have been expanded to a full 32/64-bit value
 * Left/right are the appropriate shifts to convert to the pattern to be
 * used for the next 32/64-bit word
 */
static void bitfill_unaligned(struct fb_info *p, unsigned long __iomem *dst,
	int dst_idx, unsigned long pat, int left, int right, unsigned n,
	int bits)
{
	unsigned long first, last;

	if (!n)
		return;

	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
	} else {
		// Multiple destination words
		// Leading bits
		if (first) {
			FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
			dst++;
			/* rotate pattern into position for the next long */
			pat = pat << left | pat >> right;
			n -= bits - dst_idx;
		}

		// Main chunk -- full longs, rotating pat each time, unrolled 4x
		n /= bits;
		while (n >= 4) {
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
			n -= 4;
		}
		while (n--) {
			FB_WRITEL(pat, dst++);
			pat = pat << left | pat >> right;
		}

		// Trailing bits
		if (last)
			FB_WRITEL(comp(pat, FB_READL(dst), last), dst);
	}
}

/*
 * Aligned pattern invert using 32/64-bit memory accesses
 * (same traversal as bitfill_aligned, but XORs the pattern into the
 * existing contents instead of overwriting -- ROP_XOR support)
 */
static void bitfill_aligned_rev(struct fb_info *p, unsigned long __iomem *dst,
	int dst_idx, unsigned long pat, unsigned n, int bits, u32 bswapmask)
{
	unsigned long val = pat, dat;
	unsigned long first, last;

	if (!n)
		return;

	first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
	last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		dat = FB_READL(dst);
		/* XOR under mask: untouched bits keep their old value */
		FB_WRITEL(comp(dat ^ val, dat, first), dst);
	} else {
		// Multiple destination words
		// Leading bits
		if (first != 0UL) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ val, dat, first), dst);
			dst++;
			n -= bits - dst_idx;
		}

		// Main chunk -- full longs, unrolled 8x
		n /= bits;
		while (n >= 8) {
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
			n -= 8;
		}
		while (n--) {
			FB_WRITEL(FB_READL(dst) ^ val, dst);
			dst++;
		}
		// Trailing bits
		if (last) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ val, dat, last), dst);
		}
	}
}

/*
 * Unaligned generic pattern invert using 32/64-bit memory accesses
 * The pattern must have been expanded to a full 32/64-bit value
 * Left/right are the appropriate shifts to convert to the pattern to be
 * used for the next 32/64-bit word
 */
static void bitfill_unaligned_rev(struct fb_info *p,
	unsigned long __iomem *dst, int dst_idx, unsigned long pat, int left,
	int right, unsigned n, int bits)
{
	unsigned long first, last, dat;

	if (!n)
		return;

	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));

	if (dst_idx+n <= bits) {
		// Single word
		if (last)
			first &= last;
		dat = FB_READL(dst);
		FB_WRITEL(comp(dat ^ pat, dat, first), dst);
	} else {
		// Multiple destination words
		// Leading bits
		if (first != 0UL) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ pat, dat, first), dst);
			dst++;
			/* rotate pattern into position for the next long */
			pat = pat << left | pat >> right;
			n -= bits - dst_idx;
		}

		// Main chunk -- full longs, rotating pat each time, unrolled 4x
		n /= bits;
		while (n >= 4) {
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
			n -= 4;
		}
		while (n--) {
			FB_WRITEL(FB_READL(dst) ^ pat, dst);
			dst++;
			pat = pat << left | pat >> right;
		}

		// Trailing bits
		if (last) {
			dat = FB_READL(dst);
			FB_WRITEL(comp(dat ^ pat, dat, last), dst);
		}
	}
}

/*
 * cfb_fillrect - software fill of @rect on framebuffer @p, honouring
 * rect->rop (ROP_COPY overwrites, ROP_XOR inverts; anything else is
 * reported and treated as ROP_COPY).  Dispatches to the aligned fast
 * path when the pattern repeats exactly within one long
 * (bits % bpp == 0), otherwise to the rotating unaligned path.
 * Supports 1-32 bpp (see FIXME below).
 */
void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
	unsigned long pat, pat2, fg;
	unsigned long width = rect->width, height = rect->height;
	int bits = BITS_PER_LONG, bytes = bits >> 3;
	u32 bpp = p->var.bits_per_pixel;
	unsigned long __iomem *dst;
	int dst_idx, left;

	if (p->state != FBINFO_STATE_RUNNING)
		return;

	/* Truecolor/directcolor store the real pixel value in the
	   pseudo palette; otherwise the color is the pixel value. */
	if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
	    p->fix.visual == FB_VISUAL_DIRECTCOLOR )
		fg = ((u32 *) (p->pseudo_palette))[rect->color];
	else
		fg = rect->color;

	/* Expand the single pixel value to a long-wide pattern. */
	pat = pixel_to_pat(bpp, fg);

	/* Align dst down to a long boundary; dst_idx carries the
	   remainder as a bit offset. */
	dst = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
	dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8;
	dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp;
	/* FIXME For now we support 1-32 bpp only */
	left = bits % bpp;
	if (p->fbops->fb_sync)
		p->fbops->fb_sync(p);
	if (!left) {
		/* bpp divides the long width: no pattern rotation needed */
		u32 bswapmask = fb_compute_bswapmask(p);
		void (*fill_op32)(struct fb_info *p,
				  unsigned long __iomem *dst, int dst_idx,
				  unsigned long pat, unsigned n, int bits,
				  u32 bswapmask) = NULL;

		switch (rect->rop) {
		case ROP_XOR:
			fill_op32 = bitfill_aligned_rev;
			break;
		case ROP_COPY:
			fill_op32 = bitfill_aligned;
			break;
		default:
			printk( KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
			fill_op32 = bitfill_aligned;
			break;
		}
		while (height--) {
			/* fold whole longs of dst_idx into the pointer */
			dst += dst_idx >> (ffs(bits) - 1);
			dst_idx &= (bits - 1);
			fill_op32(p, dst, dst_idx, pat, width*bpp, bits,
				  bswapmask);
			dst_idx += p->fix.line_length*8;
		}
	} else {
		int right, r;
		void (*fill_op)(struct fb_info *p, unsigned long __iomem *dst,
				int dst_idx, unsigned long pat, int left,
				int right, unsigned n, int bits) = NULL;
#ifdef __LITTLE_ENDIAN
		right = left;
		left = bpp - right;
#else
		right = bpp - left;
#endif
		switch (rect->rop) {
		case ROP_XOR:
			fill_op = bitfill_unaligned_rev;
			break;
		case ROP_COPY:
			fill_op = bitfill_unaligned;
			break;
		default:
			printk(KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
			fill_op = bitfill_unaligned;
			break;
		}
		while (height--) {
			dst += dst_idx / bits;
			dst_idx &= (bits - 1);
			r = dst_idx % bpp;
			/* rotate pattern to the correct start position */
			pat2 = le_long_to_cpu(rolx(cpu_to_le_long(pat), r, bpp));
			fill_op(p, dst, dst_idx, pat2, left, right,
				width*bpp, bits);
			dst_idx += p->fix.line_length*8;
		}
	}
}
EXPORT_SYMBOL(cfb_fillrect);

MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
MODULE_DESCRIPTION("Generic software accelerated fill rectangle");
MODULE_LICENSE("GPL");
gpl-2.0
hustcalm/coreboot-hacking
src/vendorcode/amd/agesa/f14/Proc/Mem/NB/ON/mnregon.c
11
30190
/* $NoKeywords:$ */ /** * @file * * mnregon.c * * Common Northbridge register related functions for ON * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: (Mem/NB/ON) * @e \$Revision: 48511 $ @e \$Date: 2011-03-09 13:53:13 -0700 (Wed, 09 Mar 2011) $ * **/ /* ***************************************************************************** * * Copyright (c) 2011, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * *************************************************************************** * */ /* *---------------------------------------------------------------------------- * MODULES USED * *---------------------------------------------------------------------------- */ #include "AGESA.h" #include "AdvancedApi.h" #include "amdlib.h" #include "Ids.h" #include "OptionMemory.h" #include "mm.h" #include "mn.h" #include "mnon.h" #include "merrhdl.h" #include "cpuRegisters.h" #include "cpuFamRegisters.h" #include "cpuFamilyTranslation.h" #include "heapManager.h" #include "Filecode.h" #define FILECODE PROC_MEM_NB_ON_MNREGON_FILECODE /*---------------------------------------------------------------------------- * DEFINITIONS AND MACROS * *---------------------------------------------------------------------------- */ #define PHY_DIRECT_ADDRESS_MASK 0x0D000000 STATIC CONST UINT8 InstancesPerTypeON[8] = {8, 2, 1, 0, 2, 0, 1, 1}; /*---------------------------------------------------------------------------- * TYPEDEFS AND STRUCTURES * *---------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------- * PROTOTYPES OF LOCAL FUNCTIONS * *---------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------- * EXPORTED FUNCTIONS * *---------------------------------------------------------------------------- */ /*-----------------------------------------------------------------------------*/ /** * * This function matches the CPU_LOGICAL_ID with certain criteria to * determine if it is supported by this NBBlock. * * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK * @param[in] *LogicalIdPtr - Pointer to the CPU_LOGICAL_ID * * @return TRUE - This node is a Ontario. * @return FALSE - This node is not a Ontario. 
*/ BOOLEAN MemNIsIdSupportedON ( IN OUT MEM_NB_BLOCK *NBPtr, IN CPU_LOGICAL_ID *LogicalIdPtr ) { if (((LogicalIdPtr->Family & AMD_FAMILY_14_ON) != 0) && ((LogicalIdPtr->Revision & AMD_F14_ALL) != 0)) { return TRUE; } else { return FALSE; } } /* -----------------------------------------------------------------------------*/ /** * * Check if bitfields of all enabled DCTs on a die have the expected value. Ignore * DCTs that are disabled. * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK * @param[in] FieldName - Bit Field name * @param[in] Field - Value to be checked * * @return TRUE - All enabled DCTs have the expected value on the bitfield. * @return FALSE - Not all enabled DCTs have the expected value on the bitfield. * * ---------------------------------------------------------------------------- */ BOOLEAN MemNBrdcstCheckON ( IN OUT MEM_NB_BLOCK *NBPtr, IN BIT_FIELD_NAME FieldName, IN UINT32 Field ) { if (MemNGetBitFieldNb (NBPtr, FieldName) != Field) { return FALSE; } return TRUE; } /*---------------------------------------------------------------------------- * LOCAL FUNCTIONS * *----------------------------------------------------------------------------*/ /* -----------------------------------------------------------------------------*/ /** * * * This function gets or sets a value to a bit field in a PCI register. 
* * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK * @param[in] FieldName - Bit Field to be programmed * @param[in] Field - Value to be programmed * @param[in] IsSet - Indicates if the function will set or get * * @return value read, if the function is used as a "get" */ UINT32 MemNCmnGetSetFieldON ( IN OUT MEM_NB_BLOCK *NBPtr, IN UINT8 IsSet, IN BIT_FIELD_NAME FieldName, IN UINT32 Field ) { TSEFO Address; PCI_ADDR PciAddr; UINT8 Type; UINT8 IsLinked; UINT32 Value; UINT32 Highbit; UINT32 Lowbit; UINT32 Mask; UINT8 IsPhyDirectAccess; UINT8 IsWholeRegAccess; UINT8 NumOfInstances; UINT8 Instance; Value = 0; if ((FieldName < BFEndOfList) && (FieldName >= 0)) { Address = NBPtr->NBRegTable[FieldName]; if (Address) { Lowbit = TSEFO_END (Address); Highbit = TSEFO_START (Address); Type = (UINT8) TSEFO_TYPE (Address); IsLinked = (UINT8) TSEFO_LINKED (Address); IsPhyDirectAccess = (UINT8) TSEFO_DIRECT_EN (Address); IsWholeRegAccess = (UINT8) TSEFO_WHOLE_REG_ACCESS (Address); ASSERT ((Address & ((UINT32) 1) << 29) == 0); // Old Phy direct access method is not supported Address = TSEFO_OFFSET (Address); // By default, a bit field has only one instance NumOfInstances = 1; if ((Type == DCT_PHY_ACCESS) && IsPhyDirectAccess) { Address |= PHY_DIRECT_ADDRESS_MASK; if (IsWholeRegAccess) { // In the case of whole regiter access (bit 0 to 15), // HW broadcast and nibble mask will be used. Address |= Lowbit << 16; Lowbit = 0; Highbit = 15; } else { // In the case only some bits on a register is accessed, // BIOS will do read-mod-write to all chiplets manually. // And nibble mask will be 1111b always. 
Address |= 0x000F0000; Field >>= Lowbit; if ((Address & 0x0F00) == 0x0F00) { // Broadcast mode // Find out how many instances to write to NumOfInstances = InstancesPerTypeON[(Address >> 13) & 0x7]; if (!IsSet) { // For read, only read from instance 0 in broadcast mode NumOfInstances = 1; } } } } ASSERT (NumOfInstances > 0); for (Instance = 0; Instance < NumOfInstances; Instance++) { if (Type == NB_ACCESS) { PciAddr.AddressValue = Address; PciAddr.Address.Device = NBPtr->PciAddr.Address.Device; PciAddr.Address.Bus = NBPtr->PciAddr.Address.Bus; PciAddr.Address.Segment = NBPtr->PciAddr.Address.Segment; Address = PciAddr.AddressValue; LibAmdPciRead (AccessWidth32, PciAddr, &Value, &NBPtr->MemPtr->StdHeader); if ((FieldName != BFDctAddlDataReg) && (FieldName != BFDctAddlOffsetReg) && (FieldName != BFDctExtraDataReg) && (FieldName != BFDctExtraOffsetReg)) { IDS_HDT_CONSOLE (MEM_GETREG, "~Fn%d_%03x = %x\n", (Address >> 12) & 0xF, Address & 0xFFF, Value); } } else if (Type == DCT_PHY_ACCESS) { if (IsPhyDirectAccess && (NumOfInstances > 1)) { Address = (Address & 0x0FFFF0FF) | (((UINT32) Instance) << 8); } MemNSetBitFieldNb (NBPtr, BFDctAddlOffsetReg, Address); MemNPollBitFieldNb (NBPtr, BFDctAccessDone, 1, PCI_ACCESS_TIMEOUT, FALSE); Value = MemNGetBitFieldNb (NBPtr, BFDctAddlDataReg); IDS_HDT_CONSOLE (MEM_GETREG, "~Fn2_%d9C_%x = %x\n", NBPtr->Dct, Address & 0x0FFFFFFF, Value); } else if (Type == DCT_EXTRA) { MemNSetBitFieldNb (NBPtr, BFDctExtraOffsetReg, Address); MemNPollBitFieldNb (NBPtr, BFDctExtraAccessDone, 1, PCI_ACCESS_TIMEOUT, FALSE); Value = MemNGetBitFieldNb (NBPtr, BFDctExtraDataReg); IDS_HDT_CONSOLE (MEM_GETREG, "~Fn2_%dF4_%x = %x\n", NBPtr->Dct, Address & 0x0FFFFFFF, Value); } else { IDS_ERROR_TRAP; } if (IsSet) { // A 1<<32 == 1<<0 due to x86 SHL instruction, so skip if that is the case if ((Highbit - Lowbit) != 31) { Mask = (((UINT32)1 << (Highbit - Lowbit + 1)) - 1); } else { Mask = (UINT32)0xFFFFFFFF; } Value &= ~(Mask << Lowbit); Value |= (Field & Mask) 
<< Lowbit; if (Type == NB_ACCESS) { PciAddr.AddressValue = Address; LibAmdPciWrite (AccessWidth32, PciAddr, &Value, &NBPtr->MemPtr->StdHeader); if ((FieldName != BFDctAddlDataReg) && (FieldName != BFDctAddlOffsetReg) && (FieldName != BFDctExtraDataReg) && (FieldName != BFDctExtraOffsetReg)) { IDS_HDT_CONSOLE (MEM_SETREG, "~Fn%d_%03x [%d:%d] = %x\n", (Address >> 12) & 0xF, Address & 0xFFF, Highbit, Lowbit, Field); } } else if (Type == DCT_PHY_ACCESS) { MemNSetBitFieldNb (NBPtr, BFDctAddlDataReg, Value); Address |= DCT_ACCESS_WRITE; MemNSetBitFieldNb (NBPtr, BFDctAddlOffsetReg, Address); MemNPollBitFieldNb (NBPtr, BFDctAccessDone, 1, PCI_ACCESS_TIMEOUT, FALSE); IDS_HDT_CONSOLE (MEM_SETREG, "~Fn2_%d9C_%x [%d:%d] = %x\n", NBPtr->Dct, Address & 0x0FFFFFFF, Highbit, Lowbit, Field); } else if (Type == DCT_EXTRA) { MemNSetBitFieldNb (NBPtr, BFDctExtraDataReg, Value); Address |= DCT_ACCESS_WRITE; MemNSetBitFieldNb (NBPtr, BFDctExtraOffsetReg, Address); MemNPollBitFieldNb (NBPtr, BFDctExtraAccessDone, 1, PCI_ACCESS_TIMEOUT, FALSE); IDS_HDT_CONSOLE (MEM_SETREG, "~Fn2_%dF4_%x [%d:%d] = %x\n", NBPtr->Dct, Address & 0x0FFFFFFF, Highbit, Lowbit, Field); } else { IDS_ERROR_TRAP; } if (IsLinked) { MemNCmnGetSetFieldON (NBPtr, 1, FieldName + 1, Field >> (Highbit - Lowbit + 1)); } } else { Value = Value >> Lowbit; // Shift // A 1<<32 == 1<<0 due to x86 SHL instruction, so skip if that is the case if ((Highbit - Lowbit) != 31) { Value &= (((UINT32)1 << (Highbit - Lowbit + 1)) - 1); } if (IsLinked) { Value |= MemNCmnGetSetFieldON (NBPtr, 0, FieldName + 1, 0) << (Highbit - Lowbit + 1); } // For direct phy access, shift the bit back for compatibility reason. 
      // For direct phy access, shift the bit back for compatibility reason.
      if ((Type == DCT_PHY_ACCESS) && IsPhyDirectAccess) {
        Value <<= Lowbit;
      }
          }
        }
      }
  } else {
    IDS_ERROR_TRAP;   // Invalid bit field index
  }

  return Value;
}

/* -----------------------------------------------------------------------------*/
/**
 *
 *   This function initializes bit field translation table
 *
 *   Every entry maps a symbolic bit-field name (BF*) to an access type
 *   (PCI config, indirect DCT phy, direct phy, or DCT "extra" space),
 *   a register address, and a [high:low] bit range.  Fields that span
 *   two registers are chained afterwards with LINK_TSEFO.
 *
 *     @param[in,out]   *NBPtr   - Pointer to the MEM_NB_BLOCK
 *     @param[in,out]   NBRegTable[]  - Pointer to the bit field data structure
 *
 */

VOID
MemNInitNBRegTableON (
  IN OUT   MEM_NB_BLOCK  *NBPtr,
  IN OUT   TSEFO NBRegTable[]
  )
{
  UINT16 i;

  // Start from a clean slate: every bit-field entry defaults to "undefined".
  for (i = 0; i < BFEndOfList; i++) {
    NBRegTable[i] = 0;
  }

  // ---- PCI config (NB_ACCESS): D18F0 ID / D18F1 DRAM address map ----
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (0, 0x00), 31,  0, BFDevVendorIDReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (1, 0x40), 31,  0, BFDramBaseReg0);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (1, 0x44), 31,  0, BFDramLimitReg0);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (1, 0xF0), 31, 24, BFDramHoleBase);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (1, 0xF0), 15,  7, BFDramHoleOffset);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (1, 0xF0),  0,  0, BFDramHoleValid);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (1, 0xF0), 31,  0, BFDramHoleAddrReg);

  // ---- PCI config (NB_ACCESS): D18F2 DRAM controller, whole registers ----
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x40), 31,  0, BFCSBaseAddr0Reg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x44), 31,  0, BFCSBaseAddr1Reg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x48), 31,  0, BFCSBaseAddr2Reg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x4C), 31,  0, BFCSBaseAddr3Reg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x60), 31,  0, BFCSMask0Reg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x64), 31,  0, BFCSMask1Reg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78), 31,  0, BFDramControlReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 31,  0, BFDramInitRegReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x80), 31,  0, BFDramBankAddrReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x84), 31,  0, BFDramMRSReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x88), 31,  0, BFDramTimingLoReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C), 31,  0, BFDramTimingHiReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x90), 31,  0, BFDramConfigLoReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94), 31,  0, BFDramConfigHiReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x98), 31,  0, BFDctAddlOffsetReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x98), 31, 31, BFDctAccessDone);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x9C), 31,  0, BFDctAddlDataReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xA0), 31,  0, BFDramConfigMiscReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xA8), 31,  0, BFDramCtrlMiscReg2);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xF0), 31,  0, BFDctExtraOffsetReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xF0), 31, 31, BFDctExtraAccessDone);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xF4), 31,  0, BFDctExtraDataReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x11C), 31, 0, BFMctCfgHiReg);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x118), 31, 0, BFMctCfgLoReg);

  // ---- D18F2 sub-fields: DRAM control (0x78) ----
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78), 13,  8, BFNonSPDHi);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78),  3,  0, BFRdPtrInit);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78),  6,  6, BFRxPtrInitReq);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78),  9,  8, BFTwrrdHi);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78), 11, 10, BFTwrwrHi);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78), 13, 12, BFTrdrdHi);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78), 17, 17, BFAddrCmdTriEn);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78), 20, 20, BFForceCasToSlot0);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78), 21, 21, BFDisCutThroughMode);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x78), 31, 22, BFMaxLatency);

  // ---- D18F2 sub-fields: DRAM initialization (0x7C) ----
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 15,  0, BFMrsAddress);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 18, 16, BFMrsBank);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 22, 20, BFMrsChipSel);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 24, 24, BFSendPchgAll);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 25, 25, BFSendAutoRefresh);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 26, 26, BFSendMrsCmd);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 27, 27, BFDeassertMemRstX);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 28, 28, BFAssertCke);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 29, 29, BFSendZQCmd);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x7C), 31, 31, BFEnDramInit);

  // ---- D18F2 sub-fields: MRS (0x84) and timing low (0x88) ----
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x84),  1,  0, BFBurstCtrl);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x84),  3,  2, BFDrvImpCtrl);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x84),  6,  4, BFTwrDDR3);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x84), 22, 20, BFTcwl);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x84), 23, 23, BFPchgPDModeSel);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x88),  3,  0, BFTcl);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x88), 31, 24, BFMemClkDis);

  // ---- D18F2 sub-fields: timing high (0x8C) ----
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C), 15,  0, BFNonSPD);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C),  3,  0, BFTrwtWB);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C),  7,  4, BFTrwtTO);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C), 11, 10, BFTwrrd);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C), 13, 12, BFTwrwr);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C), 15, 14, BFTrdrd);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C), 17, 16, BFTref);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C), 18, 18, BFDisAutoRefresh);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C), 22, 20, BFTrfc0);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x8C), 25, 23, BFTrfc1);

  // ---- D18F2 sub-fields: config low (0x90) / config high (0x94) ----
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x90),  0,  0, BFInitDram);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x90),  1,  1, BFExitSelfRef);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x90), 17, 17, BFEnterSelfRef);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x90), 22, 21, BFIdleCycInit);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x90), 25, 25, BFEnDispAutoPrecharge);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x90), 26, 26, BFDbeSkidBufDis);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x90), 27, 27, BFDisDllShutdownSR);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94),  4,  0, BFMemClkFreq);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94),  7,  7, BFMemClkFreqVal);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94), 11, 10, BFZqcsInterval);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94), 14, 14, BFDisDramInterface);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94), 15, 15, BFPowerDownEn);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94), 16, 16, BFPowerDownMode);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94), 20, 20, BFSlowAccessMode);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94), 22, 22, BFBankSwizzleMode);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94), 27, 24, BFDcqBypassMax);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x94), 31, 28, BFFourActWindow);

  // ---- D18F2 sub-fields: throttling / misc / training status ----
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xA4),  0,  0, BFDoubleTrefRateEn);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xA4),  2,  1, BFThrottleEn);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xA8), 22, 21, BFDbeGskMemClkAlignMode);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xAC),  0,  0, BFMemTempHot);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0xC0),  0,  0, BFTraceModeEn);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x110),  3,  3, BFMemClrInit);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x110),  8,  8, BFDramEnabled);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x110),  9,  9, BFMemClrBusy);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x110), 10, 10, BFMemCleared);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x114),  9,  9, BFDctSelBankSwap);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x118), 19, 19, BFC6DramLock);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x11C), 12, 12, BFPrefCpuDis);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x11C),  6,  2, BFDctWrLimit);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1C0), 23, 23, BFRdTrainGo);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1C0), 22, 22, BFRdDramTrainMode);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1C0), 20, 20, BFDramTrainPdbDis);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1C0), 17,  2, BFTrainLength);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1C0),  1,  1, BFWrTrainGo);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1C0),  0,  0, BFWrDramTrainMode);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1C8), 31,  0, BFWrTrainAdrPtrLo);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1CC), 17, 16, BFWrTrainAdrPtrHi);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1D0),  9,  0, BFWrTrainBufAddr);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1D4), 31,  0, BFWrTrainBufDat);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1E8), 15,  8, BFTrainCmpSts2);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (2, 0x1E8),  7,  0, BFTrainCmpSts);

  // ---- D18F3 / D18F4 / D18F6: clock, NB P-state and power management ----
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (3, 0xDC), 19, 19, BFNclkFreqDone);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (3, 0xD4),  5,  0, BFMainPllOpFreqId);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (3, 0xDC), 26, 20, BFNbPs0NclkDiv);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (3, 0xE8),  7,  5, BFDdrMaxRate);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (3, 0x188), 22, 22, BFEnCpuSerRdBehindNpIoWr);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (6, 0x90),  6,  0, BFNbPs1NclkDiv);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (6, 0x90), 28, 28, BFNbPsForceReq);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (6, 0x90), 30, 30, BFNbPsCtrlDis);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (6, 0x98), 30, 30, BFNbPsCsrAccSel);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (6, 0x98), 31, 31, BFNbPsDbgEn);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (4, 0x12C), 11,  0, BFC6Base);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (4, 0x164),  0,  0, BFFixedErrataSkipPorFreqCap);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (4, 0x1A8), 29, 29, BFDramSrHysEn);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (4, 0x1A8), 28, 26, BFDramSrHys);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (4, 0x1A8), 25, 25, BFMemTriStateEn);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (4, 0x1A8), 24, 24, BFDramSrEn);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (3, 0x84), 31,  0, BFAcpiPwrStsCtrlHi);
  MAKE_TSEFO (NBRegTable, NB_ACCESS, _FN (3, 0x1FC),  2,  2, BFLowPowerDefault);

  // ---- Indirect DCT phy space (DCT_PHY_ACCESS via F2x98/F2x9C) ----
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x00,  2,  0, BFCkeDrvStren);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x00,  6,  4, BFCsOdtDrvStren);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x00, 10,  8, BFAddrCmdDrvStren);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x00, 14, 12, BFClkDrvStren);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x00, 18, 16, BFDataDrvStren);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x00, 22, 20, BFDqsDrvStren);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x00, 30, 28, BFProcOdt);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x00, 31,  0, BFODCControl);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x04, 31,  0, BFAddrTmgControl);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x08,  0,  0, BFWrtLvTrEn);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x08,  1,  1, BFWrtLvTrMode);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x08,  3,  3, BFPhyFenceTrEn);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x08,  4,  4, BFTrDimmSel);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x08,  7,  6, BFFenceTrSel);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x08, 11,  8, BFWrLvOdt);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x08, 12, 12, BFWrLvOdtEn);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x08, 13, 13, BFDqsRcvTrEn);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0B, 31,  0, BFDramPhyStatusReg);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0C, 20, 16, BFPhyFence);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0C, 13, 12, BFCKETri);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0C, 11,  8, BFODTTri);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0C,  7,  0, BFChipSelTri);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0D, 25, 24, BFRxDLLWakeupTime);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0D, 22, 20, BFRxCPUpdPeriod);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0D, 19, 16, BFRxMaxDurDllNoLock);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0D,  9,  8, BFTxDLLWakeupTime);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0D,  6,  4, BFTxCPUpdPeriod);
  MAKE_TSEFO (NBRegTable, DCT_PHY_ACCESS, 0x0D,  3,  0, BFTxMaxDurDllNoLock);

  // ---- Direct phy space (DCT_PHY_DIRECT, 0x0Dxxxxxx addresses) ----
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F10, 12, 12, BFEnRxPadStandby);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F13,  7,  0, BFPhy0x0D0F0F13Bit0to7);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FE003, 14, 13, BFDisablePredriverCal);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FE006, 15,  0, BFPllLockTime);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F2030,  4,  4, BFPhyClkConfig0);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F2130,  4,  4, BFPhyClkConfig1);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FC000,  8,  8, BFLowPowerDrvStrengthEn);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F812F, 15,  0, BFAddrCmdTri);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F0F, 14, 12, BFAlwaysEnDllClks);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F31, 14,  0, BFDataFence2);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F2F31,  4,  0, BFClkFence2);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F8F31,  4,  0, BFCmdFence2);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FC031,  4,  0, BFAddrFence2);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F02, 15,  0, BFDataByteTxPreDriverCal);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F06, 15,  0, BFDataByteTxPreDriverCal2Pad1);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F0A, 15,  0, BFDataByteTxPreDriverCal2Pad2);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F8006, 15,  0, BFCmdAddr0TxPreDriverCal2Pad1);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F800A, 15,  0, BFCmdAddr0TxPreDriverCal2Pad2);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F8106, 15,  0, BFCmdAddr1TxPreDriverCal2Pad1);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F810A, 15,  0, BFCmdAddr1TxPreDriverCal2Pad2);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FC006, 15,  0, BFAddrTxPreDriverCal2Pad1);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FC00A, 15,  0, BFAddrTxPreDriverCal2Pad2);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FC00E, 15,  0, BFAddrTxPreDriverCal2Pad3);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FC012, 15,  0, BFAddrTxPreDriverCal2Pad4);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F8002, 15,  0, BFCmdAddr0TxPreDriverCalPad0);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F8102, 15,  0, BFCmdAddr1TxPreDriverCalPad0);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FC002, 15,  0, BFAddrTxPreDriverCalPad0);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F2002, 15,  0, BFClock0TxPreDriverCalPad0);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F2102, 15,  0, BFClock1TxPreDriverCalPad0);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F1C00, 15,  0, BFPNOdtCal);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F1D00, 15,  0, BFPNDrvCal);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D081E00, 15,  0, BFCalVal);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F1F,  4,  3, BFDataRxVioLvl);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F2F1F,  4,  3, BFClkRxVioLvl);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F4009, 15, 14, BFCmpVioLvl);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F8F1F,  4,  3, BFCmdRxVioLvl);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FC01F,  4,  3, BFAddrRxVioLvl);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F00,  6,  4, BFDQOdt03);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F08,  6,  4, BFDQOdt47);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F1E, 14, 12, BFDllCSRBisaTrimDByte);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F2F1E, 14, 12, BFDllCSRBisaTrimClk);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F8F1E, 14, 12, BFDllCSRBisaTrimCsOdt);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0FCF1E, 14, 12, BFDllCSRBisaTrimAByte2);
  MAKE_TSEFO (NBRegTable, DCT_PHY_DIRECT, 0x0D0F0F38, 14, 13, BFReduceLoop);

  // ---- DCT "extra" space (DCT_EXTRA via F2xF0/F2xF4) ----
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x06, 11,  8, BFTwrrdSD);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x06,  3,  0, BFTrdrdSD);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x16,  3,  0, BFTwrwrSD);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x30, 12,  0, BFDbeGskFifoNumerator);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x31, 12,  0, BFDbeGskFifoDenominator);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x32,  4,  0, BFDataTxFifoSchedDlySlot0);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x32,  7,  7, BFDataTxFifoSchedDlyNegSlot0);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x32, 12,  8, BFDataTxFifoSchedDlySlot1);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x32, 15, 15, BFDataTxFifoSchedDlyNegSlot1);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x40,  3,  0, BFTrcd);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x40, 11,  8, BFTrp);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x40, 20, 16, BFTras);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x40, 29, 24, BFTrc);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x41,  2,  0, BFTrtp);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x41, 10,  8, BFTrrd);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x41, 18, 16, BFTwtr);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x83,  2,  0, BFRdOdtTrnOnDly);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x83,  6,  4, BFRdOdtOnDuration);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x83,  8,  8, BFWrOdtTrnOnDly);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x83, 14, 12, BFWrOdtOnDuration);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x180, 31,  0, BFRdOdtPatReg);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x182, 31,  0, BFWrOdtPatReg);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x200,  3,  0, BFTxp);
  MAKE_TSEFO (NBRegTable, DCT_EXTRA, 0x200, 12,  8, BFTxpdll);

  // Chain fields whose value spans two registers (low bits + "Hi" extension).
  LINK_TSEFO (NBRegTable, BFTwrrd, BFTwrrdHi);
  LINK_TSEFO (NBRegTable, BFTwrwr, BFTwrwrHi);
  LINK_TSEFO (NBRegTable, BFTrdrd, BFTrdrdHi);
}
gpl-2.0
xjzhou/oceanbase
oceanbase_0.4/src/common/ob_server.cpp
11
8655
/*
 * (C) 2007-2010 Taobao Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 *
 * Version: 0.1
 *
 * Authors:
 *   qushan <qushan@taobao.com>
 *     - some work details if you want
 *
 */
#include "ob_server.h"
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include "utility.h"
namespace oceanbase
{
  namespace common
  {
    // --------------------------------------------------------
    // class ObServer implements
    // --------------------------------------------------------

    // Convert a dotted-quad string (or, as a fallback, a resolvable host
    // name) to an IPv4 address in network byte order.
    // Returns 0 when ip is NULL or cannot be resolved.
    uint32_t ObServer::convert_ipv4_addr(const char *ip)
    {
      if (NULL == ip) return 0;
      uint32_t x = inet_addr(ip);
      if (x == INADDR_NONE)
      {
        // Not numeric; try a resolver lookup.
        // NOTE(review): gethostbyname uses static storage and is not
        // thread-safe -- confirm callers do not race here.
        struct hostent *hp = NULL;
        if ((hp = gethostbyname(ip)) == NULL)
        {
          return 0;
        }
        x = ((struct in_addr *)hp->h_addr)->s_addr;
      }
      return x;
    }

    // Format "a.b.c.d:port" (or just "a.b.c.d" when port_ <= 0) into buffer.
    // Only the IPV4 form is handled here; returns the number of bytes written.
    int64_t ObServer::to_string(char* buffer, const int64_t size) const
    {
      int64_t pos = 0;
      if (NULL != buffer && size > 0)
      {
        // databuff_printf(buffer, size, pos, "version=%d ", version_);
        if (version_ == IPV4)
        {
          // ip.v4_ is network byte order
          if (port_ > 0)
          {
            databuff_printf(buffer, size, pos, "%d.%d.%d.%d:%d",
                (this->ip.v4_ & 0xFF), (this->ip.v4_ >> 8) & 0xFF,
                (this->ip.v4_ >> 16) & 0xFF, (this->ip.v4_ >> 24) & 0xFF, port_);
          }
          else
          {
            databuff_printf(buffer, size, pos, "%d.%d.%d.%d",
                (this->ip.v4_ & 0xFF), (this->ip.v4_ >> 8) & 0xFF,
                (this->ip.v4_ >> 16) & 0xFF, (this->ip.v4_ >> 24) & 0xFF);
          }
        }
      }
      return pos;
    }

    // Format only the dotted-quad address (no port) into buffer.
    // NOTE(review): returns true even when version_ != IPV4, in which case
    // buffer is left untouched -- confirm this is intentional.
    bool ObServer::ip_to_string(char* buffer, const int32_t size) const
    {
      bool res = false;
      if (NULL != buffer && size > 0)
      {
        if (version_ == IPV4)
        {
          // ip.v4_ is network byte order
          snprintf(buffer, size, "%d.%d.%d.%d",
              (this->ip.v4_ & 0xFF), (this->ip.v4_ >> 8) & 0xFF,
              (this->ip.v4_ >> 16) & 0xFF, (this->ip.v4_ >> 24) & 0xFF);
        }
        res = true;
      }
      return res;
    }

    // Render into one of BUFFER_NUM per-thread rotating buffers so the
    // returned pointer stays valid across several calls in one statement
    // (e.g. multiple %s arguments in a single log line).
    const char* ObServer::to_cstring() const
    {
      static const int64_t BUFFER_NUM = 16;
      static __thread char buff[BUFFER_NUM][OB_IP_STR_BUFF];
      static __thread int64_t i = 0;
      i++;  // advance to the next slot before formatting
      memset(buff[i % BUFFER_NUM], 0, OB_IP_STR_BUFF);
      to_string(buff[i % BUFFER_NUM], OB_IP_STR_BUFF);
      return buff[ i % BUFFER_NUM];
    }

    // Set from a string address; rejects NULL ip or non-positive port.
    bool ObServer::set_ipv4_addr(const char* ip, const int32_t port)
    {
      bool res = true;
      if (NULL == ip || port <= 0)
      {
        res = false;
      }
      if (res)
      {
        version_ = IPV4;
        port_ = port;
        this->ip.v4_ = convert_ipv4_addr(ip);
      }
      return res;
    }

    // Set from an already-encoded address; performs no validation.
    bool ObServer::set_ipv4_addr(const int32_t ip, const int32_t port)
    {
      version_ = IPV4;
      this->ip.v4_ = ip;
      this->port_ = port;
      return true;
    }

    //this is only for test
    // Replace the last octet of the stored IPv4 address.
    void ObServer::reset_ipv4_10(int ip)
    {
      this->ip.v4_ = this->ip.v4_ & 0xFFFFFF00L;
      this->ip.v4_ += ip;
    }

    // Pack port (high 32 bits) and IPv4 address (low 32 bits) into one id.
    // Returns 0 for non-IPv4 servers.
    int64_t ObServer::get_ipv4_server_id() const
    {
      int64_t server_id = 0;
      if (version_ == IPV4)
      {
        server_id = this->port_;
        server_id <<= 32;
        server_id |= this->ip.v4_;
      }
      return server_id;
    }

    // Equal iff version, port and the full address (v4 or all four v6
    // words) match.
    bool ObServer::operator ==(const ObServer& rv) const
    {
      bool res = true;
      if (version_ != rv.version_)
      {
        res = false;
      }
      else if (port_ != rv.port_)
      {
        res = false;
      }
      else
      {
        if (version_ == IPV4)
        {
          if (ip.v4_ != rv.ip.v4_)
          {
            res = false;
          }
        }
        else if (version_ == IPV6)
        {
          if (ip.v6_[0] != rv.ip.v6_[0] || ip.v6_[1] != rv.ip.v6_[1]
              || ip.v6_[2] != rv.ip.v6_[2] || ip.v6_[3] != rv.ip.v6_[3] )
          {
            res = false;
          }
        }
      }
      return res;
    }

    // Logical negation of operator==.
    bool ObServer::operator !=(const ObServer& rv) const
    {
      bool res = false;
      if (*this == rv)
      {
        res = false;
      }
      else
      {
        res = true;
      }
      return res;
    }

    // Strict "less than" on address only (port ignored): first by version,
    // then by v4 value or lexicographic v6 memcmp.
    bool ObServer::compare_by_ip(const ObServer& rv) const
    {
      bool res = true;
      if (version_ != rv.version_)
      {
        res = version_ < rv.version_;
      }
      else
      {
        if (version_ == IPV4)
        {
          res = ip.v4_ < rv.ip.v4_;
        }
        else if (version_ == IPV6)
        {
          res = memcmp(ip.v6_, rv.ip.v6_, sizeof(uint32_t) * 4) < 0;
        }
      }
      return res;
    }

    // Address-only equality; port is deliberately not compared.
    bool ObServer::is_same_ip(const ObServer& rv) const
    {
      bool res = true;
      if (version_ != rv.version_)
      {
        res = false;
      }
      else
      {
        if (version_ == IPV4)
        {
          if (ip.v4_ != rv.ip.v4_)
          {
            res = false;
          }
        }
        else if (version_ == IPV6)
        {
          if (ip.v6_[0] != rv.ip.v6_[0] || ip.v6_[1] != rv.ip.v6_[1]
              || ip.v6_[2] != rv.ip.v6_[2] || ip.v6_[3] != rv.ip.v6_[3] )
          {
            res = false;
          }
        }
      }
      return res;
    }

    // Total order: by address first, then by port when the addresses tie.
    bool ObServer::operator < (const ObServer& rv) const
    {
      bool res = compare_by_ip(rv);
      // a >= b
      if (false == res)
      {
        // b >= a, so the addresses are equal: fall back to the port
        if (false == rv.compare_by_ip(*this))
        {
          res = port_ < rv.port_;
        }
      }
      return res;
    }

    // Trivial accessors.
    int32_t ObServer::get_version() const
    {
      return version_;
    }

    int32_t ObServer::get_port() const
    {
      return port_;
    }

    uint32_t ObServer::get_ipv4() const
    {
      return ip.v4_;
    }

    // NOTE(review): reinterpret_cast of uint32_t[2] as uint64_t is a strict
    // aliasing violation and makes the result endianness-dependent -- the
    // intended byte layout should be confirmed against callers.
    uint64_t ObServer::get_ipv6_high() const
    {
      const uint64_t *p = reinterpret_cast<const uint64_t*>(&ip.v6_[0]);
      return *p;
    }

    uint64_t ObServer::get_ipv6_low() const
    {
      const uint64_t *p = reinterpret_cast<const uint64_t*>(&ip.v6_[2]);
      return *p;
    }

    // Saturate every field so this instance sorts after any real server.
    // NOTE(review): port_ is int32_t, so UINT32_MAX stores as -1 -- verify
    // comparisons still treat this as a maximum sentinel.
    void ObServer::set_max()
    {
      ip.v4_ = UINT32_MAX;
      port_ = UINT32_MAX;
      for (int i=0; i<4; i++)
      {
        ip.v6_[i] = UINT32_MAX;
      }
    }

    void ObServer::set_port(int32_t port)
    {
      port_ = port;
    }

    // Wire format: varint version, varint port, then either one varint (v4)
    // or four varints (v6).  Deserialize below must mirror this exactly.
    DEFINE_SERIALIZE(ObServer)
    {
      int ret = OB_ERROR;
      ret = serialization::encode_vi32(buf, buf_len, pos, version_);
      if (ret == OB_SUCCESS)
        ret = serialization::encode_vi32(buf, buf_len, pos, port_);
      if (ret == OB_SUCCESS)
      {
        if (version_ == IPV4)
        {
          ret = serialization::encode_vi32(buf, buf_len, pos, ip.v4_);
        }
        else
        {
          // ipv6
          for (int i=0; i<4; i++)
          {
            ret = serialization::encode_vi32(buf, buf_len, pos, ip.v6_[i]);
            if (ret != OB_SUCCESS) break;
          }
        }
      }
      return ret;
    }

    // Inverse of DEFINE_SERIALIZE above; field order must match.
    DEFINE_DESERIALIZE(ObServer)
    {
      int ret = OB_ERROR;
      ret = serialization::decode_vi32(buf, data_len, pos, &version_);
      if (ret == OB_SUCCESS)
        ret = serialization::decode_vi32(buf, data_len, pos, &port_);
      if (ret == OB_SUCCESS)
      {
        if (version_ == IPV4)
        {
          ret = serialization::decode_vi32(buf, data_len, pos, (int32_t*)&(ip.v4_));
        }
        else
        {
          for (int i=0; i<4; i++)
          {
            ret = serialization::decode_vi32(buf, data_len, pos, (int32_t*)(ip.v6_ + i));
            if (ret != OB_SUCCESS) break;
          }
        }
      }
      return ret;
    }

    // Byte count that DEFINE_SERIALIZE will produce for this instance.
    DEFINE_GET_SERIALIZE_SIZE(ObServer)
    {
      int64_t total_size = 0;
      total_size += serialization::encoded_length_vi32(version_);
      total_size += serialization::encoded_length_vi32(port_);
      if (version_ == IPV4)
      {
        total_size += serialization::encoded_length_vi32(ip.v4_);
      }
      else
      {
        // ipv6
        for (int i=0; i<4; i++)
        {
          total_size += serialization::encoded_length_vi32(ip.v6_[i]);
        }
      }
      return total_size;
    }
  } // end namespace common
} // end namespace oceanbase
gpl-2.0
Lillecarl/mangos-classic
src/realmd/Main.cpp
11
11644
/* * This file is part of the CMaNGOS Project. See AUTHORS file for Copyright information * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /// \addtogroup realmd Realm Daemon /// @{ /// \file #include "Common.h" #include "Database/DatabaseEnv.h" #include "RealmList.h" #include "Config/Config.h" #include "Log.h" #include "AuthSocket.h" #include "SystemConfig.h" #include "revision.h" #include "revision_sql.h" #include "Util.h" #include "Network/Listener.hpp" #include <openssl/opensslv.h> #include <openssl/crypto.h> #include <boost/program_options.hpp> #include <boost/version.hpp> #include <iostream> #include <string> #include <chrono> #include <thread> #ifdef WIN32 #include "ServiceWin32.h" char serviceName[] = "realmd"; char serviceLongName[] = "MaNGOS realmd service"; char serviceDescription[] = "Massive Network Game Object Server"; /* * -1 - not in service mode * 0 - stopped * 1 - running * 2 - paused */ int m_ServiceStatus = -1; #else #include "PosixDaemon.h" #endif bool StartDB(); void UnhookSignals(); void HookSignals(); bool stopEvent = false; ///< Setting it to true stops the server DatabaseType LoginDatabase; ///< Accessor to the realm server database /// Print out the usage string for this program on the console. 
void usage(const char* prog) { sLog.outString("Usage: \n %s [<options>]\n" " -v, --version print version and exist\n\r" " -c config_file use config_file as configuration file\n\r" #ifdef WIN32 " Running as service functions:\n\r" " -s run run as service\n\r" " -s install install service\n\r" " -s uninstall uninstall service\n\r" #else " Running as daemon functions:\n\r" " -s run run as daemon\n\r" " -s stop stop daemon\n\r" #endif , prog); } /// Launch the realm server int main(int argc, char *argv[]) { std::string configFile, serviceParameter; boost::program_options::options_description desc("Allowed options"); desc.add_options() ("config,c", boost::program_options::value<std::string>(&configFile)->default_value(_REALMD_CONFIG), "configuration file") ("version,v", "print version and exit") #ifdef WIN32 ("s", boost::program_options::value<std::string>(&serviceParameter), "<run, install, uninstall> service"); #else ("s", boost::program_options::value<std::string>(&serviceParameter), "<run, stop> service"); #endif boost::program_options::variables_map vm; try { boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), vm); boost::program_options::notify(vm); } catch (boost::program_options::error const &e) { std::cerr << "ERROR: " << e.what() << std::endl << std::endl; std::cerr << desc << std::endl; return 1; } #ifdef WIN32 // windows service command need execute before config read if (vm.count("s")) { switch (::tolower(serviceParameter[0])) { case 'i': if (WinServiceInstall()) sLog.outString("Installing service"); return 1; case 'u': if (WinServiceUninstall()) sLog.outString("Uninstalling service"); return 1; case 'r': WinServiceRun(); break; } } #endif if (!sConfig.SetSource(configFile)) { sLog.outError("Could not find configuration file %s.", configFile.c_str()); Log::WaitBeforeContinueIfNeed(); return 1; } #ifndef WIN32 // posix daemon commands need apply after config read if (vm.count("s")) { switch 
(::tolower(serviceParameter[0])) { case 'r': startDaemon(); break; case 's': stopDaemon(); break; } } #endif sLog.Initialize(); sLog.outString("%s [realm-daemon]", _FULLVERSION(REVISION_DATE, REVISION_TIME, REVISION_ID)); sLog.outString("<Ctrl-C> to stop.\n"); sLog.outString("Using configuration file %s.", configFile.c_str()); ///- Check the version of the configuration file uint32 confVersion = sConfig.GetIntDefault("ConfVersion", 0); if (confVersion < _REALMDCONFVERSION) { sLog.outError("*****************************************************************************"); sLog.outError(" WARNING: Your realmd.conf version indicates your conf file is out of date!"); sLog.outError(" Please check for updates, as your current default values may cause"); sLog.outError(" strange behavior."); sLog.outError("*****************************************************************************"); Log::WaitBeforeContinueIfNeed(); } DETAIL_LOG("%s (Library: %s)", OPENSSL_VERSION_TEXT, SSLeay_version(SSLEAY_VERSION)); if (SSLeay() < 0x009080bfL) { DETAIL_LOG("WARNING: Outdated version of OpenSSL lib. 
Logins to server may not work!"); DETAIL_LOG("WARNING: Minimal required version [OpenSSL 0.9.8k]"); } /// realmd PID file creation std::string pidfile = sConfig.GetStringDefault("PidFile"); if (!pidfile.empty()) { uint32 pid = CreatePIDFile(pidfile); if (!pid) { sLog.outError("Cannot create PID file %s.\n", pidfile.c_str()); Log::WaitBeforeContinueIfNeed(); return 1; } sLog.outString("Daemon PID: %u\n", pid); } ///- Initialize the database connection if (!StartDB()) { Log::WaitBeforeContinueIfNeed(); return 1; } ///- Get the list of realms for the server sRealmList.Initialize(sConfig.GetIntDefault("RealmsStateUpdateDelay", 20)); if (sRealmList.size() == 0) { sLog.outError("No valid realms specified."); Log::WaitBeforeContinueIfNeed(); return 1; } // cleanup query // set expired bans to inactive LoginDatabase.BeginTransaction(); LoginDatabase.Execute("UPDATE account_banned SET active = 0 WHERE unbandate<=UNIX_TIMESTAMP() AND unbandate<>bandate"); LoginDatabase.Execute("DELETE FROM ip_banned WHERE unbandate<=UNIX_TIMESTAMP() AND unbandate<>bandate"); LoginDatabase.CommitTransaction(); auto rmport = sConfig.GetIntDefault("RealmServerPort", DEFAULT_REALMSERVER_PORT); std::string bind_ip = sConfig.GetStringDefault("BindIP", "0.0.0.0"); // FIXME - more intelligent selection of thread count is needed here. config option? MaNGOS::Listener<AuthSocket> listener(rmport, 1); ///- Catch termination signals HookSignals(); ///- Handle affinity for multiple processors and process priority on Windows #ifdef WIN32 { HANDLE hProcess = GetCurrentProcess(); uint32 Aff = sConfig.GetIntDefault("UseProcessors", 0); if (Aff > 0) { ULONG_PTR appAff; ULONG_PTR sysAff; if (GetProcessAffinityMask(hProcess, &appAff, &sysAff)) { ULONG_PTR curAff = Aff & appAff; // remove non accessible processors if (!curAff) { sLog.outError("Processors marked in UseProcessors bitmask (hex) %x not accessible for realmd. 
Accessible processors bitmask (hex): %x", Aff, appAff); } else { if (SetProcessAffinityMask(hProcess, curAff)) sLog.outString("Using processors (bitmask, hex): %x", curAff); else sLog.outError("Can't set used processors (hex): %x", curAff); } } sLog.outString(); } bool Prio = sConfig.GetBoolDefault("ProcessPriority", false); if (Prio) { if (SetPriorityClass(hProcess, HIGH_PRIORITY_CLASS)) sLog.outString("realmd process priority class set to HIGH"); else sLog.outError("Can't set realmd process priority class."); sLog.outString(); } } #endif // server has started up successfully => enable async DB requests LoginDatabase.AllowAsyncTransactions(); // maximum counter for next ping auto const numLoops = sConfig.GetIntDefault("MaxPingTime", 30) * MINUTE * 10; uint32 loopCounter = 0; #ifndef WIN32 detachDaemon(); #endif ///- Wait for termination signal while (!stopEvent) { if ((++loopCounter) == numLoops) { loopCounter = 0; DETAIL_LOG("Ping MySQL to keep connection alive"); LoginDatabase.Ping(); } std::this_thread::sleep_for(std::chrono::milliseconds(100)); #ifdef WIN32 if (m_ServiceStatus == 0) stopEvent = true; while (m_ServiceStatus == 2) Sleep(1000); #endif } ///- Wait for the delay thread to exit LoginDatabase.HaltDelayThread(); ///- Remove signal handling before leaving UnhookSignals(); sLog.outString("Halting process..."); return 0; } /// Handle termination signals /** Put the global variable stopEvent to 'true' if a termination signal is caught **/ void OnSignal(int s) { switch (s) { case SIGINT: case SIGTERM: stopEvent = true; break; #ifdef _WIN32 case SIGBREAK: stopEvent = true; break; #endif } signal(s, OnSignal); } /// Initialize connection to the database bool StartDB() { std::string dbstring = sConfig.GetStringDefault("LoginDatabaseInfo"); if (dbstring.empty()) { sLog.outError("Database not specified"); return false; } sLog.outString("Login Database total connections: %i", 1 + 1); if (!LoginDatabase.Initialize(dbstring.c_str())) { sLog.outError("Cannot 
connect to database"); return false; } if (!LoginDatabase.CheckRequiredField("realmd_db_version", REVISION_DB_REALMD)) { ///- Wait for already started DB delay threads to end LoginDatabase.HaltDelayThread(); return false; } return true; } /// Define hook 'OnSignal' for all termination signals void HookSignals() { signal(SIGINT, OnSignal); signal(SIGTERM, OnSignal); #ifdef _WIN32 signal(SIGBREAK, OnSignal); #endif } /// Unhook the signals before leaving void UnhookSignals() { signal(SIGINT, 0); signal(SIGTERM, 0); #ifdef _WIN32 signal(SIGBREAK, 0); #endif } /// @}
gpl-2.0
CyberGrandChallenge/linux-source-3.13.11-ckt21-cgc
drivers/net/usb/cdc_ncm.c
11
36290
/* * cdc_ncm.c * * Copyright (C) ST-Ericsson 2010-2012 * Contact: Alexey Orishko <alexey.orishko@stericsson.com> * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> * * USB Host Driver for Network Control Model (NCM) * http://www.usb.org/developers/devclass_docs/NCM10.zip * * The NCM encoding, decoding and initialization logic * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h * * This software is available to you under a choice of one of two * licenses. You may choose this file to be licensed under the terms * of the GNU General Public License (GPL) Version 2 or the 2-clause * BSD license listed below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/ctype.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mii.h> #include <linux/crc32.h> #include <linux/usb.h> #include <linux/hrtimer.h> #include <linux/atomic.h> #include <linux/usb/usbnet.h> #include <linux/usb/cdc.h> #include <linux/usb/cdc_ncm.h> #if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) static bool prefer_mbim = true; #else static bool prefer_mbim; #endif module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions"); static void cdc_ncm_txpath_bh(unsigned long param); static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); static struct usb_driver cdc_ncm_driver; static int cdc_ncm_setup(struct usbnet *dev) { struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; u32 val; u8 flags; u8 iface_no; int err; int eth_hlen; u16 ntb_fmt_supported; __le16 max_datagram_size; iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, USB_TYPE_CLASS | USB_DIR_IN |USB_RECIP_INTERFACE, 0, iface_no, &ctx->ncm_parm, sizeof(ctx->ncm_parm)); if (err < 0) { dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); return err; /* GET_NTB_PARAMETERS is required */ } /* read correct set of parameters according to device mode */ ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); /* devices prior to NCM Errata shall set this field to zero */ ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); /* there 
are some minor differences in NCM and MBIM defaults */ if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { if (!ctx->mbim_desc) return -EINVAL; eth_hlen = 0; flags = ctx->mbim_desc->bmNetworkCapabilities; ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize); if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE) ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE; } else { if (!ctx->func_desc) return -EINVAL; eth_hlen = ETH_HLEN; flags = ctx->func_desc->bmNetworkCapabilities; ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; } /* common absolute max for NCM and MBIM */ if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE) ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE; dev_dbg(&dev->intf->dev, "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n", ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags); /* max count of tx datagrams */ if ((ctx->tx_max_datagrams == 0) || (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; /* verify maximum size of received NTB in bytes */ if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) { dev_dbg(&dev->intf->dev, "Using min receive length=%d\n", USB_CDC_NCM_NTB_MIN_IN_SIZE); ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE; } if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) { dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n", CDC_NCM_NTB_MAX_SIZE_RX); ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; } /* inform device about NTB input size changes */ if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, 0, iface_no, 
&dwNtbInMaxSize, 4); if (err < 0) dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n"); } /* verify maximum size of transmitted NTB in bytes */ if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) { dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", CDC_NCM_NTB_MAX_SIZE_TX); ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; } /* * verify that the structure alignment is: * - power of two * - not greater than the maximum transmit length * - not less than four bytes */ val = ctx->tx_ndp_modulus; if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) || (val != ((-val) & val)) || (val >= ctx->tx_max)) { dev_dbg(&dev->intf->dev, "Using default alignment: 4 bytes\n"); ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE; } /* * verify that the payload alignment is: * - power of two * - not greater than the maximum transmit length * - not less than four bytes */ val = ctx->tx_modulus; if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) || (val != ((-val) & val)) || (val >= ctx->tx_max)) { dev_dbg(&dev->intf->dev, "Using default transmit modulus: 4 bytes\n"); ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE; } /* verify the payload remainder */ if (ctx->tx_remainder >= ctx->tx_modulus) { dev_dbg(&dev->intf->dev, "Using default transmit remainder: 0 bytes\n"); ctx->tx_remainder = 0; } /* adjust TX-remainder according to NCM specification. 
*/ ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) & (ctx->tx_modulus - 1)); /* additional configuration */ /* set CRC Mode */ if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE, USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, USB_CDC_NCM_CRC_NOT_APPENDED, iface_no, NULL, 0); if (err < 0) dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n"); } /* set NTB format, if both formats are supported */ if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, USB_CDC_NCM_NTB16_FORMAT, iface_no, NULL, 0); if (err < 0) dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n"); } /* inform the device about the selected Max Datagram Size */ if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE)) goto out; /* read current mtu value from device */ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE, USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, 0, iface_no, &max_datagram_size, 2); if (err < 0) { dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n"); goto out; } if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size) goto out; max_datagram_size = cpu_to_le16(ctx->max_datagram_size); err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, 0, iface_no, &max_datagram_size, 2); if (err < 0) dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); out: /* set MTU to max supported by the device if necessary */ if (dev->net->mtu > ctx->max_datagram_size - eth_hlen) dev->net->mtu = ctx->max_datagram_size - eth_hlen; return 0; } static void cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf) { struct usb_host_endpoint *e, *in = NULL, *out = NULL; u8 ep; for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) { e = intf->cur_altsetting->endpoint + ep; switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { case 
USB_ENDPOINT_XFER_INT: if (usb_endpoint_dir_in(&e->desc)) { if (!dev->status) dev->status = e; } break; case USB_ENDPOINT_XFER_BULK: if (usb_endpoint_dir_in(&e->desc)) { if (!in) in = e; } else { if (!out) out = e; } break; default: break; } } if (in && !dev->in) dev->in = usb_rcvbulkpipe(dev->udev, in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); if (out && !dev->out) dev->out = usb_sndbulkpipe(dev->udev, out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); } static void cdc_ncm_free(struct cdc_ncm_ctx *ctx) { if (ctx == NULL) return; if (ctx->tx_rem_skb != NULL) { dev_kfree_skb_any(ctx->tx_rem_skb); ctx->tx_rem_skb = NULL; } if (ctx->tx_curr_skb != NULL) { dev_kfree_skb_any(ctx->tx_curr_skb); ctx->tx_curr_skb = NULL; } kfree(ctx); } int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting) { const struct usb_cdc_union_desc *union_desc = NULL; struct cdc_ncm_ctx *ctx; struct usb_driver *driver; u8 *buf; int len; int temp; u8 iface_no; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ctx->tx_timer.function = &cdc_ncm_tx_timer_cb; ctx->bh.data = (unsigned long)dev; ctx->bh.func = cdc_ncm_txpath_bh; atomic_set(&ctx->stop, 0); spin_lock_init(&ctx->mtx); /* store ctx pointer in device data field */ dev->data[0] = (unsigned long)ctx; /* only the control interface can be successfully probed */ ctx->control = intf; /* get some pointers */ driver = driver_of(intf); buf = intf->cur_altsetting->extra; len = intf->cur_altsetting->extralen; /* parse through descriptors associated with control interface */ while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) { if (buf[1] != USB_DT_CS_INTERFACE) goto advance; switch (buf[2]) { case USB_CDC_UNION_TYPE: if (buf[0] < sizeof(*union_desc)) break; union_desc = (const struct usb_cdc_union_desc *)buf; /* the master must be the interface we are probing */ if (intf->cur_altsetting->desc.bInterfaceNumber != 
union_desc->bMasterInterface0) { dev_dbg(&intf->dev, "bogus CDC Union\n"); goto error; } ctx->data = usb_ifnum_to_if(dev->udev, union_desc->bSlaveInterface0); break; case USB_CDC_ETHERNET_TYPE: if (buf[0] < sizeof(*(ctx->ether_desc))) break; ctx->ether_desc = (const struct usb_cdc_ether_desc *)buf; break; case USB_CDC_NCM_TYPE: if (buf[0] < sizeof(*(ctx->func_desc))) break; ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf; break; case USB_CDC_MBIM_TYPE: if (buf[0] < sizeof(*(ctx->mbim_desc))) break; ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf; break; default: break; } advance: /* advance to next descriptor */ temp = buf[0]; buf += temp; len -= temp; } /* some buggy devices have an IAD but no CDC Union */ if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); } /* check if we got everything */ if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) { dev_dbg(&intf->dev, "CDC descriptors missing\n"); goto error; } /* claim data interface, if different from control */ if (ctx->data != ctx->control) { temp = usb_driver_claim_interface(driver, ctx->data, dev); if (temp) { dev_dbg(&intf->dev, "failed to claim data intf\n"); goto error; } } iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; /* reset data interface */ temp = usb_set_interface(dev->udev, iface_no, 0); if (temp) { dev_dbg(&intf->dev, "set interface failed\n"); goto error2; } /* initialize data interface */ if (cdc_ncm_setup(dev)) goto error2; /* configure data interface */ temp = usb_set_interface(dev->udev, iface_no, data_altsetting); if (temp) { dev_dbg(&intf->dev, "set interface failed\n"); goto error2; } cdc_ncm_find_endpoints(dev, ctx->data); cdc_ncm_find_endpoints(dev, ctx->control); if (!dev->in || !dev->out || !dev->status) { dev_dbg(&intf->dev, "failed to collect endpoints\n"); goto error2; } 
usb_set_intfdata(ctx->data, dev); usb_set_intfdata(ctx->control, dev); if (ctx->ether_desc) { temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress); if (temp) { dev_dbg(&intf->dev, "failed to get mac address\n"); goto error2; } dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr); } /* usbnet use these values for sizing tx/rx queues */ dev->hard_mtu = ctx->tx_max; dev->rx_urb_size = ctx->rx_max; /* cdc_ncm_setup will override dwNtbOutMaxSize if it is * outside the sane range. Adding a pad byte here if necessary * simplifies the handling in cdc_ncm_fill_tx_frame, making * tx_max always represent the real skb max size. */ if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) && ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) ctx->tx_max++; return 0; error2: usb_set_intfdata(ctx->control, NULL); usb_set_intfdata(ctx->data, NULL); if (ctx->data != ctx->control) usb_driver_release_interface(driver, ctx->data); error: cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]); dev->data[0] = 0; dev_info(&intf->dev, "bind() failure\n"); return -ENODEV; } EXPORT_SYMBOL_GPL(cdc_ncm_bind_common); void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) { struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; struct usb_driver *driver = driver_of(intf); if (ctx == NULL) return; /* no setup */ atomic_set(&ctx->stop, 1); if (hrtimer_active(&ctx->tx_timer)) hrtimer_cancel(&ctx->tx_timer); tasklet_kill(&ctx->bh); /* handle devices with combined control and data interface */ if (ctx->control == ctx->data) ctx->data = NULL; /* disconnect master --> disconnect slave */ if (intf == ctx->control && ctx->data) { usb_set_intfdata(ctx->data, NULL); usb_driver_release_interface(driver, ctx->data); ctx->data = NULL; } else if (intf == ctx->data && ctx->control) { usb_set_intfdata(ctx->control, NULL); usb_driver_release_interface(driver, ctx->control); ctx->control = NULL; } usb_set_intfdata(intf, NULL); cdc_ncm_free(ctx); } 
EXPORT_SYMBOL_GPL(cdc_ncm_unbind); /* Select the MBIM altsetting iff it is preferred and available, * returning the number of the corresponding data interface altsetting */ u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) { struct usb_host_interface *alt; /* The MBIM spec defines a NCM compatible default altsetting, * which we may have matched: * * "Functions that implement both NCM 1.0 and MBIM (an * “NCM/MBIM function”) according to this recommendation * shall provide two alternate settings for the * Communication Interface. Alternate setting 0, and the * associated class and endpoint descriptors, shall be * constructed according to the rules given for the * Communication Interface in section 5 of [USBNCM10]. * Alternate setting 1, and the associated class and * endpoint descriptors, shall be constructed according to * the rules given in section 6 (USB Device Model) of this * specification." */ if (prefer_mbim && intf->num_altsetting == 2) { alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); if (alt && cdc_ncm_comm_intf_is_mbim(alt) && !usb_set_interface(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber, CDC_NCM_COMM_ALTSETTING_MBIM)) return CDC_NCM_DATA_ALTSETTING_MBIM; } return CDC_NCM_DATA_ALTSETTING_NCM; } EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) { int ret; /* MBIM backwards compatible function? */ cdc_ncm_select_altsetting(dev, intf); if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) return -ENODEV; /* NCM data altsetting is always 1 */ ret = cdc_ncm_bind_common(dev, intf, 1); /* * We should get an event when network connection is "connected" or * "disconnected". Set network connection in "disconnected" state * (carrier is OFF) during attach, so the IP network stack does not * start IPv6 negotiation and more. 
*/ usbnet_link_change(dev, 0, 0); return ret; } static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max) { size_t align = ALIGN(skb->len, modulus) - skb->len + remainder; if (skb->len + align > max) align = max - skb->len; if (align && skb_tailroom(skb) >= align) memset(skb_put(skb, align), 0, align); } /* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly * allocating a new one within skb */ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve) { struct usb_cdc_ncm_ndp16 *ndp16 = NULL; struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data; size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex); /* follow the chain of NDPs, looking for a match */ while (ndpoffset) { ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); if (ndp16->dwSignature == sign) return ndp16; ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex); } /* align new NDP */ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); /* verify that there is room for the NDP and the datagram (reserve) */ if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE) return NULL; /* link to it */ if (ndp16) ndp16->wNextNdpIndex = cpu_to_le16(skb->len); else nth16->wNdpIndex = cpu_to_le16(skb->len); /* push a new empty NDP */ ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE); ndp16->dwSignature = sign; ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16)); return ndp16; } struct sk_buff * cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) { struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; struct usb_cdc_ncm_nth16 *nth16; struct usb_cdc_ncm_ndp16 *ndp16; struct sk_buff *skb_out; u16 n = 0, index, ndplen; u8 ready2send = 0; /* if there is a remaining skb, it gets priority */ if (skb != NULL) { swap(skb, ctx->tx_rem_skb); swap(sign, ctx->tx_rem_sign); } else { 
ready2send = 1; } /* check if we are resuming an OUT skb */ skb_out = ctx->tx_curr_skb; /* allocate a new OUT skb */ if (!skb_out) { skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC); if (skb_out == NULL) { if (skb != NULL) { dev_kfree_skb_any(skb); dev->net->stats.tx_dropped++; } goto exit_no_skb; } /* fill out the initial 16-bit NTB header */ nth16 = (struct usb_cdc_ncm_nth16 *)memset(skb_put(skb_out, sizeof(struct usb_cdc_ncm_nth16)), 0, sizeof(struct usb_cdc_ncm_nth16)); nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN); nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); nth16->wSequence = cpu_to_le16(ctx->tx_seq++); /* count total number of frames in this NTB */ ctx->tx_curr_frame_num = 0; } for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) { /* send any remaining skb first */ if (skb == NULL) { skb = ctx->tx_rem_skb; sign = ctx->tx_rem_sign; ctx->tx_rem_skb = NULL; /* check for end of skb */ if (skb == NULL) break; } /* get the appropriate NDP for this skb */ ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder); /* align beginning of next frame */ cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max); /* check if we had enough room left for both NDP and frame */ if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) { if (n == 0) { /* won't fit, MTU problem? 
*/ dev_kfree_skb_any(skb); skb = NULL; dev->net->stats.tx_dropped++; } else { /* no room for skb - store for later */ if (ctx->tx_rem_skb != NULL) { dev_kfree_skb_any(ctx->tx_rem_skb); dev->net->stats.tx_dropped++; } ctx->tx_rem_skb = skb; ctx->tx_rem_sign = sign; skb = NULL; ready2send = 1; } break; } /* calculate frame number withing this NDP */ ndplen = le16_to_cpu(ndp16->wLength); index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1; /* OK, add this skb */ ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len); ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len); ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16)); memcpy(skb_put(skb_out, skb->len), skb->data, skb->len); dev_kfree_skb_any(skb); skb = NULL; /* send now if this NDP is full */ if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) { ready2send = 1; break; } } /* free up any dangling skb */ if (skb != NULL) { dev_kfree_skb_any(skb); skb = NULL; dev->net->stats.tx_dropped++; } ctx->tx_curr_frame_num = n; if (n == 0) { /* wait for more frames */ /* push variables */ ctx->tx_curr_skb = skb_out; goto exit_no_skb; } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { /* wait for more frames */ /* push variables */ ctx->tx_curr_skb = skb_out; /* set the pending count */ if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT) ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT; goto exit_no_skb; } else { /* frame goes out */ /* variables will be reset at next call */ } /* If collected data size is less or equal CDC_NCM_MIN_TX_PKT * bytes, we send buffers as it is. If we get more data, it * would be more efficient for USB HS mobile device with DMA * engine to receive a full size NTB, than canceling DMA * transfer and receiving a short packet. * * This optimization support is pointless if we end up sending * a ZLP after full sized NTBs. 
*/ if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && skb_out->len > CDC_NCM_MIN_TX_PKT) memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len); else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) *skb_put(skb_out, 1) = 0; /* force short packet */ /* set final frame length */ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; nth16->wBlockLength = cpu_to_le16(skb_out->len); /* return skb */ ctx->tx_curr_skb = NULL; dev->net->stats.tx_packets += ctx->tx_curr_frame_num; return skb_out; exit_no_skb: /* Start timer, if there is a remaining skb */ if (ctx->tx_curr_skb != NULL) cdc_ncm_tx_timeout_start(ctx); return NULL; } EXPORT_SYMBOL_GPL(cdc_ncm_fill_tx_frame); static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx) { /* start timer, if not already started */ if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop))) hrtimer_start(&ctx->tx_timer, ktime_set(0, CDC_NCM_TIMER_INTERVAL), HRTIMER_MODE_REL); } static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *timer) { struct cdc_ncm_ctx *ctx = container_of(timer, struct cdc_ncm_ctx, tx_timer); if (!atomic_read(&ctx->stop)) tasklet_schedule(&ctx->bh); return HRTIMER_NORESTART; } static void cdc_ncm_txpath_bh(unsigned long param) { struct usbnet *dev = (struct usbnet *)param; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; spin_lock_bh(&ctx->mtx); if (ctx->tx_timer_pending != 0) { ctx->tx_timer_pending--; cdc_ncm_tx_timeout_start(ctx); spin_unlock_bh(&ctx->mtx); } else if (dev->net != NULL) { spin_unlock_bh(&ctx->mtx); netif_tx_lock_bh(dev->net); usbnet_start_xmit(NULL, dev->net); netif_tx_unlock_bh(dev->net); } else { spin_unlock_bh(&ctx->mtx); } } struct sk_buff * cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { struct sk_buff *skb_out; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; /* * The Ethernet API we are using does not support transmitting * multiple Ethernet frames in a single call. 
This driver will * accumulate multiple Ethernet frames and send out a larger * USB frame when the USB buffer is full or when a single jiffies * timeout happens. */ if (ctx == NULL) goto error; spin_lock_bh(&ctx->mtx); skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)); spin_unlock_bh(&ctx->mtx); return skb_out; error: if (skb != NULL) dev_kfree_skb_any(skb); return NULL; } EXPORT_SYMBOL_GPL(cdc_ncm_tx_fixup); /* verify NTB header and return offset of first NDP, or negative error */ int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in) { struct usbnet *dev = netdev_priv(skb_in->dev); struct usb_cdc_ncm_nth16 *nth16; int len; int ret = -EINVAL; if (ctx == NULL) goto error; if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16))) { netif_dbg(dev, rx_err, dev->net, "frame too short\n"); goto error; } nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data; if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) { netif_dbg(dev, rx_err, dev->net, "invalid NTH16 signature <%#010x>\n", le32_to_cpu(nth16->dwSignature)); goto error; } len = le16_to_cpu(nth16->wBlockLength); if (len > ctx->rx_max) { netif_dbg(dev, rx_err, dev->net, "unsupported NTB block length %u/%u\n", len, ctx->rx_max); goto error; } if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) && (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) && !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) { netif_dbg(dev, rx_err, dev->net, "sequence number glitch prev=%d curr=%d\n", ctx->rx_seq, le16_to_cpu(nth16->wSequence)); } ctx->rx_seq = le16_to_cpu(nth16->wSequence); ret = le16_to_cpu(nth16->wNdpIndex); error: return ret; } EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16); /* verify NDP header and return number of datagrams, or negative error */ int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset) { struct usbnet *dev = netdev_priv(skb_in->dev); struct usb_cdc_ncm_ndp16 *ndp16; int ret = -EINVAL; if 
((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) { netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n", ndpoffset); goto error; } ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset); if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) { netif_dbg(dev, rx_err, dev->net, "invalid DPT16 length <%u>\n", le16_to_cpu(ndp16->wLength)); goto error; } ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16)); ret--; /* we process NDP entries except for the last one */ if ((sizeof(struct usb_cdc_ncm_ndp16) + ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) { netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret); ret = -EINVAL; } error: return ret; } EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16); int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) { struct sk_buff *skb; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; int len; int nframes; int x; int offset; struct usb_cdc_ncm_ndp16 *ndp16; struct usb_cdc_ncm_dpe16 *dpe16; int ndpoffset; int loopcount = 50; /* arbitrary max preventing infinite loop */ ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in); if (ndpoffset < 0) goto error; next_ndp: nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset); if (nframes < 0) goto error; ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset); if (ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) { netif_dbg(dev, rx_err, dev->net, "invalid DPT16 signature <%#010x>\n", le32_to_cpu(ndp16->dwSignature)); goto err_ndp; } dpe16 = ndp16->dpe16; for (x = 0; x < nframes; x++, dpe16++) { offset = le16_to_cpu(dpe16->wDatagramIndex); len = le16_to_cpu(dpe16->wDatagramLength); /* * CDC NCM ch. 
3.7 * All entries after first NULL entry are to be ignored */ if ((offset == 0) || (len == 0)) { if (!x) goto err_ndp; /* empty NTB */ break; } /* sanity checking */ if (((offset + len) > skb_in->len) || (len > ctx->rx_max) || (len < ETH_HLEN)) { netif_dbg(dev, rx_err, dev->net, "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n", x, offset, len, skb_in); if (!x) goto err_ndp; break; } else { skb = skb_clone(skb_in, GFP_ATOMIC); if (!skb) goto error; skb->len = len; skb->data = ((u8 *)skb_in->data) + offset; skb_set_tail_pointer(skb, len); usbnet_skb_return(dev, skb); } } err_ndp: /* are there more NDPs to process? */ ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex); if (ndpoffset && loopcount--) goto next_ndp; return 1; error: return 0; } EXPORT_SYMBOL_GPL(cdc_ncm_rx_fixup); static void cdc_ncm_speed_change(struct usbnet *dev, struct usb_cdc_speed_change *data) { uint32_t rx_speed = le32_to_cpu(data->DLBitRRate); uint32_t tx_speed = le32_to_cpu(data->ULBitRate); /* * Currently the USB-NET API does not support reporting the actual * device speed. Do print it instead. 
*/ if ((tx_speed > 1000000) && (rx_speed > 1000000)) { netif_info(dev, link, dev->net, "%u mbit/s downlink %u mbit/s uplink\n", (unsigned int)(rx_speed / 1000000U), (unsigned int)(tx_speed / 1000000U)); } else { netif_info(dev, link, dev->net, "%u kbit/s downlink %u kbit/s uplink\n", (unsigned int)(rx_speed / 1000U), (unsigned int)(tx_speed / 1000U)); } } static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) { struct cdc_ncm_ctx *ctx; struct usb_cdc_notification *event; ctx = (struct cdc_ncm_ctx *)dev->data[0]; if (urb->actual_length < sizeof(*event)) return; /* test for split data in 8-byte chunks */ if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { cdc_ncm_speed_change(dev, (struct usb_cdc_speed_change *)urb->transfer_buffer); return; } event = urb->transfer_buffer; switch (event->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: /* * According to the CDC NCM specification ch.7.1 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. */ ctx->connected = le16_to_cpu(event->wValue); netif_info(dev, link, dev->net, "network connection: %sconnected\n", ctx->connected ? 
"" : "dis"); usbnet_link_change(dev, ctx->connected, 0); break; case USB_CDC_NOTIFY_SPEED_CHANGE: if (urb->actual_length < (sizeof(*event) + sizeof(struct usb_cdc_speed_change))) set_bit(EVENT_STS_SPLIT, &dev->flags); else cdc_ncm_speed_change(dev, (struct usb_cdc_speed_change *)&event[1]); break; default: dev_dbg(&dev->udev->dev, "NCM: unexpected notification 0x%02x!\n", event->bNotificationType); break; } } static int cdc_ncm_check_connect(struct usbnet *dev) { struct cdc_ncm_ctx *ctx; ctx = (struct cdc_ncm_ctx *)dev->data[0]; if (ctx == NULL) return 1; /* disconnected */ return !ctx->connected; } static const struct driver_info cdc_ncm_info = { .description = "CDC NCM", .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, .check_connect = cdc_ncm_check_connect, .manage_power = usbnet_manage_power, .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, }; /* Same as cdc_ncm_info, but with FLAG_WWAN */ static const struct driver_info wwan_info = { .description = "Mobile Broadband Network Device", .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, .check_connect = cdc_ncm_check_connect, .manage_power = usbnet_manage_power, .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, }; /* Same as wwan_info, but with FLAG_NOARP */ static const struct driver_info wwan_noarp_info = { .description = "Mobile Broadband Network Device (NO ARP)", .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_NOARP, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, .check_connect = cdc_ncm_check_connect, .manage_power = usbnet_manage_power, .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, }; static const struct usb_device_id cdc_devs[] = { /* Ericsson MBM devices like F5521gw */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | 
USB_DEVICE_ID_MATCH_VENDOR,	/* continuation of the Ericsson MBM entry */
	  .idVendor = 0x0bdb,
	  .bInterfaceClass = USB_CLASS_COMM,
	  .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
	  .bInterfaceProtocol = USB_CDC_PROTO_NONE,
	  .driver_info = (unsigned long) &wwan_info,
	},

	/* Dell branded MBM devices like DW5550 */
	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
		| USB_DEVICE_ID_MATCH_VENDOR,
	  .idVendor = 0x413c,
	  .bInterfaceClass = USB_CLASS_COMM,
	  .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
	  .bInterfaceProtocol = USB_CDC_PROTO_NONE,
	  .driver_info = (unsigned long) &wwan_info,
	},

	/* Toshiba branded MBM devices */
	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
		| USB_DEVICE_ID_MATCH_VENDOR,
	  .idVendor = 0x0930,
	  .bInterfaceClass = USB_CLASS_COMM,
	  .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
	  .bInterfaceProtocol = USB_CDC_PROTO_NONE,
	  .driver_info = (unsigned long) &wwan_info,
	},

	/* tag Huawei devices as wwan */
	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
					USB_CLASS_COMM,
					USB_CDC_SUBCLASS_NCM,
					USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&wwan_info,
	},

	/* Infineon(now Intel) HSPA Modem platform */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
		USB_CLASS_COMM,
		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&wwan_noarp_info,
	},

	/* Generic CDC-NCM devices */
	{ USB_INTERFACE_INFO(USB_CLASS_COMM,
		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&cdc_ncm_info,
	},
	{
	},	/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(usb, cdc_devs);

/*
 * USB driver glue.  All probe/disconnect/PM callbacks are the generic
 * usbnet ones; reset_resume is routed through the normal resume path.
 */
static struct usb_driver cdc_ncm_driver = {
	.name = "cdc_ncm",
	.id_table = cdc_devs,
	.probe = usbnet_probe,
	.disconnect = usbnet_disconnect,
	.suspend = usbnet_suspend,
	.resume = usbnet_resume,
	.reset_resume =	usbnet_resume,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(cdc_ncm_driver);

MODULE_AUTHOR("Hans Petter Selasky");
MODULE_DESCRIPTION("USB CDC NCM host driver");
MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
tobikausk/nest-simulator
nest/sli_neuron.cpp
11
7038
/* * sli_neuron.cpp * * This file is part of NEST. * * Copyright (C) 2004 The NEST Initiative * * NEST is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * NEST is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with NEST. If not, see <http://www.gnu.org/licenses/>. * */ #include "sli_neuron.h" // C++ includes: #include <limits> // Includes from libnestutil: #include "compose.hpp" #include "numerics.h" // includes from nest: #include "neststartup.h" // get_engine() // Includes from nestkernel: #include "event_delivery_manager_impl.h" #include "exceptions.h" #include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" #include "dictstack.h" #include "dictutils.h" #include "doubledatum.h" #include "integerdatum.h" /* ---------------------------------------------------------------- * Recordables map * ---------------------------------------------------------------- */ nest::RecordablesMap< nest::sli_neuron > nest::sli_neuron::recordablesMap_; namespace nest { // Override the create() method with one call to RecordablesMap::insert_() // for each quantity to be recorded. template <> void RecordablesMap< sli_neuron >::create() { // use standard names whereever you can for consistency! 
insert_( names::V_m, &sli_neuron::get_V_m_ ); } } nest::sli_neuron::Buffers_::Buffers_( sli_neuron& n ) : logger_( n ) { } nest::sli_neuron::Buffers_::Buffers_( const Buffers_&, sli_neuron& n ) : logger_( n ) { } /* ---------------------------------------------------------------- * Default and copy constructor for node * ---------------------------------------------------------------- */ nest::sli_neuron::sli_neuron() : Archiving_Node() , state_( new Dictionary() ) , B_( *this ) { // We add empty defaults for /calibrate and /update, so that the uninitialized // node runs without errors. state_->insert( names::calibrate, new ProcedureDatum() ); state_->insert( names::update, new ProcedureDatum() ); recordablesMap_.create(); } nest::sli_neuron::sli_neuron( const sli_neuron& n ) : Archiving_Node( n ) , state_( new Dictionary( *n.state_ ) ) , B_( n.B_, *this ) { init_state_( n ); } /* ---------------------------------------------------------------- * Node initialization functions * ---------------------------------------------------------------- */ void nest::sli_neuron::init_state_( const Node& proto ) { const sli_neuron& pr = downcast< sli_neuron >( proto ); state_ = DictionaryDatum( new Dictionary( *pr.state_ ) ); } void nest::sli_neuron::init_buffers_() { B_.ex_spikes_.clear(); // includes resize B_.in_spikes_.clear(); // includes resize B_.currents_.clear(); // includes resize B_.logger_.reset(); // includes resize Archiving_Node::clear_history(); } void nest::sli_neuron::calibrate() { B_.logger_.init(); if ( not state_->known( names::calibrate ) ) { std::string msg = String::compose( "Node %1 has no /calibrate function in its status dictionary.", get_gid() ); throw BadProperty( msg ); } if ( not state_->known( names::update ) ) { std::string msg = String::compose( "Node %1 has no /update function in its status dictionary", get_gid() ); throw BadProperty( msg ); } #pragma omp critical( sli_neuron ) { execute_sli_protected( state_, names::calibrate_node ); // call 
interpreter } } /* ---------------------------------------------------------------- * Update and spike handling functions */ void nest::sli_neuron::update( Time const& origin, const long from, const long to ) { assert( to >= 0 && ( delay ) from < kernel().connection_manager.get_min_delay() ); assert( from < to ); ( *state_ )[ names::t_origin ] = origin.get_steps(); if ( state_->known( names::error ) ) { std::string msg = String::compose( "Node %1 still has its error state set.", get_gid() ); throw KernelException( msg ); } for ( long lag = from; lag < to; ++lag ) { ( *state_ )[ names::in_spikes ] = B_.in_spikes_.get_value( lag ); // in spikes arriving at right border ( *state_ )[ names::ex_spikes ] = B_.ex_spikes_.get_value( lag ); // ex spikes arriving at right border ( *state_ )[ names::currents ] = B_.currents_.get_value( lag ); ( *state_ )[ names::t_lag ] = lag; #pragma omp critical( sli_neuron ) { execute_sli_protected( state_, names::update_node ); // call interpreter } bool spike_emission = false; if ( state_->known( names::spike ) ) { spike_emission = ( *state_ )[ names::spike ]; } // threshold crossing if ( spike_emission ) { set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; kernel().event_delivery_manager.send( *this, se, lag ); } B_.logger_.record_data( origin.get_steps() + lag ); } } /** * This function is not thread save and has to be called inside a omp critical * region. 
*/
int
nest::sli_neuron::execute_sli_protected( DictionaryDatum state, Name cmd )
{
  SLIInterpreter& i = get_engine();
  i.DStack->push( state ); // push state dictionary as top namespace
  size_t exitlevel = i.EStack.load();
  i.EStack.push( new NameDatum( cmd ) );
  int result = i.execute_( exitlevel );
  i.DStack->pop(); // pop neuron's namespace

  // The SLI code signals failure by leaving an "error" entry in the
  // neuron's status dictionary; convert that into a C++ exception here.
  if ( state->known( "error" ) )
  {
    assert( state->known( names::global_id ) );
    index g_id = ( *state )[ names::global_id ];
    std::string model = getValue< std::string >( ( *state )[ names::model ] );
    std::string msg = String::compose( "Error in %1 with global id %2.", model, g_id );
    throw KernelException( msg );
  }

  return result;
}

// Route an incoming spike into the excitatory or inhibitory ring buffer,
// depending on the sign of the connection weight.
void
nest::sli_neuron::handle( SpikeEvent& e )
{
  assert( e.get_delay() > 0 );

  if ( e.get_weight() > 0.0 )
  {
    B_.ex_spikes_.add_value(
      e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ),
      e.get_weight() * e.get_multiplicity() );
  }
  else
  {
    B_.in_spikes_.add_value(
      e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ),
      e.get_weight() * e.get_multiplicity() );
  }
}

// Accumulate an incoming current, scaled by the connection weight.
void
nest::sli_neuron::handle( CurrentEvent& e )
{
  assert( e.get_delay() > 0 );

  const double I = e.get_current();
  const double w = e.get_weight();

  // add weighted current; HEP 2002-10-04
  B_.currents_.add_value(
    e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * I );
}

// Forward recording requests to the universal data logger.
void
nest::sli_neuron::handle( DataLoggingRequest& e )
{
  B_.logger_.handle( e );
}
gpl-2.0
bayasist/vbox
src/VBox/Devices/EFI/Firmware/IntelFrameworkModulePkg/Library/LzmaCustomDecompressLib/GuidedSectionExtraction.c
11
8403
/** @file LZMA Decompress GUIDed Section Extraction Library. It wraps Lzma decompress interfaces to GUIDed Section Extraction interfaces and registers them into GUIDed handler table. Copyright (c) 2009 - 2011, Intel Corporation. All rights reserved.<BR> This program and the accompanying materials are licensed and made available under the terms and conditions of the BSD License which accompanies this distribution. The full text of the license may be found at http://opensource.org/licenses/bsd-license.php THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. **/ #include "LzmaDecompressLibInternal.h" /** Examines a GUIDed section and returns the size of the decoded buffer and the size of an scratch buffer required to actually decode the data in a GUIDed section. Examines a GUIDed section specified by InputSection. If GUID for InputSection does not match the GUID that this handler supports, then RETURN_UNSUPPORTED is returned. If the required information can not be retrieved from InputSection, then RETURN_INVALID_PARAMETER is returned. If the GUID of InputSection does match the GUID that this handler supports, then the size required to hold the decoded buffer is returned in OututBufferSize, the size of an optional scratch buffer is returned in ScratchSize, and the Attributes field from EFI_GUID_DEFINED_SECTION header of InputSection is returned in SectionAttribute. If InputSection is NULL, then ASSERT(). If OutputBufferSize is NULL, then ASSERT(). If ScratchBufferSize is NULL, then ASSERT(). If SectionAttribute is NULL, then ASSERT(). @param[in] InputSection A pointer to a GUIDed section of an FFS formatted file. @param[out] OutputBufferSize A pointer to the size, in bytes, of an output buffer required if the buffer specified by InputSection were decoded. 
@param[out] ScratchBufferSize A pointer to the size, in bytes, required as scratch space if the buffer specified by InputSection were decoded. @param[out] SectionAttribute A pointer to the attributes of the GUIDed section. See the Attributes field of EFI_GUID_DEFINED_SECTION in the PI Specification. @retval RETURN_SUCCESS The information about InputSection was returned. @retval RETURN_UNSUPPORTED The section specified by InputSection does not match the GUID this handler supports. @retval RETURN_INVALID_PARAMETER The information can not be retrieved from the section specified by InputSection. **/ RETURN_STATUS EFIAPI LzmaGuidedSectionGetInfo ( IN CONST VOID *InputSection, OUT UINT32 *OutputBufferSize, OUT UINT32 *ScratchBufferSize, OUT UINT16 *SectionAttribute ) { ASSERT (InputSection != NULL); ASSERT (OutputBufferSize != NULL); ASSERT (ScratchBufferSize != NULL); ASSERT (SectionAttribute != NULL); if (IS_SECTION2 (InputSection)) { if (!CompareGuid ( &gLzmaCustomDecompressGuid, &(((EFI_GUID_DEFINED_SECTION2 *) InputSection)->SectionDefinitionGuid))) { return RETURN_INVALID_PARAMETER; } *SectionAttribute = ((EFI_GUID_DEFINED_SECTION2 *) InputSection)->Attributes; return LzmaUefiDecompressGetInfo ( (UINT8 *) InputSection + ((EFI_GUID_DEFINED_SECTION2 *) InputSection)->DataOffset, SECTION2_SIZE (InputSection) - ((EFI_GUID_DEFINED_SECTION2 *) InputSection)->DataOffset, OutputBufferSize, ScratchBufferSize ); } else { if (!CompareGuid ( &gLzmaCustomDecompressGuid, &(((EFI_GUID_DEFINED_SECTION *) InputSection)->SectionDefinitionGuid))) { return RETURN_INVALID_PARAMETER; } *SectionAttribute = ((EFI_GUID_DEFINED_SECTION *) InputSection)->Attributes; return LzmaUefiDecompressGetInfo ( (UINT8 *) InputSection + ((EFI_GUID_DEFINED_SECTION *) InputSection)->DataOffset, SECTION_SIZE (InputSection) - ((EFI_GUID_DEFINED_SECTION *) InputSection)->DataOffset, OutputBufferSize, ScratchBufferSize ); } } /** Decompress a LZAM compressed GUIDed section into a caller allocated output 
buffer. Decodes the GUIDed section specified by InputSection. If GUID for InputSection does not match the GUID that this handler supports, then RETURN_UNSUPPORTED is returned. If the data in InputSection can not be decoded, then RETURN_INVALID_PARAMETER is returned. If the GUID of InputSection does match the GUID that this handler supports, then InputSection is decoded into the buffer specified by OutputBuffer and the authentication status of this decode operation is returned in AuthenticationStatus. If the decoded buffer is identical to the data in InputSection, then OutputBuffer is set to point at the data in InputSection. Otherwise, the decoded data will be placed in caller allocated buffer specified by OutputBuffer. If InputSection is NULL, then ASSERT(). If OutputBuffer is NULL, then ASSERT(). If ScratchBuffer is NULL and this decode operation requires a scratch buffer, then ASSERT(). If AuthenticationStatus is NULL, then ASSERT(). @param[in] InputSection A pointer to a GUIDed section of an FFS formatted file. @param[out] OutputBuffer A pointer to a buffer that contains the result of a decode operation. @param[out] ScratchBuffer A caller allocated buffer that may be required by this function as a scratch buffer to perform the decode operation. @param[out] AuthenticationStatus A pointer to the authentication status of the decoded output buffer. See the definition of authentication status in the EFI_PEI_GUIDED_SECTION_EXTRACTION_PPI section of the PI Specification. EFI_AUTH_STATUS_PLATFORM_OVERRIDE must never be set by this handler. @retval RETURN_SUCCESS The buffer specified by InputSection was decoded. @retval RETURN_UNSUPPORTED The section specified by InputSection does not match the GUID this handler supports. @retval RETURN_INVALID_PARAMETER The section specified by InputSection can not be decoded. 
**/ RETURN_STATUS EFIAPI LzmaGuidedSectionExtraction ( IN CONST VOID *InputSection, OUT VOID **OutputBuffer, OUT VOID *ScratchBuffer, OPTIONAL OUT UINT32 *AuthenticationStatus ) { ASSERT (OutputBuffer != NULL); ASSERT (InputSection != NULL); if (IS_SECTION2 (InputSection)) { if (!CompareGuid ( &gLzmaCustomDecompressGuid, &(((EFI_GUID_DEFINED_SECTION2 *) InputSection)->SectionDefinitionGuid))) { return RETURN_INVALID_PARAMETER; } // // Authentication is set to Zero, which may be ignored. // *AuthenticationStatus = 0; return LzmaUefiDecompress ( (UINT8 *) InputSection + ((EFI_GUID_DEFINED_SECTION2 *) InputSection)->DataOffset, SECTION2_SIZE (InputSection) - ((EFI_GUID_DEFINED_SECTION2 *) InputSection)->DataOffset, *OutputBuffer, ScratchBuffer ); } else { if (!CompareGuid ( &gLzmaCustomDecompressGuid, &(((EFI_GUID_DEFINED_SECTION *) InputSection)->SectionDefinitionGuid))) { return RETURN_INVALID_PARAMETER; } // // Authentication is set to Zero, which may be ignored. // *AuthenticationStatus = 0; return LzmaUefiDecompress ( (UINT8 *) InputSection + ((EFI_GUID_DEFINED_SECTION *) InputSection)->DataOffset, SECTION_SIZE (InputSection) - ((EFI_GUID_DEFINED_SECTION *) InputSection)->DataOffset, *OutputBuffer, ScratchBuffer ); } } /** Register LzmaDecompress and LzmaDecompressGetInfo handlers with LzmaCustomerDecompressGuid. @retval RETURN_SUCCESS Register successfully. @retval RETURN_OUT_OF_RESOURCES No enough memory to store this handler. **/ EFI_STATUS EFIAPI LzmaDecompressLibConstructor ( ) { return ExtractGuidedSectionRegisterHandlers ( &gLzmaCustomDecompressGuid, LzmaGuidedSectionGetInfo, LzmaGuidedSectionExtraction ); }
gpl-2.0
binhqnguyen/ln
nsc/linux-2.6.18/net/ipv6/proc.c
11
9214
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * This file implements the various access functions for the * PROC file system. This is very similar to the IPv4 version, * except it reports the sockets in the INET6 address family. * * Version: $Id: proc.c,v 1.17 2002/02/01 22:01:04 davem Exp $ * * Authors: David S. Miller (davem@caip.rutgers.edu) * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/ipv6.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stddef.h> #include <net/sock.h> #include <net/tcp.h> #include <net/transp_v6.h> #include <net/ipv6.h> #ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_net_devsnmp6; static int fold_prot_inuse(struct proto *proto) { int res = 0; int cpu; for_each_possible_cpu(cpu) res += proto->stats[cpu].inuse; return res; } static int sockstat6_seq_show(struct seq_file *seq, void *v) { seq_printf(seq, "TCP6: inuse %d\n", fold_prot_inuse(&tcpv6_prot)); seq_printf(seq, "UDP6: inuse %d\n", fold_prot_inuse(&udpv6_prot)); seq_printf(seq, "RAW6: inuse %d\n", fold_prot_inuse(&rawv6_prot)); seq_printf(seq, "FRAG6: inuse %d memory %d\n", ip6_frag_nqueues, atomic_read(&ip6_frag_mem)); return 0; } static struct snmp_mib snmp6_ipstats_list[] = { /* ipv6 mib according to RFC 2465 */ SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INRECEIVES), SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS), SNMP_MIB_ITEM("Ip6InTooBigErrors", IPSTATS_MIB_INTOOBIGERRORS), SNMP_MIB_ITEM("Ip6InNoRoutes", IPSTATS_MIB_INNOROUTES), 
SNMP_MIB_ITEM("Ip6InAddrErrors", IPSTATS_MIB_INADDRERRORS), SNMP_MIB_ITEM("Ip6InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS), SNMP_MIB_ITEM("Ip6InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS), SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS), SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS), SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS), SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTREQUESTS), SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS), SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES), SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT), SNMP_MIB_ITEM("Ip6ReasmReqds", IPSTATS_MIB_REASMREQDS), SNMP_MIB_ITEM("Ip6ReasmOKs", IPSTATS_MIB_REASMOKS), SNMP_MIB_ITEM("Ip6ReasmFails", IPSTATS_MIB_REASMFAILS), SNMP_MIB_ITEM("Ip6FragOKs", IPSTATS_MIB_FRAGOKS), SNMP_MIB_ITEM("Ip6FragFails", IPSTATS_MIB_FRAGFAILS), SNMP_MIB_ITEM("Ip6FragCreates", IPSTATS_MIB_FRAGCREATES), SNMP_MIB_ITEM("Ip6InMcastPkts", IPSTATS_MIB_INMCASTPKTS), SNMP_MIB_ITEM("Ip6OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS), SNMP_MIB_SENTINEL }; static struct snmp_mib snmp6_icmp6_list[] = { /* icmpv6 mib according to RFC 2466 Exceptions: {In|Out}AdminProhibs are removed, because I see no good reasons to account them separately of another dest.unreachs. OutErrs is zero identically. OutEchos too. OutRouterAdvertisements too. OutGroupMembQueries too. 
*/ SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), SNMP_MIB_ITEM("Icmp6InDestUnreachs", ICMP6_MIB_INDESTUNREACHS), SNMP_MIB_ITEM("Icmp6InPktTooBigs", ICMP6_MIB_INPKTTOOBIGS), SNMP_MIB_ITEM("Icmp6InTimeExcds", ICMP6_MIB_INTIMEEXCDS), SNMP_MIB_ITEM("Icmp6InParmProblems", ICMP6_MIB_INPARMPROBLEMS), SNMP_MIB_ITEM("Icmp6InEchos", ICMP6_MIB_INECHOS), SNMP_MIB_ITEM("Icmp6InEchoReplies", ICMP6_MIB_INECHOREPLIES), SNMP_MIB_ITEM("Icmp6InGroupMembQueries", ICMP6_MIB_INGROUPMEMBQUERIES), SNMP_MIB_ITEM("Icmp6InGroupMembResponses", ICMP6_MIB_INGROUPMEMBRESPONSES), SNMP_MIB_ITEM("Icmp6InGroupMembReductions", ICMP6_MIB_INGROUPMEMBREDUCTIONS), SNMP_MIB_ITEM("Icmp6InRouterSolicits", ICMP6_MIB_INROUTERSOLICITS), SNMP_MIB_ITEM("Icmp6InRouterAdvertisements", ICMP6_MIB_INROUTERADVERTISEMENTS), SNMP_MIB_ITEM("Icmp6InNeighborSolicits", ICMP6_MIB_INNEIGHBORSOLICITS), SNMP_MIB_ITEM("Icmp6InNeighborAdvertisements", ICMP6_MIB_INNEIGHBORADVERTISEMENTS), SNMP_MIB_ITEM("Icmp6InRedirects", ICMP6_MIB_INREDIRECTS), SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), SNMP_MIB_ITEM("Icmp6OutDestUnreachs", ICMP6_MIB_OUTDESTUNREACHS), SNMP_MIB_ITEM("Icmp6OutPktTooBigs", ICMP6_MIB_OUTPKTTOOBIGS), SNMP_MIB_ITEM("Icmp6OutTimeExcds", ICMP6_MIB_OUTTIMEEXCDS), SNMP_MIB_ITEM("Icmp6OutParmProblems", ICMP6_MIB_OUTPARMPROBLEMS), SNMP_MIB_ITEM("Icmp6OutEchoReplies", ICMP6_MIB_OUTECHOREPLIES), SNMP_MIB_ITEM("Icmp6OutRouterSolicits", ICMP6_MIB_OUTROUTERSOLICITS), SNMP_MIB_ITEM("Icmp6OutNeighborSolicits", ICMP6_MIB_OUTNEIGHBORSOLICITS), SNMP_MIB_ITEM("Icmp6OutNeighborAdvertisements", ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS), SNMP_MIB_ITEM("Icmp6OutRedirects", ICMP6_MIB_OUTREDIRECTS), SNMP_MIB_ITEM("Icmp6OutGroupMembResponses", ICMP6_MIB_OUTGROUPMEMBRESPONSES), SNMP_MIB_ITEM("Icmp6OutGroupMembReductions", ICMP6_MIB_OUTGROUPMEMBREDUCTIONS), SNMP_MIB_SENTINEL }; static struct snmp_mib snmp6_udp6_list[] = { SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS), 
SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS), SNMP_MIB_SENTINEL }; static unsigned long fold_field(void *mib[], int offt) { unsigned long res = 0; int i; for_each_possible_cpu(i) { res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); } return res; } static inline void snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist) { int i; for (i=0; itemlist[i].name; i++) seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, fold_field(mib, itemlist[i].entry)); } static int snmp6_seq_show(struct seq_file *seq, void *v) { struct inet6_dev *idev = (struct inet6_dev *)seq->private; if (idev) { seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list); } else { snmp6_seq_show_item(seq, (void **)ipv6_statistics, snmp6_ipstats_list); snmp6_seq_show_item(seq, (void **)icmpv6_statistics, snmp6_icmp6_list); snmp6_seq_show_item(seq, (void **)udp_stats_in6, snmp6_udp6_list); } return 0; } static int sockstat6_seq_open(struct inode *inode, struct file *file) { return single_open(file, sockstat6_seq_show, NULL); } static struct file_operations sockstat6_seq_fops = { .owner = THIS_MODULE, .open = sockstat6_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int snmp6_seq_open(struct inode *inode, struct file *file) { return single_open(file, snmp6_seq_show, PDE(inode)->data); } static struct file_operations snmp6_seq_fops = { .owner = THIS_MODULE, .open = snmp6_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; int snmp6_register_dev(struct inet6_dev *idev) { struct proc_dir_entry *p; if (!idev || !idev->dev) return -EINVAL; if (!proc_net_devsnmp6) return -ENOENT; p = create_proc_entry(idev->dev->name, S_IRUGO, proc_net_devsnmp6); if (!p) return -ENOMEM; 
p->data = idev;
	p->proc_fops = &snmp6_seq_fops;

	idev->stats.proc_dir_entry = p;
	return 0;
}

/* Remove the per-device SNMP entry created by snmp6_register_dev(). */
int snmp6_unregister_dev(struct inet6_dev *idev)
{
	if (!proc_net_devsnmp6)
		return -ENOENT;
	if (!idev || !idev->stats.proc_dir_entry)
		return -EINVAL;
	remove_proc_entry(idev->stats.proc_dir_entry->name,
			  proc_net_devsnmp6);
	return 0;
}

/*
 * Create the global /proc/net entries: snmp6, the dev_snmp6 directory
 * and sockstat6.  On any failure, the entries created so far are
 * removed again via the fall-through labels below and -ENOMEM is
 * returned.
 */
int __init ipv6_misc_proc_init(void)
{
	int rc = 0;

	if (!proc_net_fops_create("snmp6", S_IRUGO, &snmp6_seq_fops))
		goto proc_snmp6_fail;

	proc_net_devsnmp6 = proc_mkdir("dev_snmp6", proc_net);
	if (!proc_net_devsnmp6)
		goto proc_dev_snmp6_fail;

	if (!proc_net_fops_create("sockstat6", S_IRUGO, &sockstat6_seq_fops))
		goto proc_sockstat6_fail;
out:
	return rc;

proc_sockstat6_fail:	/* fall through: unwind in reverse creation order */
	proc_net_remove("dev_snmp6");
proc_dev_snmp6_fail:
	proc_net_remove("snmp6");
proc_snmp6_fail:
	rc = -ENOMEM;
	goto out;
}

/* Tear down everything created by ipv6_misc_proc_init(). */
void ipv6_misc_proc_exit(void)
{
	proc_net_remove("sockstat6");
	proc_net_remove("dev_snmp6");
	proc_net_remove("snmp6");
}

#else	/* CONFIG_PROC_FS */

/* Without procfs there is nothing to register; report success. */
int snmp6_register_dev(struct inet6_dev *idev)
{
	return 0;
}

int snmp6_unregister_dev(struct inet6_dev *idev)
{
	return 0;
}

#endif	/* CONFIG_PROC_FS */

/*
 * Allocate the per-device ICMPv6 MIB counters.
 * Returns 0 on success, -EINVAL on bad arguments and -ENOMEM if the
 * per-CPU MIB allocation fails.
 */
int snmp6_alloc_dev(struct inet6_dev *idev)
{
	int err = -ENOMEM;

	if (!idev || !idev->dev)
		return -EINVAL;

	if (snmp6_mib_init((void **)idev->stats.icmpv6,
			   sizeof(struct icmpv6_mib),
			   __alignof__(struct icmpv6_mib)) < 0)
		goto err_icmp;

	return 0;

err_icmp:
	return err;
}

/* Free the counters allocated by snmp6_alloc_dev(). */
int snmp6_free_dev(struct inet6_dev *idev)
{
	snmp6_mib_free((void **)idev->stats.icmpv6);
	return 0;
}
gpl-2.0
M-Scholli/XCSoar
src/Task/TypeStrings.cpp
11
5439
/* Copyright_License { XCSoar Glide Computer - http://www.xcsoar.org/ Copyright (C) 2000-2015 The XCSoar Project A detailed list of copyright holders can be found in the file "AUTHORS". This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. } */ #include "TypeStrings.hpp" #include "Engine/Task/Factory/TaskFactoryType.hpp" #include "Engine/Task/Factory/TaskPointFactoryType.hpp" #include "Language/Language.hpp" #include "Util/Macros.hpp" static const TCHAR *const task_factory_names[] = { N_("FAI badges/records"), N_("FAI triangle"), N_("FAI out and return"), N_("FAI goal"), N_("Racing"), N_("AAT"), N_("Modified area task (MAT)"), N_("Mixed"), N_("Touring"), }; static_assert(ARRAY_SIZE(task_factory_names) == unsigned(TaskFactoryType::COUNT), "Wrong array size"); const TCHAR* OrderedTaskFactoryName(TaskFactoryType type) { return gettext(task_factory_names[unsigned(type)]); } static const TCHAR *const task_factory_descriptions[] = { N_("FAI rules, allows only FAI start, finish and turn point types, for badges and " "records. Enables FAI finish height for final glide calculation."), N_("FAI rules, path from a start to two turn points and return."), N_("FAI rules, path from start to a single turn point and return."), N_("FAI rules, path from start to a goal destination."), N_("Racing task around turn points. Can also be used for FAI badge and record tasks. 
" "Allows all shapes of observation zones."), N_("Task through assigned areas, minimum task time applies. Restricted to cylinder " "and sector observation zones."), N_("Modified area task. Task with start, finish and at least one predefined 1-mile cylinder. Pilot can add additional points as needed. Minimum task time applies."), N_("Racing task with a mix of assigned areas and turn points, minimum task time applies."), N_("Casual touring task, uses start and finish cylinders and FAI sector turn points."), }; static_assert(ARRAY_SIZE(task_factory_descriptions) == unsigned(TaskFactoryType::COUNT), "Wrong array size"); const TCHAR* OrderedTaskFactoryDescription(TaskFactoryType type) { return gettext(task_factory_descriptions[unsigned(type)]); } static const TCHAR *const tp_factory_descriptions[] = { N_("A 90 degree sector with 1km radius. Cross corner edge from inside area to start."), N_("A straight line start gate. Cross start gate from inside area to start."), N_("A cylinder. Exit area to start."), N_("A 90 degree sector with 'infinite' length sides. Cross any edge, scored from " "corner point."), N_("(German rules) Any point within 1/2 km of center or 10km of a 90 degree sector. " "Scored from center."), N_("(British rules) Any point within 1/2 km of center or 20km of a 90 degree sector. " "Scored from center."), N_("(British rules) Any point within 1/2 km of center or 10km of a 180 degree sector. " "Scored from center."), N_("A cylinder. Any point within area scored from center."), N_("A 1 mile cylinder. Scored by farthest point reached in area."), N_("A cylinder. Scored by farthest point reached in area."), N_("A sector that can vary in angle and radius. Scored by farthest point reached " "inside area."), N_("A 90 degree sector with 1km radius. Cross edge to finish."), N_("Cross finish gate line into area to finish."), N_("Enter cylinder to finish."), N_("A 180 degree sector with 5km radius. 
Exit area in any direction to start."), N_("A sector that can vary in angle, inner and outer radius. Scored by farthest point " "reached inside area."), N_("A symmetric quadrant with a custom radius."), N_("A keyhole. Scored by farthest point reached in area."), }; static_assert(ARRAY_SIZE(tp_factory_descriptions) == unsigned(TaskPointFactoryType::COUNT), "Wrong array size"); const TCHAR* OrderedTaskPointDescription(TaskPointFactoryType type) { return tp_factory_descriptions[unsigned(type)]; } static const TCHAR *const tp_factory_names[] = { N_("FAI start quadrant"), N_("Start line"), N_("Start cylinder"), N_("FAI quadrant"), N_("Keyhole sector (DAeC)"), N_("BGA Fixed Course sector"), N_("BGA Enhanced Option Fixed Course sector"), N_("Turn point cylinder"), N_("Cylinder with 1 mile radius."), N_("Area cylinder"), N_("Area sector"), N_("FAI finish quadrant"), N_("Finish line"), N_("Finish cylinder"), N_("BGA start sector"), N_("Area sector with inner radius"), N_("Symmetric quadrant"), N_("Area keyhole"), }; static_assert(ARRAY_SIZE(tp_factory_names) == unsigned(TaskPointFactoryType::COUNT), "Wrong array size"); const TCHAR* OrderedTaskPointName(TaskPointFactoryType type) { return tp_factory_names[unsigned(type)]; }
gpl-2.0
vapier/binutils-gdb
gas/config/tc-ia64.c
11
307146
/* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture. Copyright (C) 1998-2015 Free Software Foundation, Inc. Contributed by David Mosberger-Tang <davidm@hpl.hp.com> This file is part of GAS, the GNU Assembler. GAS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GAS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GAS; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* TODO: - optional operands - directives: .eb .estate .lb .popsection .previous .psr .pushsection - labels are wrong if automatic alignment is introduced (e.g., checkout the second real10 definition in test-data.s) - DV-related stuff: <reg>.safe_across_calls and any other DV-related directives I don't have documentation for. verify mod-sched-brs reads/writes are checked/marked (and other notes) */ #include "as.h" #include "safe-ctype.h" #include "dwarf2dbg.h" #include "subsegs.h" #include "opcode/ia64.h" #include "elf/ia64.h" #include "bfdver.h" #include <time.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0]))) /* Some systems define MIN in, e.g., param.h. */ #undef MIN #define MIN(a,b) ((a) < (b) ? (a) : (b)) #define NUM_SLOTS 4 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS] #define CURR_SLOT md.slot[md.curr_slot] #define O_pseudo_fixup (O_max + 1) enum special_section { /* IA-64 ABI section pseudo-ops. 
*/ SPECIAL_SECTION_BSS = 0, SPECIAL_SECTION_SBSS, SPECIAL_SECTION_SDATA, SPECIAL_SECTION_RODATA, SPECIAL_SECTION_COMMENT, SPECIAL_SECTION_UNWIND, SPECIAL_SECTION_UNWIND_INFO, /* HPUX specific section pseudo-ops. */ SPECIAL_SECTION_INIT_ARRAY, SPECIAL_SECTION_FINI_ARRAY, }; enum reloc_func { FUNC_DTP_MODULE, FUNC_DTP_RELATIVE, FUNC_FPTR_RELATIVE, FUNC_GP_RELATIVE, FUNC_LT_RELATIVE, FUNC_LT_RELATIVE_X, FUNC_PC_RELATIVE, FUNC_PLT_RELATIVE, FUNC_SEC_RELATIVE, FUNC_SEG_RELATIVE, FUNC_TP_RELATIVE, FUNC_LTV_RELATIVE, FUNC_LT_FPTR_RELATIVE, FUNC_LT_DTP_MODULE, FUNC_LT_DTP_RELATIVE, FUNC_LT_TP_RELATIVE, FUNC_IPLT_RELOC, #ifdef TE_VMS FUNC_SLOTCOUNT_RELOC, #endif }; enum reg_symbol { REG_GR = 0, REG_FR = (REG_GR + 128), REG_AR = (REG_FR + 128), REG_CR = (REG_AR + 128), REG_DAHR = (REG_CR + 128), REG_P = (REG_DAHR + 8), REG_BR = (REG_P + 64), REG_IP = (REG_BR + 8), REG_CFM, REG_PR, REG_PR_ROT, REG_PSR, REG_PSR_L, REG_PSR_UM, /* The following are pseudo-registers for use by gas only. */ IND_CPUID, IND_DBR, IND_DTR, IND_ITR, IND_IBR, IND_MSR, IND_PKR, IND_PMC, IND_PMD, IND_DAHR, IND_RR, /* The following pseudo-registers are used for unwind directives only: */ REG_PSP, REG_PRIUNAT, REG_NUM }; enum dynreg_type { DYNREG_GR = 0, /* dynamic general purpose register */ DYNREG_FR, /* dynamic floating point register */ DYNREG_PR, /* dynamic predicate register */ DYNREG_NUM_TYPES }; enum operand_match_result { OPERAND_MATCH, OPERAND_OUT_OF_RANGE, OPERAND_MISMATCH }; /* On the ia64, we can't know the address of a text label until the instructions are packed into a bundle. To handle this, we keep track of the list of labels that appear in front of each instruction. */ struct label_fix { struct label_fix *next; struct symbol *sym; bfd_boolean dw2_mark_labels; }; #ifdef TE_VMS /* An internally used relocation. */ #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1) #endif /* This is the endianness of the current section. 
*/ extern int target_big_endian; /* This is the default endianness. */ static int default_big_endian = TARGET_BYTES_BIG_ENDIAN; void (*ia64_number_to_chars) (char *, valueT, int); static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int); static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int); static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int); static struct hash_control *alias_hash; static struct hash_control *alias_name_hash; static struct hash_control *secalias_hash; static struct hash_control *secalias_name_hash; /* List of chars besides those in app.c:symbol_chars that can start an operand. Used to prevent the scrubber eating vital white-space. */ const char ia64_symbol_chars[] = "@?"; /* Characters which always start a comment. */ const char comment_chars[] = ""; /* Characters which start a comment at the beginning of a line. */ const char line_comment_chars[] = "#"; /* Characters which may be used to separate multiple commands on a single line. */ const char line_separator_chars[] = ";{}"; /* Characters which are used to indicate an exponent in a floating point number. */ const char EXP_CHARS[] = "eE"; /* Characters which mean that a number is a floating point constant, as in 0d1.0. 
*/ const char FLT_CHARS[] = "rRsSfFdDxXpP"; /* ia64-specific option processing: */ const char *md_shortopts = "m:N:x::"; struct option md_longopts[] = { #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1) {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP}, #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2) {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC} }; size_t md_longopts_size = sizeof (md_longopts); static struct { struct hash_control *pseudo_hash; /* pseudo opcode hash table */ struct hash_control *reg_hash; /* register name hash table */ struct hash_control *dynreg_hash; /* dynamic register hash table */ struct hash_control *const_hash; /* constant hash table */ struct hash_control *entry_hash; /* code entry hint hash table */ /* If X_op is != O_absent, the registername for the instruction's qualifying predicate. If NULL, p0 is assumed for instructions that are predictable. */ expressionS qp; /* Optimize for which CPU. */ enum { itanium1, itanium2 } tune; /* What to do when hint.b is used. */ enum { hint_b_error, hint_b_warning, hint_b_ok } hint_b; unsigned int manual_bundling : 1, debug_dv: 1, detect_dv: 1, explicit_mode : 1, /* which mode we're in */ default_explicit_mode : 1, /* which mode is the default */ mode_explicitly_set : 1, /* was the current mode explicitly set? */ auto_align : 1, keep_pending_output : 1; /* What to do when something is wrong with unwind directives. */ enum { unwind_check_warning, unwind_check_error } unwind_check; /* Each bundle consists of up to three instructions. We keep track of four most recent instructions so we can correctly set the end_of_insn_group for the last instruction in a bundle. 
*/ int curr_slot; int num_slots_in_use; struct slot { unsigned int end_of_insn_group : 1, manual_bundling_on : 1, manual_bundling_off : 1, loc_directive_seen : 1; signed char user_template; /* user-selected template, if any */ unsigned char qp_regno; /* qualifying predicate */ /* This duplicates a good fraction of "struct fix" but we can't use a "struct fix" instead since we can't call fix_new_exp() until we know the address of the instruction. */ int num_fixups; struct insn_fix { bfd_reloc_code_real_type code; enum ia64_opnd opnd; /* type of operand in need of fix */ unsigned int is_pcrel : 1; /* is operand pc-relative? */ expressionS expr; /* the value to be inserted */ } fixup[2]; /* at most two fixups per insn */ struct ia64_opcode *idesc; struct label_fix *label_fixups; struct label_fix *tag_fixups; struct unw_rec_list *unwind_record; /* Unwind directive. */ expressionS opnd[6]; char *src_file; unsigned int src_line; struct dwarf2_line_info debug_line; } slot[NUM_SLOTS]; segT last_text_seg; struct dynreg { struct dynreg *next; /* next dynamic register */ const char *name; unsigned short base; /* the base register number */ unsigned short num_regs; /* # of registers in this set */ } *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot; flagword flags; /* ELF-header flags */ struct mem_offset { unsigned hint:1; /* is this hint currently valid? */ bfd_vma offset; /* mem.offset offset */ bfd_vma base; /* mem.offset base */ } mem_offset; int path; /* number of alt. entry points seen */ const char **entry_labels; /* labels of all alternate paths in the current DV-checking block. */ int maxpaths; /* size currently allocated for entry_labels */ int pointer_size; /* size in bytes of a pointer */ int pointer_size_shift; /* shift size of a pointer for alignment */ symbolS *indregsym[IND_RR - IND_CPUID + 1]; } md; /* These are not const, because they are modified to MMI for non-itanium1 targets below. */ /* MFI bundle of nops. 
*/ static unsigned char le_nop[16] = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00 }; /* MFI bundle of nops with stop-bit. */ static unsigned char le_nop_stop[16] = { 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00 }; /* application registers: */ #define AR_K0 0 #define AR_K7 7 #define AR_RSC 16 #define AR_BSP 17 #define AR_BSPSTORE 18 #define AR_RNAT 19 #define AR_FCR 21 #define AR_EFLAG 24 #define AR_CSD 25 #define AR_SSD 26 #define AR_CFLG 27 #define AR_FSR 28 #define AR_FIR 29 #define AR_FDR 30 #define AR_CCV 32 #define AR_UNAT 36 #define AR_FPSR 40 #define AR_ITC 44 #define AR_RUC 45 #define AR_PFS 64 #define AR_LC 65 #define AR_EC 66 static const struct { const char *name; unsigned int regnum; } ar[] = { {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1}, {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3}, {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5}, {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7}, {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP}, {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT}, {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG}, {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD}, {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR}, {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR}, {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT}, {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC}, {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS}, {"ar.lc", AR_LC}, {"ar.ec", AR_EC}, }; /* control registers: */ #define CR_DCR 0 #define CR_ITM 1 #define CR_IVA 2 #define CR_PTA 8 #define CR_GPTA 9 #define CR_IPSR 16 #define CR_ISR 17 #define CR_IIP 19 #define CR_IFA 20 #define CR_ITIR 21 #define CR_IIPA 22 #define CR_IFS 23 #define CR_IIM 24 #define CR_IHA 25 #define CR_IIB0 26 #define CR_IIB1 27 #define CR_LID 64 #define CR_IVR 65 #define CR_TPR 66 #define CR_EOI 67 #define CR_IRR0 68 #define CR_IRR3 71 #define CR_ITV 72 #define CR_PMV 73 #define CR_CMCV 74 #define CR_LRR0 80 #define CR_LRR1 81 static const struct { const char *name; unsigned int regnum; } cr[] = { 
{"cr.dcr", CR_DCR}, {"cr.itm", CR_ITM}, {"cr.iva", CR_IVA}, {"cr.pta", CR_PTA}, {"cr.gpta", CR_GPTA}, {"cr.ipsr", CR_IPSR}, {"cr.isr", CR_ISR}, {"cr.iip", CR_IIP}, {"cr.ifa", CR_IFA}, {"cr.itir", CR_ITIR}, {"cr.iipa", CR_IIPA}, {"cr.ifs", CR_IFS}, {"cr.iim", CR_IIM}, {"cr.iha", CR_IHA}, {"cr.iib0", CR_IIB0}, {"cr.iib1", CR_IIB1}, {"cr.lid", CR_LID}, {"cr.ivr", CR_IVR}, {"cr.tpr", CR_TPR}, {"cr.eoi", CR_EOI}, {"cr.irr0", CR_IRR0}, {"cr.irr1", CR_IRR0 + 1}, {"cr.irr2", CR_IRR0 + 2}, {"cr.irr3", CR_IRR3}, {"cr.itv", CR_ITV}, {"cr.pmv", CR_PMV}, {"cr.cmcv", CR_CMCV}, {"cr.lrr0", CR_LRR0}, {"cr.lrr1", CR_LRR1} }; #define PSR_MFL 4 #define PSR_IC 13 #define PSR_DFL 18 #define PSR_CPL 32 static const struct const_desc { const char *name; valueT value; } const_bits[] = { /* PSR constant masks: */ /* 0: reserved */ {"psr.be", ((valueT) 1) << 1}, {"psr.up", ((valueT) 1) << 2}, {"psr.ac", ((valueT) 1) << 3}, {"psr.mfl", ((valueT) 1) << 4}, {"psr.mfh", ((valueT) 1) << 5}, /* 6-12: reserved */ {"psr.ic", ((valueT) 1) << 13}, {"psr.i", ((valueT) 1) << 14}, {"psr.pk", ((valueT) 1) << 15}, /* 16: reserved */ {"psr.dt", ((valueT) 1) << 17}, {"psr.dfl", ((valueT) 1) << 18}, {"psr.dfh", ((valueT) 1) << 19}, {"psr.sp", ((valueT) 1) << 20}, {"psr.pp", ((valueT) 1) << 21}, {"psr.di", ((valueT) 1) << 22}, {"psr.si", ((valueT) 1) << 23}, {"psr.db", ((valueT) 1) << 24}, {"psr.lp", ((valueT) 1) << 25}, {"psr.tb", ((valueT) 1) << 26}, {"psr.rt", ((valueT) 1) << 27}, /* 28-31: reserved */ /* 32-33: cpl (current privilege level) */ {"psr.is", ((valueT) 1) << 34}, {"psr.mc", ((valueT) 1) << 35}, {"psr.it", ((valueT) 1) << 36}, {"psr.id", ((valueT) 1) << 37}, {"psr.da", ((valueT) 1) << 38}, {"psr.dd", ((valueT) 1) << 39}, {"psr.ss", ((valueT) 1) << 40}, /* 41-42: ri (restart instruction) */ {"psr.ed", ((valueT) 1) << 43}, {"psr.bn", ((valueT) 1) << 44}, }; /* indirect register-sets/memory: */ static const struct { const char *name; unsigned int regnum; } indirect_reg[] = { { "CPUID", IND_CPUID 
}, { "cpuid", IND_CPUID }, { "dbr", IND_DBR }, { "dtr", IND_DTR }, { "itr", IND_ITR }, { "ibr", IND_IBR }, { "msr", IND_MSR }, { "pkr", IND_PKR }, { "pmc", IND_PMC }, { "pmd", IND_PMD }, { "dahr", IND_DAHR }, { "rr", IND_RR }, }; /* Pseudo functions used to indicate relocation types (these functions start with an at sign (@). */ static struct { const char *name; enum pseudo_type { PSEUDO_FUNC_NONE, PSEUDO_FUNC_RELOC, PSEUDO_FUNC_CONST, PSEUDO_FUNC_REG, PSEUDO_FUNC_FLOAT } type; union { unsigned long ival; symbolS *sym; } u; } pseudo_func[] = { /* reloc pseudo functions (these must come first!): */ { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } }, { "dtprel", PSEUDO_FUNC_RELOC, { 0 } }, { "fptr", PSEUDO_FUNC_RELOC, { 0 } }, { "gprel", PSEUDO_FUNC_RELOC, { 0 } }, { "ltoff", PSEUDO_FUNC_RELOC, { 0 } }, { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } }, { "pcrel", PSEUDO_FUNC_RELOC, { 0 } }, { "pltoff", PSEUDO_FUNC_RELOC, { 0 } }, { "secrel", PSEUDO_FUNC_RELOC, { 0 } }, { "segrel", PSEUDO_FUNC_RELOC, { 0 } }, { "tprel", PSEUDO_FUNC_RELOC, { 0 } }, { "ltv", PSEUDO_FUNC_RELOC, { 0 } }, { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */ { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */ { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */ { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */ { "iplt", PSEUDO_FUNC_RELOC, { 0 } }, #ifdef TE_VMS { "slotcount", PSEUDO_FUNC_RELOC, { 0 } }, #endif /* mbtype4 constants: */ { "alt", PSEUDO_FUNC_CONST, { 0xa } }, { "brcst", PSEUDO_FUNC_CONST, { 0x0 } }, { "mix", PSEUDO_FUNC_CONST, { 0x8 } }, { "rev", PSEUDO_FUNC_CONST, { 0xb } }, { "shuf", PSEUDO_FUNC_CONST, { 0x9 } }, /* fclass constants: */ { "nat", PSEUDO_FUNC_CONST, { 0x100 } }, { "qnan", PSEUDO_FUNC_CONST, { 0x080 } }, { "snan", PSEUDO_FUNC_CONST, { 0x040 } }, { "pos", PSEUDO_FUNC_CONST, { 0x001 } }, { "neg", PSEUDO_FUNC_CONST, { 0x002 } }, { "zero", PSEUDO_FUNC_CONST, { 0x004 } }, { "unorm", PSEUDO_FUNC_CONST, { 0x008 } }, { "norm", PSEUDO_FUNC_CONST, { 
0x010 } }, { "inf", PSEUDO_FUNC_CONST, { 0x020 } }, { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */ /* hint constants: */ { "pause", PSEUDO_FUNC_CONST, { 0x0 } }, { "priority", PSEUDO_FUNC_CONST, { 0x1 } }, /* tf constants: */ { "clz", PSEUDO_FUNC_CONST, { 32 } }, { "mpy", PSEUDO_FUNC_CONST, { 33 } }, { "datahints", PSEUDO_FUNC_CONST, { 34 } }, /* unwind-related constants: */ { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } }, { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } }, { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */ { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } }, { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } }, { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } }, { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } }, /* unwind-related registers: */ { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } } }; /* 41-bit nop opcodes (one per unit): */ static const bfd_vma nop[IA64_NUM_UNITS] = { 0x0000000000LL, /* NIL => break 0 */ 0x0008000000LL, /* I-unit nop */ 0x0008000000LL, /* M-unit nop */ 0x4000000000LL, /* B-unit nop */ 0x0008000000LL, /* F-unit nop */ 0x0000000000LL, /* L-"unit" nop immediate */ 0x0008000000LL, /* X-unit nop */ }; /* Can't be `const' as it's passed to input routines (which have the habit of setting temporary sentinels. */ static char special_section_name[][20] = { {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"}, {".IA_64.unwind"}, {".IA_64.unwind_info"}, {".init_array"}, {".fini_array"} }; /* The best template for a particular sequence of up to three instructions: */ #define N IA64_NUM_TYPES static unsigned char best_template[N][N][N]; #undef N /* Resource dependencies currently in effect */ static struct rsrc { int depind; /* dependency index */ const struct ia64_dependency *dependency; /* actual dependency */ unsigned specific:1, /* is this a specific bit/regno? 
*/ link_to_qp_branch:1; /* will a branch on the same QP clear it?*/ int index; /* specific regno/bit within dependency */ int note; /* optional qualifying note (0 if none) */ #define STATE_NONE 0 #define STATE_STOP 1 #define STATE_SRLZ 2 int insn_srlz; /* current insn serialization state */ int data_srlz; /* current data serialization state */ int qp_regno; /* qualifying predicate for this usage */ char *file; /* what file marked this dependency */ unsigned int line; /* what line marked this dependency */ struct mem_offset mem_offset; /* optional memory offset hint */ enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */ int path; /* corresponding code entry index */ } *regdeps = NULL; static int regdepslen = 0; static int regdepstotlen = 0; static const char *dv_mode[] = { "RAW", "WAW", "WAR" }; static const char *dv_sem[] = { "none", "implied", "impliedf", "data", "instr", "specific", "stop", "other" }; static const char *dv_cmp_type[] = { "none", "OR", "AND" }; /* Current state of PR mutexation */ static struct qpmutex { valueT prmask; int path; } *qp_mutexes = NULL; /* QP mutex bitmasks */ static int qp_mutexeslen = 0; static int qp_mutexestotlen = 0; static valueT qp_safe_across_calls = 0; /* Current state of PR implications */ static struct qp_imply { unsigned p1:6; unsigned p2:6; unsigned p2_branched:1; int path; } *qp_implies = NULL; static int qp_implieslen = 0; static int qp_impliestotlen = 0; /* Keep track of static GR values so that indirect register usage can sometimes be tracked. */ static struct gr { unsigned known:1; int path; valueT value; } gr_values[128] = { { 1, #ifdef INT_MAX INT_MAX, #else (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1, #endif 0 } }; /* Remember the alignment frag. */ static fragS *align_frag; /* These are the routines required to output the various types of unwind records. */ /* A slot_number is a frag address plus the slot index (0-2). 
We use the frag address here so that if there is a section switch in the middle of a function, then instructions emitted to a different section are not counted. Since there may be more than one frag for a function, this means we also need to keep track of which frag this address belongs to so we can compute inter-frag distances. This also nicely solves the problem with nops emitted for align directives, which can't easily be counted, but can easily be derived from frag sizes. */ typedef struct unw_rec_list { unwind_record r; unsigned long slot_number; fragS *slot_frag; struct unw_rec_list *next; } unw_rec_list; #define SLOT_NUM_NOT_SET (unsigned)-1 /* Linked list of saved prologue counts. A very poor implementation of a map from label numbers to prologue counts. */ typedef struct label_prologue_count { struct label_prologue_count *next; unsigned long label_number; unsigned int prologue_count; } label_prologue_count; typedef struct proc_pending { symbolS *sym; struct proc_pending *next; } proc_pending; static struct { /* Maintain a list of unwind entries for the current function. */ unw_rec_list *list; unw_rec_list *tail; /* Any unwind entries that should be attached to the current slot that an insn is being constructed for. */ unw_rec_list *current_entry; /* These are used to create the unwind table entry for this function. */ proc_pending proc_pending; symbolS *info; /* pointer to unwind info */ symbolS *personality_routine; segT saved_text_seg; subsegT saved_text_subseg; unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */ /* TRUE if processing unwind directives in a prologue region. */ unsigned int prologue : 1; unsigned int prologue_mask : 4; unsigned int prologue_gr : 7; unsigned int body : 1; unsigned int insn : 1; unsigned int prologue_count; /* number of .prologues seen so far */ /* Prologue counts at previous .label_state directives. */ struct label_prologue_count * saved_prologue_counts; /* List of split up .save-s. 
*/ unw_p_record *pending_saves; } unwind; /* The input value is a negated offset from psp, and specifies an address psp - offset. The encoded value is psp + 16 - (4 * offset). Thus we must add 16 and divide by 4 to get the encoded value. */ #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4) typedef void (*vbyte_func) (int, char *, char *); /* Forward declarations: */ static void dot_alias (int); static int parse_operand_and_eval (expressionS *, int); static void emit_one_bundle (void); static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *, bfd_reloc_code_real_type); static void insn_group_break (int, int, int); static void add_qp_mutex (valueT); static void add_qp_imply (int, int); static void clear_qp_mutex (valueT); static void clear_qp_implies (valueT, valueT); static void print_dependency (const char *, int); static void instruction_serialization (void); static void data_serialization (void); static void output_R3_format (vbyte_func, unw_record_type, unsigned long); static void output_B3_format (vbyte_func, unsigned long, unsigned long); static void output_B4_format (vbyte_func, unw_record_type, unsigned long); static void free_saved_prologue_counts (void); /* Determine if application register REGNUM resides only in the integer unit (as opposed to the memory unit). */ static int ar_is_only_in_integer_unit (int reg) { reg -= REG_AR; return reg >= 64 && reg <= 111; } /* Determine if application register REGNUM resides only in the memory unit (as opposed to the integer unit). */ static int ar_is_only_in_memory_unit (int reg) { reg -= REG_AR; return reg >= 0 && reg <= 47; } /* Switch to section NAME and create section if necessary. It's rather ugly that we have to manipulate input_line_pointer but I don't see any other way to accomplish the same thing without changing obj-elf.c (which may be the Right Thing, in the end). 
*/ static void set_section (char *name) { char *saved_input_line_pointer; saved_input_line_pointer = input_line_pointer; input_line_pointer = name; obj_elf_section (0); input_line_pointer = saved_input_line_pointer; } /* Map 's' to SHF_IA_64_SHORT. */ bfd_vma ia64_elf_section_letter (int letter, char **ptr_msg) { if (letter == 's') return SHF_IA_64_SHORT; else if (letter == 'o') return SHF_LINK_ORDER; #ifdef TE_VMS else if (letter == 'O') return SHF_IA_64_VMS_OVERLAID; else if (letter == 'g') return SHF_IA_64_VMS_GLOBAL; #endif *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string"); return -1; } /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */ flagword ia64_elf_section_flags (flagword flags, bfd_vma attr, int type ATTRIBUTE_UNUSED) { if (attr & SHF_IA_64_SHORT) flags |= SEC_SMALL_DATA; return flags; } int ia64_elf_section_type (const char *str, size_t len) { #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0)) if (STREQ (ELF_STRING_ia64_unwind_info)) return SHT_PROGBITS; if (STREQ (ELF_STRING_ia64_unwind_info_once)) return SHT_PROGBITS; if (STREQ (ELF_STRING_ia64_unwind)) return SHT_IA_64_UNWIND; if (STREQ (ELF_STRING_ia64_unwind_once)) return SHT_IA_64_UNWIND; if (STREQ ("unwind")) return SHT_IA_64_UNWIND; return -1; #undef STREQ } static unsigned int set_regstack (unsigned int ins, unsigned int locs, unsigned int outs, unsigned int rots) { /* Size of frame. 
*/ unsigned int sof; sof = ins + locs + outs; if (sof > 96) { as_bad (_("Size of frame exceeds maximum of 96 registers")); return 0; } if (rots > sof) { as_warn (_("Size of rotating registers exceeds frame size")); return 0; } md.in.base = REG_GR + 32; md.loc.base = md.in.base + ins; md.out.base = md.loc.base + locs; md.in.num_regs = ins; md.loc.num_regs = locs; md.out.num_regs = outs; md.rot.num_regs = rots; return sof; } void ia64_flush_insns (void) { struct label_fix *lfix; segT saved_seg; subsegT saved_subseg; unw_rec_list *ptr; bfd_boolean mark; if (!md.last_text_seg) return; saved_seg = now_seg; saved_subseg = now_subseg; subseg_set (md.last_text_seg, 0); while (md.num_slots_in_use > 0) emit_one_bundle (); /* force out queued instructions */ /* In case there are labels following the last instruction, resolve those now. */ mark = FALSE; for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next) { symbol_set_value_now (lfix->sym); mark |= lfix->dw2_mark_labels; } if (mark) { dwarf2_where (&CURR_SLOT.debug_line); CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK; dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line); dwarf2_consume_line_info (); } CURR_SLOT.label_fixups = 0; for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next) symbol_set_value_now (lfix->sym); CURR_SLOT.tag_fixups = 0; /* In case there are unwind directives following the last instruction, resolve those now. We only handle prologue, body, and endp directives here. Give an error for others. */ for (ptr = unwind.current_entry; ptr; ptr = ptr->next) { switch (ptr->r.type) { case prologue: case prologue_gr: case body: case endp: ptr->slot_number = (unsigned long) frag_more (0); ptr->slot_frag = frag_now; break; /* Allow any record which doesn't have a "t" field (i.e., doesn't relate to a particular instruction). 
*/ case unwabi: case br_gr: case copy_state: case fr_mem: case frgr_mem: case gr_gr: case gr_mem: case label_state: case rp_br: case spill_base: case spill_mask: /* nothing */ break; default: as_bad (_("Unwind directive not followed by an instruction.")); break; } } unwind.current_entry = NULL; subseg_set (saved_seg, saved_subseg); if (md.qp.X_op == O_register) as_bad (_("qualifying predicate not followed by instruction")); } static void ia64_do_align (int nbytes) { char *saved_input_line_pointer = input_line_pointer; input_line_pointer = ""; s_align_bytes (nbytes); input_line_pointer = saved_input_line_pointer; } void ia64_cons_align (int nbytes) { if (md.auto_align) { char *saved_input_line_pointer = input_line_pointer; input_line_pointer = ""; s_align_bytes (nbytes); input_line_pointer = saved_input_line_pointer; } } #ifdef TE_VMS /* .vms_common section, symbol, size, alignment */ static void obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED) { char *sec_name; char *sym_name; char c; offsetT size; offsetT cur_size; offsetT temp; symbolS *symbolP; segT current_seg = now_seg; subsegT current_subseg = now_subseg; offsetT log_align; /* Section name. */ sec_name = obj_elf_section_name (); if (sec_name == NULL) return; /* Symbol name. */ SKIP_WHITESPACE (); if (*input_line_pointer == ',') { input_line_pointer++; SKIP_WHITESPACE (); } else { as_bad (_("expected ',' after section name")); ignore_rest_of_line (); return; } c = get_symbol_name (&sym_name); if (input_line_pointer == sym_name) { (void) restore_line_pointer (c); as_bad (_("expected symbol name")); ignore_rest_of_line (); return; } symbolP = symbol_find_or_make (sym_name); (void) restore_line_pointer (c); if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP)) && !S_IS_COMMON (symbolP)) { as_bad (_("Ignoring attempt to re-define symbol")); ignore_rest_of_line (); return; } /* Symbol size. 
*/ SKIP_WHITESPACE (); if (*input_line_pointer == ',') { input_line_pointer++; SKIP_WHITESPACE (); } else { as_bad (_("expected ',' after symbol name")); ignore_rest_of_line (); return; } temp = get_absolute_expression (); size = temp; size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1; if (temp != size) { as_warn (_("size (%ld) out of range, ignored"), (long) temp); ignore_rest_of_line (); return; } /* Alignment. */ SKIP_WHITESPACE (); if (*input_line_pointer == ',') { input_line_pointer++; SKIP_WHITESPACE (); } else { as_bad (_("expected ',' after symbol size")); ignore_rest_of_line (); return; } log_align = get_absolute_expression (); demand_empty_rest_of_line (); obj_elf_change_section (sec_name, SHT_NOBITS, SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL, 0, NULL, 1, 0); S_SET_VALUE (symbolP, 0); S_SET_SIZE (symbolP, size); S_SET_EXTERNAL (symbolP); S_SET_SEGMENT (symbolP, now_seg); symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT; record_alignment (now_seg, log_align); cur_size = bfd_section_size (stdoutput, now_seg); if ((int) size > cur_size) { char *pfrag = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL, (valueT)size - (valueT)cur_size, NULL); *pfrag = 0; bfd_section_size (stdoutput, now_seg) = size; } /* Switch back to current segment. */ subseg_set (current_seg, current_subseg); #ifdef md_elf_section_change_hook md_elf_section_change_hook (); #endif } #endif /* TE_VMS */ /* Output COUNT bytes to a memory location. */ static char *vbyte_mem_ptr = NULL; static void output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED) { int x; if (vbyte_mem_ptr == NULL) abort (); if (count == 0) return; for (x = 0; x < count; x++) *(vbyte_mem_ptr++) = ptr[x]; } /* Count the number of bytes required for records. 
*/

/* Running byte total accumulated by count_output below.  */
static int vbyte_count = 0;

/* vbyte_func that only counts bytes instead of emitting them; used by
   calc_record_size to size a record list before output.  */
static void
count_output (int count, char *ptr ATTRIBUTE_UNUSED, char *comment ATTRIBUTE_UNUSED)
{
  vbyte_count += count;
}

/* Emit a short-form region header (format R1): prologue or body region
   whose length fits in 5 bits.  Falls back to R3 otherwise.  */
static void
output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
{
  int r = 0;
  char byte;
  if (rlen > 0x1f)
    {
      /* Length does not fit in 5 bits; use the long form.  */
      output_R3_format (f, rtype, rlen);
      return;
    }

  if (rtype == body)
    r = 1;
  else if (rtype != prologue)
    as_bad (_("record type is not valid"));
  byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
  (*f) (1, &byte, NULL);
}

/* Emit a prologue_gr region header (format R2): 4-bit GR save mask,
   7-bit first save GR, LEB128-encoded region length.  */
static void
output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
{
  char bytes[20];
  int count = 2;
  mask = (mask & 0x0f);
  grsave = (grsave & 0x7f);
  bytes[0] = (UNW_R2 | (mask >> 1));
  bytes[1] = (((mask & 0x01) << 7) | grsave);
  count += output_leb128 (bytes + 2, rlen, 0);
  (*f) (count, bytes, NULL);
}

/* Emit a long-form region header (format R3): prologue or body region
   with LEB128-encoded length.  Falls back to R1 for short regions.  */
static void
output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
{
  int r = 0, count;
  char bytes[20];
  if (rlen <= 0x1f)
    {
      output_R1_format (f, rtype, rlen);
      return;
    }

  if (rtype == body)
    r = 1;
  else if (rtype != prologue)
    as_bad (_("record type is not valid"));
  bytes[0] = (UNW_R3 | r);
  count = output_leb128 (bytes + 1, rlen, 0);
  (*f) (count + 1, bytes, NULL);
}

/* Emit a P1 descriptor: 5-bit branch-register save mask.  */
static void
output_P1_format (vbyte_func f, int brmask)
{
  char byte;
  byte = UNW_P1 | (brmask & 0x1f);
  (*f) (1, &byte, NULL);
}

/* Emit a P2 descriptor: branch registers saved to general registers
   (5-bit BR mask split across the two bytes, plus target GR).  */
static void
output_P2_format (vbyte_func f, int brmask, int gr)
{
  char bytes[2];
  brmask = (brmask & 0x1f);
  bytes[0] = UNW_P2 | (brmask >> 1);
  bytes[1] = (((brmask & 1) << 7) | gr);
  (*f) (2, bytes, NULL);
}

/* Emit a P3 descriptor: a single preserved register saved to a GR/BR.
   The 5-bit record code R selects which resource is being described.  */
static void
output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
{
  char bytes[2];
  int r = 0;
  reg = (reg & 0x7f);
  switch (rtype)
    {
    case psp_gr:
      r = 0;
      break;
    case rp_gr:
      r = 1;
      break;
    case pfs_gr:
      r = 2;
      break;
    case preds_gr:
      r = 3;
      break;
    case unat_gr:
      r = 4;
      break;
    case lc_gr:
      r = 5;
      break;
    case rp_br:
      r = 6;
      break;
    case rnat_gr:
      r = 7;
      break;
    case bsp_gr:
      r = 8;
      break;
    case bspstore_gr:
      r = 9;
      break;
    case fpsr_gr:
      r = 10;
      break;
    case priunat_gr:
      r = 11;
      break;
    default:
      as_bad (_("Invalid record type for P3 format."));
    }
  bytes[0] = (UNW_P3 | (r >> 1));
  bytes[1] = (((r & 1) << 7) | reg);
  (*f) (2, bytes, NULL);
}

/* Emit a P4 descriptor: the spill interval bitmask (imask).  The
   caller-built buffer already reserves byte 0 for the record tag.  */
static void
output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
{
  imask[0] = UNW_P4;
  (*f) (imask_size, (char *) imask, NULL);
}

/* Emit a P5 descriptor: combined GR save mask (4 bits) and FR save
   mask (20 bits) packed into three bytes after the tag.  */
static void
output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
{
  char bytes[4];
  grmask = (grmask & 0x0f);
  bytes[0] = UNW_P5;
  bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
  bytes[2] = ((frmask & 0x0000ff00) >> 8);
  bytes[3] = (frmask & 0x000000ff);
  (*f) (4, bytes, NULL);
}

/* Emit a P6 descriptor: short 4-bit FR or GR memory-save mask.  */
static void
output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
{
  char byte;
  int r = 0;
  if (rtype == gr_mem)
    r = 1;
  else if (rtype != fr_mem)
    as_bad (_("Invalid record type for format P6"));
  byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
  (*f) (1, &byte, NULL);
}

/* Emit a P7 descriptor: 4-bit record code plus one or (for
   mem_stack_f) two LEB128 operands.  */
static void
output_P7_format (vbyte_func f, unw_record_type rtype,
		  unsigned long w1, unsigned long w2)
{
  char bytes[20];
  int count = 1;
  int r = 0;
  count += output_leb128 (bytes + 1, w1, 0);
  switch (rtype)
    {
    case mem_stack_f:
      r = 0;
      /* Frame size is encoded in 16-byte units.  */
      count += output_leb128 (bytes + count, w2 >> 4, 0);
      break;
    case mem_stack_v:
      r = 1;
      break;
    case spill_base:
      r = 2;
      break;
    case psp_sprel:
      r = 3;
      break;
    case rp_when:
      r = 4;
      break;
    case rp_psprel:
      r = 5;
      break;
    case pfs_when:
      r = 6;
      break;
    case pfs_psprel:
      r = 7;
      break;
    case preds_when:
      r = 8;
      break;
    case preds_psprel:
      r = 9;
      break;
    case lc_when:
      r = 10;
      break;
    case lc_psprel:
      r = 11;
      break;
    case unat_when:
      r = 12;
      break;
    case unat_psprel:
      r = 13;
      break;
    case fpsr_when:
      r = 14;
      break;
    case fpsr_psprel:
      r = 15;
      break;
    default:
      break;
    }
  bytes[0] = (UNW_P7 | r);
  (*f) (count, bytes, NULL);
}

/* Emit a P8 descriptor: record code in its own byte, then a single
   LEB128 operand (sp-relative offset or instruction time T).  */
static void
output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
{
  char bytes[20];
  int r = 0;
  int count = 2;
  bytes[0] = UNW_P8;
  switch (rtype)
    {
    case rp_sprel:
      r = 1;
      break;
    case pfs_sprel:
      r = 2;
      break;
    case preds_sprel:
      r = 3;
      break;
    case lc_sprel:
      r = 4;
      break;
    case unat_sprel:
      r = 5;
      break;
    case fpsr_sprel:
      r = 6;
      break;
    case bsp_when:
      r = 7;
      break;
    case bsp_psprel:
      r = 8;
      break;
    case bsp_sprel:
      r = 9;
      break;
    case bspstore_when:
      r = 10;
      break;
    case bspstore_psprel:
      r = 11;
      break;
    case bspstore_sprel:
      r = 12;
      break;
    case rnat_when:
      r = 13;
      break;
    case rnat_psprel:
      r = 14;
      break;
    case rnat_sprel:
      r = 15;
      break;
    case priunat_when_gr:
      r = 16;
      break;
    case priunat_psprel:
      r = 17;
      break;
    case priunat_sprel:
      r = 18;
      break;
    case priunat_when_mem:
      r = 19;
      break;
    default:
      break;
    }
  bytes[1] = r;
  count += output_leb128 (bytes + 2, t, 0);
  (*f) (count, bytes, NULL);
}

/* Emit a P9 descriptor: GRs saved to other GRs (4-bit mask, 7-bit
   first target GR).  */
static void
output_P9_format (vbyte_func f, int grmask, int gr)
{
  char bytes[3];
  bytes[0] = UNW_P9;
  bytes[1] = (grmask & 0x0f);
  bytes[2] = (gr & 0x7f);
  (*f) (3, bytes, NULL);
}

/* Emit a P10 descriptor: unwind ABI marker (abi, context).  */
static void
output_P10_format (vbyte_func f, int abi, int context)
{
  char bytes[3];
  bytes[0] = UNW_P10;
  bytes[1] = (abi & 0xff);
  bytes[2] = (context & 0xff);
  (*f) (3, bytes, NULL);
}

/* Emit a short-form B1 descriptor: label_state/copy_state with a
   5-bit label.  Falls back to B4 for larger labels.  */
static void
output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
{
  char byte;
  int r = 0;
  if (label > 0x1f)
    {
      output_B4_format (f, rtype, label);
      return;
    }

  if (rtype == copy_state)
    r = 1;
  else if (rtype != label_state)
    as_bad (_("Invalid record type for format B1"));
  byte = (UNW_B1 | (r << 5) | (label & 0x1f));
  (*f) (1, &byte, NULL);
}

/* Emit a short-form B2 epilogue descriptor: 5-bit ecount plus LEB128
   time T.  Falls back to B3 for larger ecounts.  */
static void
output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
{
  char bytes[20];
  int count = 1;
  if (ecount > 0x1f)
    {
      output_B3_format (f, ecount, t);
      return;
    }
  bytes[0] = (UNW_B2 | (ecount & 0x1f));
  count += output_leb128 (bytes + 1, t, 0);
  (*f) (count, bytes, NULL);
}

/* Emit a long-form B3 epilogue descriptor: LEB128 T then LEB128
   ecount.  Falls back to B2 for small ecounts.  */
static void
output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
{
  char bytes[20];
  int count = 1;
  if (ecount <= 0x1f)
    {
      output_B2_format (f, ecount, t);
      return;
    }
  bytes[0] = UNW_B3;
  count += output_leb128 (bytes + 1, t, 0);
  count += output_leb128 (bytes + count, ecount, 0);
  (*f) (count, bytes, NULL);
}

/* Emit a long-form B4 descriptor: label_state/copy_state with a
   LEB128 label.  Falls back to B1 for labels that fit in 5 bits.  */
static void
output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
{
  char bytes[20];
  int r = 0;
  int count = 1;
  if (label <= 0x1f)
    {
      output_B1_format (f, rtype, label);
      return;
    }

  if (rtype == copy_state)
    r = 1;
  else if (rtype != label_state)
    as_bad (_("Invalid record type for format B1"));
  bytes[0] = (UNW_B4 | (r << 3));
  count += output_leb128 (bytes + 1, label, 0);
  (*f) (count, bytes, NULL);
}

/* Pack a 2-bit AB field and a 5-bit register number into one byte,
   as used by the X-format descriptors.  */
static char
format_ab_reg (int ab, int reg)
{
  int ret;
  ab = (ab & 3);
  reg = (reg & 0x1f);
  ret = (ab << 5) | reg;
  return ret;
}

/* Emit an X1 descriptor: spill of a preserved register to a psp- or
   sp-relative location, with time T and offset W1.  */
static void
output_X1_format (vbyte_func f, unw_record_type rtype, int ab, int reg,
		  unsigned long t, unsigned long w1)
{
  char bytes[20];
  int r = 0;
  int count = 2;
  bytes[0] = UNW_X1;
  if (rtype == spill_sprel)
    r = 1;
  else if (rtype != spill_psprel)
    as_bad (_("Invalid record type for format X1"));
  bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
  count += output_leb128 (bytes + 2, t, 0);
  count += output_leb128 (bytes + count, w1, 0);
  (*f) (count, bytes, NULL);
}

/* Emit an X2 descriptor: spill of a preserved register to another
   register (x/y select the target register file).  */
static void
output_X2_format (vbyte_func f, int ab, int reg, int x, int y, int treg,
		  unsigned long t)
{
  char bytes[20];
  int count = 3;
  bytes[0] = UNW_X2;
  bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
  bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
  count += output_leb128 (bytes + 3, t, 0);
  (*f) (count, bytes, NULL);
}

/* Emit an X3 descriptor: predicated variant of X1 (qualifying
   predicate QP).  */
static void
output_X3_format (vbyte_func f, unw_record_type rtype, int qp, int ab,
		  int reg, unsigned long t, unsigned long w1)
{
  char bytes[20];
  int r = 0;
  int count = 3;
  bytes[0] = UNW_X3;
  if (rtype == spill_sprel_p)
    r = 1;
  else if (rtype != spill_psprel_p)
    as_bad (_("Invalid record type for format X3"));
  bytes[1] = ((r << 7) | (qp & 0x3f));
  bytes[2] = format_ab_reg (ab, reg);
  count += output_leb128 (bytes + 3, t, 0);
  count += output_leb128 (bytes + count, w1, 0);
  (*f) (count, bytes, NULL);
}

/* Emit an X4 descriptor: predicated variant of X2.  */
static void
output_X4_format (vbyte_func f, int qp, int ab, int reg, int x, int y,
		  int treg, unsigned long t)
{
  char bytes[20];
  int count = 4;
  bytes[0] = UNW_X4;
  bytes[1] = (qp & 0x3f);
  bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
  bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
  count += output_leb128 (bytes + 4, t, 0);
  (*f) (count, bytes, NULL);
}

/* This function checks whether there are any outstanding .save-s and
   discards them if so.  The pending chain is unlinked from
   unwind.list and freed, except for its first record (see below).  */
static void
check_pending_save (void)
{
  if (unwind.pending_saves)
    {
      unw_rec_list *cur, *prev;

      as_warn (_("Previous .save incomplete"));
      /* Find the pending record in the main list and unlink it.  */
      for (cur = unwind.list, prev = NULL; cur; )
	if (&cur->r.record.p == unwind.pending_saves)
	  {
	    if (prev)
	      prev->next = cur->next;
	    else
	      unwind.list = cur->next;
	    if (cur == unwind.tail)
	      unwind.tail = prev;
	    if (cur == unwind.current_entry)
	      unwind.current_entry = cur->next;
	    /* Don't free the first discarded record, it's being
	       used as terminator for (currently) br_gr and gr_gr
	       processing, and also prevents leaving a dangling
	       pointer to it in its predecessor.  */
	    cur->r.record.p.grmask = 0;
	    cur->r.record.p.brmask = 0;
	    cur->r.record.p.frmask = 0;
	    prev = cur->r.record.p.next;
	    cur->r.record.p.next = NULL;
	    cur = prev;
	    break;
	  }
	else
	  {
	    prev = cur;
	    cur = cur->next;
	  }
      /* Free the rest of the pending chain.  */
      while (cur)
	{
	  prev = cur;
	  cur = cur->r.record.p.next;
	  free (prev);
	}
      unwind.pending_saves = NULL;
    }
}

/* This function allocates a record list structure, and initializes
   fields.  Caller owns the returned record.  */
static unw_rec_list *
alloc_record (unw_record_type t)
{
  unw_rec_list *ptr;
  ptr = xmalloc (sizeof (*ptr));
  memset (ptr, 0, sizeof (*ptr));
  ptr->slot_number = SLOT_NUM_NOT_SET;
  ptr->r.type = t;
  return ptr;
}

/* Dummy unwind record used for calculating the length of the last
   prologue or body region.
*/

/* The output_* functions below each allocate one unwind record of the
   corresponding type and fill in its operands.  psprel offsets are
   encoded via ENCODED_PSP_OFFSET; sprel offsets are stored in 4-byte
   units (offset / 4).  */

static unw_rec_list *
output_endp (void)
{
  unw_rec_list *ptr = alloc_record (endp);
  return ptr;
}

static unw_rec_list *
output_prologue (void)
{
  unw_rec_list *ptr = alloc_record (prologue);
  memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
  return ptr;
}

static unw_rec_list *
output_prologue_gr (unsigned int saved_mask, unsigned int reg)
{
  unw_rec_list *ptr = alloc_record (prologue_gr);
  memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
  ptr->r.record.r.grmask = saved_mask;
  ptr->r.record.r.grsave = reg;
  return ptr;
}

static unw_rec_list *
output_body (void)
{
  unw_rec_list *ptr = alloc_record (body);
  return ptr;
}

static unw_rec_list *
output_mem_stack_f (unsigned int size)
{
  unw_rec_list *ptr = alloc_record (mem_stack_f);
  ptr->r.record.p.size = size;
  return ptr;
}

static unw_rec_list *
output_mem_stack_v (void)
{
  unw_rec_list *ptr = alloc_record (mem_stack_v);
  return ptr;
}

static unw_rec_list *
output_psp_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (psp_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_psp_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (psp_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_rp_when (void)
{
  unw_rec_list *ptr = alloc_record (rp_when);
  return ptr;
}

static unw_rec_list *
output_rp_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (rp_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_rp_br (unsigned int br)
{
  unw_rec_list *ptr = alloc_record (rp_br);
  ptr->r.record.p.r.br = br;
  return ptr;
}

static unw_rec_list *
output_rp_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (rp_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_rp_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (rp_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_pfs_when (void)
{
  unw_rec_list *ptr = alloc_record (pfs_when);
  return ptr;
}

static unw_rec_list *
output_pfs_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (pfs_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_pfs_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (pfs_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_pfs_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (pfs_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_preds_when (void)
{
  unw_rec_list *ptr = alloc_record (preds_when);
  return ptr;
}

static unw_rec_list *
output_preds_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (preds_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_preds_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (preds_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_preds_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (preds_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

/* The masked save records below are split into a chain of records,
   one per set bit, linked through r.record.p.next, so that each save
   can later be given its own time stamp.  The head of the chain is
   also remembered in unwind.pending_saves until the .save directives
   complete.  */

static unw_rec_list *
output_fr_mem (unsigned int mask)
{
  unw_rec_list *ptr = alloc_record (fr_mem);
  unw_rec_list *cur = ptr;

  ptr->r.record.p.frmask = mask;
  unwind.pending_saves = &ptr->r.record.p;
  for (;;)
    {
      unw_rec_list *prev = cur;

      /* Clear least significant set bit.  */
      mask &= ~(mask & (~mask + 1));
      if (!mask)
	return ptr;
      cur = alloc_record (fr_mem);
      cur->r.record.p.frmask = mask;
      /* Retain only least significant bit.  */
      prev->r.record.p.frmask ^= mask;
      prev->r.record.p.next = cur;
    }
}

static unw_rec_list *
output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
{
  unw_rec_list *ptr = alloc_record (frgr_mem);
  unw_rec_list *cur = ptr;

  unwind.pending_saves = &cur->r.record.p;
  /* FR saves first, then GR saves, one record per register.  */
  cur->r.record.p.frmask = fr_mask;
  while (fr_mask)
    {
      unw_rec_list *prev = cur;

      /* Clear least significant set bit.  */
      fr_mask &= ~(fr_mask & (~fr_mask + 1));
      if (!gr_mask && !fr_mask)
	return ptr;
      cur = alloc_record (frgr_mem);
      cur->r.record.p.frmask = fr_mask;
      /* Retain only least significant bit.  */
      prev->r.record.p.frmask ^= fr_mask;
      prev->r.record.p.next = cur;
    }
  cur->r.record.p.grmask = gr_mask;
  for (;;)
    {
      unw_rec_list *prev = cur;

      /* Clear least significant set bit.  */
      gr_mask &= ~(gr_mask & (~gr_mask + 1));
      if (!gr_mask)
	return ptr;
      cur = alloc_record (frgr_mem);
      cur->r.record.p.grmask = gr_mask;
      /* Retain only least significant bit.  */
      prev->r.record.p.grmask ^= gr_mask;
      prev->r.record.p.next = cur;
    }
}

static unw_rec_list *
output_gr_gr (unsigned int mask, unsigned int reg)
{
  unw_rec_list *ptr = alloc_record (gr_gr);
  unw_rec_list *cur = ptr;

  ptr->r.record.p.grmask = mask;
  ptr->r.record.p.r.gr = reg;
  unwind.pending_saves = &ptr->r.record.p;
  for (;;)
    {
      unw_rec_list *prev = cur;

      /* Clear least significant set bit.  */
      mask &= ~(mask & (~mask + 1));
      if (!mask)
	return ptr;
      cur = alloc_record (gr_gr);
      cur->r.record.p.grmask = mask;
      /* Indicate this record shouldn't be output.  */
      cur->r.record.p.r.gr = REG_NUM;
      /* Retain only least significant bit.  */
      prev->r.record.p.grmask ^= mask;
      prev->r.record.p.next = cur;
    }
}

static unw_rec_list *
output_gr_mem (unsigned int mask)
{
  unw_rec_list *ptr = alloc_record (gr_mem);
  unw_rec_list *cur = ptr;

  ptr->r.record.p.grmask = mask;
  unwind.pending_saves = &ptr->r.record.p;
  for (;;)
    {
      unw_rec_list *prev = cur;

      /* Clear least significant set bit.  */
      mask &= ~(mask & (~mask + 1));
      if (!mask)
	return ptr;
      cur = alloc_record (gr_mem);
      cur->r.record.p.grmask = mask;
      /* Retain only least significant bit.  */
      prev->r.record.p.grmask ^= mask;
      prev->r.record.p.next = cur;
    }
}

static unw_rec_list *
output_br_mem (unsigned int mask)
{
  unw_rec_list *ptr = alloc_record (br_mem);
  unw_rec_list *cur = ptr;

  ptr->r.record.p.brmask = mask;
  unwind.pending_saves = &ptr->r.record.p;
  for (;;)
    {
      unw_rec_list *prev = cur;

      /* Clear least significant set bit.  */
      mask &= ~(mask & (~mask + 1));
      if (!mask)
	return ptr;
      cur = alloc_record (br_mem);
      cur->r.record.p.brmask = mask;
      /* Retain only least significant bit.  */
      prev->r.record.p.brmask ^= mask;
      prev->r.record.p.next = cur;
    }
}

static unw_rec_list *
output_br_gr (unsigned int mask, unsigned int reg)
{
  unw_rec_list *ptr = alloc_record (br_gr);
  unw_rec_list *cur = ptr;

  ptr->r.record.p.brmask = mask;
  ptr->r.record.p.r.gr = reg;
  unwind.pending_saves = &ptr->r.record.p;
  for (;;)
    {
      unw_rec_list *prev = cur;

      /* Clear least significant set bit.  */
      mask &= ~(mask & (~mask + 1));
      if (!mask)
	return ptr;
      cur = alloc_record (br_gr);
      cur->r.record.p.brmask = mask;
      /* Indicate this record shouldn't be output.  */
      cur->r.record.p.r.gr = REG_NUM;
      /* Retain only least significant bit.  */
      prev->r.record.p.brmask ^= mask;
      prev->r.record.p.next = cur;
    }
}

static unw_rec_list *
output_spill_base (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (spill_base);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_unat_when (void)
{
  unw_rec_list *ptr = alloc_record (unat_when);
  return ptr;
}

static unw_rec_list *
output_unat_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (unat_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_unat_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (unat_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_unat_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (unat_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_lc_when (void)
{
  unw_rec_list *ptr = alloc_record (lc_when);
  return ptr;
}

static unw_rec_list *
output_lc_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (lc_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_lc_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (lc_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_lc_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (lc_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_fpsr_when (void)
{
  unw_rec_list *ptr = alloc_record (fpsr_when);
  return ptr;
}

static unw_rec_list *
output_fpsr_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (fpsr_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_fpsr_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (fpsr_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_fpsr_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (fpsr_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_priunat_when_gr (void)
{
  unw_rec_list *ptr = alloc_record (priunat_when_gr);
  return ptr;
}

static unw_rec_list *
output_priunat_when_mem (void)
{
  unw_rec_list *ptr = alloc_record (priunat_when_mem);
  return ptr;
}

static unw_rec_list *
output_priunat_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (priunat_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_priunat_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (priunat_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_priunat_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (priunat_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_bsp_when (void)
{
  unw_rec_list *ptr = alloc_record (bsp_when);
  return ptr;
}

static unw_rec_list *
output_bsp_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (bsp_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_bsp_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (bsp_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_bsp_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (bsp_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_bspstore_when (void)
{
  unw_rec_list *ptr = alloc_record (bspstore_when);
  return ptr;
}

static unw_rec_list *
output_bspstore_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (bspstore_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_bspstore_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (bspstore_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_bspstore_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (bspstore_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_rnat_when (void)
{
  unw_rec_list *ptr = alloc_record (rnat_when);
  return ptr;
}

static unw_rec_list *
output_rnat_gr (unsigned int gr)
{
  unw_rec_list *ptr = alloc_record (rnat_gr);
  ptr->r.record.p.r.gr = gr;
  return ptr;
}

static unw_rec_list *
output_rnat_psprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (rnat_psprel);
  ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
  return ptr;
}

static unw_rec_list *
output_rnat_sprel (unsigned int offset)
{
  unw_rec_list *ptr = alloc_record (rnat_sprel);
  ptr->r.record.p.off.sp = offset / 4;
  return ptr;
}

static unw_rec_list *
output_unwabi (unsigned long abi, unsigned long context)
{
  unw_rec_list *ptr = alloc_record (unwabi);
  ptr->r.record.p.abi = abi;
  ptr->r.record.p.context = context;
  return ptr;
}

static unw_rec_list *
output_epilogue (unsigned long ecount)
{
  unw_rec_list *ptr = alloc_record (epilogue);
  ptr->r.record.b.ecount = ecount;
  return ptr;
}

static unw_rec_list *
output_label_state (unsigned long label)
{
  unw_rec_list *ptr = alloc_record (label_state);
  ptr->r.record.b.label = label;
  return ptr;
}

static unw_rec_list *
output_copy_state (unsigned long label)
{
  unw_rec_list *ptr = alloc_record (copy_state);
  ptr->r.record.b.label = label;
  return ptr;
}

/* PREDICATE selects the predicated (_p) record variant in the three
   spill constructors below.  */

static unw_rec_list *
output_spill_psprel (unsigned int ab,
		     unsigned int reg,
		     unsigned int offset,
		     unsigned int predicate)
{
  unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
  ptr->r.record.x.ab = ab;
  ptr->r.record.x.reg = reg;
  ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
  ptr->r.record.x.qp = predicate;
  return ptr;
}

static unw_rec_list *
output_spill_sprel (unsigned int ab,
		    unsigned int reg,
		    unsigned int offset,
		    unsigned int predicate)
{
  unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
  ptr->r.record.x.ab = ab;
  ptr->r.record.x.reg = reg;
  ptr->r.record.x.where.spoff = offset / 4;
  ptr->r.record.x.qp = predicate;
  return ptr;
}

static unw_rec_list *
output_spill_reg (unsigned int ab,
		  unsigned int reg,
		  unsigned int targ_reg,
		  unsigned int xy,
		  unsigned int predicate)
{
  unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
  ptr->r.record.x.ab = ab;
  ptr->r.record.x.reg = reg;
  ptr->r.record.x.where.reg = targ_reg;
  ptr->r.record.x.xy = xy;
  ptr->r.record.x.qp = predicate;
  return ptr;
}

/* Given a unw_rec_list process the correct format with the specified
   function.  This is the dispatch that maps each record type to its
   R/P/B/X descriptor encoder.  */
static void
process_one_record (unw_rec_list *ptr, vbyte_func f)
{
  unsigned int fr_mask, gr_mask;

  switch (ptr->r.type)
    {
    /* This is a dummy record that takes up no space in the output.  */
    case endp:
      break;

    case gr_mem:
    case fr_mem:
    case br_mem:
    case frgr_mem:
      /* These are taken care of by prologue/prologue_gr.  */
      break;

    case prologue_gr:
    case prologue:
      if (ptr->r.type == prologue_gr)
	output_R2_format (f, ptr->r.record.r.grmask,
			  ptr->r.record.r.grsave, ptr->r.record.r.rlen);
      else
	output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);

      /* Output descriptor(s) for union of register spills (if any).  */
      gr_mask = ptr->r.record.r.mask.gr_mem;
      fr_mask = ptr->r.record.r.mask.fr_mem;
      if (fr_mask)
	{
	  if ((fr_mask & ~0xfUL) == 0)
	    output_P6_format (f, fr_mem, fr_mask);
	  else
	    {
	      /* P5 carries both masks, so the GR mask is consumed.  */
	      output_P5_format (f, gr_mask, fr_mask);
	      gr_mask = 0;
	    }
	}
      if (gr_mask)
	output_P6_format (f, gr_mem, gr_mask);
      if (ptr->r.record.r.mask.br_mem)
	output_P1_format (f, ptr->r.record.r.mask.br_mem);

      /* output imask descriptor if necessary:  */
      if (ptr->r.record.r.mask.i)
	output_P4_format (f, ptr->r.record.r.mask.i,
			  ptr->r.record.r.imask_size);
      break;

    case body:
      output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
      break;

    case mem_stack_f:
    case mem_stack_v:
      output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
			ptr->r.record.p.size);
      break;

    case psp_gr:
    case rp_gr:
    case pfs_gr:
    case preds_gr:
    case unat_gr:
    case lc_gr:
    case fpsr_gr:
    case priunat_gr:
    case bsp_gr:
    case bspstore_gr:
    case rnat_gr:
      output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
      break;

    case rp_br:
      output_P3_format (f, rp_br, ptr->r.record.p.r.br);
      break;

    case psp_sprel:
      output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
      break;

    case rp_when:
    case pfs_when:
    case preds_when:
    case unat_when:
    case lc_when:
    case fpsr_when:
      output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
      break;

    case rp_psprel:
    case pfs_psprel:
    case preds_psprel:
    case unat_psprel:
    case lc_psprel:
    case fpsr_psprel:
    case spill_base:
      output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
      break;

    case rp_sprel:
    case pfs_sprel:
    case preds_sprel:
    case unat_sprel:
    case lc_sprel:
    case fpsr_sprel:
    case priunat_sprel:
    case bsp_sprel:
    case bspstore_sprel:
    case rnat_sprel:
      output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
      break;

    case gr_gr:
      /* Only the head of a gr_gr chain (r.gr < REG_NUM) is output;
	 the masks of the whole chain are merged back together.  */
      if (ptr->r.record.p.r.gr < REG_NUM)
	{
	  const unw_rec_list *cur = ptr;

	  gr_mask = cur->r.record.p.grmask;
	  while ((cur = cur->r.record.p.next) != NULL)
	    gr_mask |= cur->r.record.p.grmask;
	  output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
	}
      break;

    case br_gr:
      /* Same chain-merging scheme as gr_gr, but for branch regs.  */
      if (ptr->r.record.p.r.gr < REG_NUM)
	{
	  const unw_rec_list *cur = ptr;

	  gr_mask = cur->r.record.p.brmask;
	  while ((cur = cur->r.record.p.next) != NULL)
	    gr_mask |= cur->r.record.p.brmask;
	  output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
	}
      break;

    case spill_mask:
      as_bad (_("spill_mask record unimplemented."));
      break;

    case priunat_when_gr:
    case priunat_when_mem:
    case bsp_when:
    case bspstore_when:
    case rnat_when:
      output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
      break;

    case priunat_psprel:
    case bsp_psprel:
    case bspstore_psprel:
    case rnat_psprel:
      output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
      break;

    case unwabi:
      output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
      break;

    case epilogue:
      output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
      break;

    case label_state:
    case copy_state:
      output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
      break;

    case spill_psprel:
      output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
			ptr->r.record.x.reg, ptr->r.record.x.t,
			ptr->r.record.x.where.pspoff);
      break;

    case spill_sprel:
      output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
			ptr->r.record.x.reg, ptr->r.record.x.t,
			ptr->r.record.x.where.spoff);
      break;

    case spill_reg:
      output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
			ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
			ptr->r.record.x.where.reg, ptr->r.record.x.t);
      break;

    case spill_psprel_p:
      output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
			ptr->r.record.x.ab, ptr->r.record.x.reg,
			ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
      break;

    case spill_sprel_p:
      output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
			ptr->r.record.x.ab, ptr->r.record.x.reg,
			ptr->r.record.x.t, ptr->r.record.x.where.spoff);
      break;

    case spill_reg_p:
      output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
			ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
			ptr->r.record.x.xy, ptr->r.record.x.where.reg,
			ptr->r.record.x.t);
      break;

    default:
      as_bad (_("record_type_not_valid"));
      break;
    }
}

/* Given a unw_rec_list list, process all the records with the
   specified function.
*/ static void process_unw_records (unw_rec_list *list, vbyte_func f) { unw_rec_list *ptr; for (ptr = list; ptr; ptr = ptr->next) process_one_record (ptr, f); } /* Determine the size of a record list in bytes. */ static int calc_record_size (unw_rec_list *list) { vbyte_count = 0; process_unw_records (list, count_output); return vbyte_count; } /* Return the number of bits set in the input value. Perhaps this has a better place... */ #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # define popcount __builtin_popcount #else static int popcount (unsigned x) { static const unsigned char popcnt[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 }; if (x < NELEMS (popcnt)) return popcnt[x]; return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt)); } #endif /* Update IMASK bitmask to reflect the fact that one or more registers of type TYPE are saved starting at instruction with index T. If N bits are set in REGMASK, it is assumed that instructions T through T+N-1 save these registers. TYPE values: 0: no save 1: instruction saves next fp reg 2: instruction saves next general reg 3: instruction saves next branch reg */ static void set_imask (unw_rec_list *region, unsigned long regmask, unsigned long t, unsigned int type) { unsigned char *imask; unsigned long imask_size; unsigned int i; int pos; imask = region->r.record.r.mask.i; imask_size = region->r.record.r.imask_size; if (!imask) { imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1; imask = xmalloc (imask_size); memset (imask, 0, imask_size); region->r.record.r.imask_size = imask_size; region->r.record.r.mask.i = imask; } i = (t / 4) + 1; pos = 2 * (3 - t % 4); while (regmask) { if (i >= imask_size) { as_bad (_("Ignoring attempt to spill beyond end of region")); return; } imask[i] |= (type & 0x3) << pos; regmask &= (regmask - 1); pos -= 2; if (pos < 0) { pos = 0; ++i; } } } /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR. 
SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates for frag sizes. */ static unsigned long slot_index (unsigned long slot_addr, fragS *slot_frag, unsigned long first_addr, fragS *first_frag, int before_relax) { unsigned long s_index = 0; /* First time we are called, the initial address and frag are invalid. */ if (first_addr == 0) return 0; /* If the two addresses are in different frags, then we need to add in the remaining size of this frag, and then the entire size of intermediate frags. */ while (slot_frag != first_frag) { unsigned long start_addr = (unsigned long) &first_frag->fr_literal; if (! before_relax) { /* We can get the final addresses only during and after relaxation. */ if (first_frag->fr_next && first_frag->fr_next->fr_address) s_index += 3 * ((first_frag->fr_next->fr_address - first_frag->fr_address - first_frag->fr_fix) >> 4); } else /* We don't know what the final addresses will be. We try our best to estimate. */ switch (first_frag->fr_type) { default: break; case rs_space: as_fatal (_("Only constant space allocation is supported")); break; case rs_align: case rs_align_code: case rs_align_test: /* Take alignment into account. Assume the worst case before relaxation. */ s_index += 3 * ((1 << first_frag->fr_offset) >> 4); break; case rs_org: if (first_frag->fr_symbol) { as_fatal (_("Only constant offsets are supported")); break; } case rs_fill: s_index += 3 * (first_frag->fr_offset >> 4); break; } /* Add in the full size of the frag converted to instruction slots. */ s_index += 3 * (first_frag->fr_fix >> 4); /* Subtract away the initial part before first_addr. */ s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4)) + ((first_addr & 0x3) - (start_addr & 0x3))); /* Move to the beginning of the next frag. 
*/ first_frag = first_frag->fr_next; first_addr = (unsigned long) &first_frag->fr_literal; /* This can happen if there is section switching in the middle of a function, causing the frag chain for the function to be broken. It is too difficult to recover safely from this problem, so we just exit with an error. */ if (first_frag == NULL) as_fatal (_("Section switching in code is not supported.")); } /* Add in the used part of the last frag. */ s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4)) + ((slot_addr & 0x3) - (first_addr & 0x3))); return s_index; } /* Optimize unwind record directives. */ static unw_rec_list * optimize_unw_records (unw_rec_list *list) { if (!list) return NULL; /* If the only unwind record is ".prologue" or ".prologue" followed by ".body", then we can optimize the unwind directives away. */ if (list->r.type == prologue && (list->next->r.type == endp || (list->next->r.type == body && list->next->next->r.type == endp))) return NULL; return list; } /* Given a complete record list, process any records which have unresolved fields, (ie length counts for a prologue). After this has been run, all necessary information should be available within each record to generate an image. */ static void fixup_unw_records (unw_rec_list *list, int before_relax) { unw_rec_list *ptr, *region = 0; unsigned long first_addr = 0, rlen = 0, t; fragS *first_frag = 0; for (ptr = list; ptr; ptr = ptr->next) { if (ptr->slot_number == SLOT_NUM_NOT_SET) as_bad (_(" Insn slot not set in unwind record.")); t = slot_index (ptr->slot_number, ptr->slot_frag, first_addr, first_frag, before_relax); switch (ptr->r.type) { case prologue: case prologue_gr: case body: { unw_rec_list *last; int size; unsigned long last_addr = 0; fragS *last_frag = NULL; first_addr = ptr->slot_number; first_frag = ptr->slot_frag; /* Find either the next body/prologue start, or the end of the function, and determine the size of the region. 
*/ for (last = ptr->next; last != NULL; last = last->next) if (last->r.type == prologue || last->r.type == prologue_gr || last->r.type == body || last->r.type == endp) { last_addr = last->slot_number; last_frag = last->slot_frag; break; } size = slot_index (last_addr, last_frag, first_addr, first_frag, before_relax); rlen = ptr->r.record.r.rlen = size; if (ptr->r.type == body) /* End of region. */ region = 0; else region = ptr; break; } case epilogue: if (t < rlen) ptr->r.record.b.t = rlen - 1 - t; else /* This happens when a memory-stack-less procedure uses a ".restore sp" directive at the end of a region to pop the frame state. */ ptr->r.record.b.t = 0; break; case mem_stack_f: case mem_stack_v: case rp_when: case pfs_when: case preds_when: case unat_when: case lc_when: case fpsr_when: case priunat_when_gr: case priunat_when_mem: case bsp_when: case bspstore_when: case rnat_when: ptr->r.record.p.t = t; break; case spill_reg: case spill_sprel: case spill_psprel: case spill_reg_p: case spill_sprel_p: case spill_psprel_p: ptr->r.record.x.t = t; break; case frgr_mem: if (!region) { as_bad (_("frgr_mem record before region record!")); return; } region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask; region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask; set_imask (region, ptr->r.record.p.frmask, t, 1); set_imask (region, ptr->r.record.p.grmask, t, 2); break; case fr_mem: if (!region) { as_bad (_("fr_mem record before region record!")); return; } region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask; set_imask (region, ptr->r.record.p.frmask, t, 1); break; case gr_mem: if (!region) { as_bad (_("gr_mem record before region record!")); return; } region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask; set_imask (region, ptr->r.record.p.grmask, t, 2); break; case br_mem: if (!region) { as_bad (_("br_mem record before region record!")); return; } region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask; set_imask (region, ptr->r.record.p.brmask, t, 3); break; case 
gr_gr: if (!region) { as_bad (_("gr_gr record before region record!")); return; } set_imask (region, ptr->r.record.p.grmask, t, 2); break; case br_gr: if (!region) { as_bad (_("br_gr record before region record!")); return; } set_imask (region, ptr->r.record.p.brmask, t, 3); break; default: break; } } } /* Estimate the size of a frag before relaxing. We only have one type of frag to handle here, which is the unwind info frag. */ int ia64_estimate_size_before_relax (fragS *frag, asection *segtype ATTRIBUTE_UNUSED) { unw_rec_list *list; int len, size, pad; /* ??? This code is identical to the first part of ia64_convert_frag. */ list = (unw_rec_list *) frag->fr_opcode; fixup_unw_records (list, 0); len = calc_record_size (list); /* pad to pointer-size boundary. */ pad = len % md.pointer_size; if (pad != 0) len += md.pointer_size - pad; /* Add 8 for the header. */ size = len + 8; /* Add a pointer for the personality offset. */ if (frag->fr_offset) size += md.pointer_size; /* fr_var carries the max_chars that we created the fragment with. We must, of course, have allocated enough memory earlier. */ gas_assert (frag->fr_var >= size); return frag->fr_fix + size; } /* This function converts a rs_machine_dependent variant frag into a normal fill frag with the unwind image from the record list. */ void ia64_convert_frag (fragS *frag) { unw_rec_list *list; int len, size, pad; valueT flag_value; /* ??? This code is identical to ia64_estimate_size_before_relax. */ list = (unw_rec_list *) frag->fr_opcode; fixup_unw_records (list, 0); len = calc_record_size (list); /* pad to pointer-size boundary. */ pad = len % md.pointer_size; if (pad != 0) len += md.pointer_size - pad; /* Add 8 for the header. */ size = len + 8; /* Add a pointer for the personality offset. */ if (frag->fr_offset) size += md.pointer_size; /* fr_var carries the max_chars that we created the fragment with. We must, of course, have allocated enough memory earlier. 
*/ gas_assert (frag->fr_var >= size); /* Initialize the header area. fr_offset is initialized with unwind.personality_routine. */ if (frag->fr_offset) { if (md.flags & EF_IA_64_ABI64) flag_value = (bfd_vma) 3 << 32; else /* 32-bit unwind info block. */ flag_value = (bfd_vma) 0x1003 << 32; } else flag_value = 0; md_number_to_chars (frag->fr_literal, (((bfd_vma) 1 << 48) /* Version. */ | flag_value /* U & E handler flags. */ | (len / md.pointer_size)), /* Length. */ 8); /* Skip the header. */ vbyte_mem_ptr = frag->fr_literal + 8; process_unw_records (list, output_vbyte_mem); /* Fill the padding bytes with zeros. */ if (pad != 0) md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0, md.pointer_size - pad); /* Fill the unwind personality with zeros. */ if (frag->fr_offset) md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0, md.pointer_size); frag->fr_fix += size; frag->fr_type = rs_fill; frag->fr_var = 0; frag->fr_offset = 0; } static int parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po) { int sep = parse_operand_and_eval (e, ','); *qp = e->X_add_number - REG_P; if (e->X_op != O_register || *qp > 63) { as_bad (_("First operand to .%s must be a predicate"), po); *qp = 0; } else if (*qp == 0) as_warn (_("Pointless use of p0 as first operand to .%s"), po); if (sep == ',') sep = parse_operand_and_eval (e, ','); else e->X_op = O_absent; return sep; } static void convert_expr_to_ab_reg (const expressionS *e, unsigned int *ab, unsigned int *regp, const char *po, int n) { unsigned int reg = e->X_add_number; *ab = *regp = 0; /* Anything valid is good here. */ if (e->X_op != O_register) reg = REG_GR; /* Anything invalid is good here. 
*/ if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7)) { *ab = 0; *regp = reg - REG_GR; } else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5)) || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31))) { *ab = 1; *regp = reg - REG_FR; } else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5)) { *ab = 2; *regp = reg - REG_BR; } else { *ab = 3; switch (reg) { case REG_PR: *regp = 0; break; case REG_PSP: *regp = 1; break; case REG_PRIUNAT: *regp = 2; break; case REG_BR + 0: *regp = 3; break; case REG_AR + AR_BSP: *regp = 4; break; case REG_AR + AR_BSPSTORE: *regp = 5; break; case REG_AR + AR_RNAT: *regp = 6; break; case REG_AR + AR_UNAT: *regp = 7; break; case REG_AR + AR_FPSR: *regp = 8; break; case REG_AR + AR_PFS: *regp = 9; break; case REG_AR + AR_LC: *regp = 10; break; default: as_bad (_("Operand %d to .%s must be a preserved register"), n, po); break; } } } static void convert_expr_to_xy_reg (const expressionS *e, unsigned int *xy, unsigned int *regp, const char *po, int n) { unsigned int reg = e->X_add_number; *xy = *regp = 0; /* Anything valid is good here. */ if (e->X_op != O_register) reg = REG_GR; /* Anything invalid is good here. */ if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127)) { *xy = 0; *regp = reg - REG_GR; } else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127)) { *xy = 1; *regp = reg - REG_FR; } else if (reg >= REG_BR && reg <= (REG_BR + 7)) { *xy = 2; *regp = reg - REG_BR; } else as_bad (_("Operand %d to .%s must be a writable register"), n, po); } static void dot_align (int arg) { /* The current frag is an alignment frag. */ align_frag = frag_now; s_align_bytes (arg); } static void dot_radix (int dummy ATTRIBUTE_UNUSED) { char *radix; int ch; SKIP_WHITESPACE (); if (is_it_end_of_statement ()) return; ch = get_symbol_name (&radix); ia64_canonicalize_symbol_name (radix); if (strcasecmp (radix, "C")) as_bad (_("Radix `%s' unsupported or invalid"), radix); (void) restore_line_pointer (ch); demand_empty_rest_of_line (); } /* Helper function for .loc directives. 
If the assembler is not generating line number info, then we need to
   remember which instructions have a .loc directive, and only call
   dwarf2_gen_line_info for those instructions.  */

static void
dot_loc (int x)
{
  /* Mark the current slot so line info is emitted for it later.  */
  CURR_SLOT.loc_directive_seen = 1;
  dwarf2_directive_loc (x);
}

/* .sbss, .bss etc. are macros that expand into ".section SECNAME".  */

static void
dot_special_section (int which)
{
  /* WHICH indexes the special_section_name table.  */
  set_section ((char *) special_section_name[which]);
}

/* Diagnose DIRECTIVE appearing outside REGION.  Return -1 for warning
   and 0 for error, matching md.unwind_check; the error path also
   discards the rest of the input line.  */

static int
unwind_diagnostic (const char * region, const char *directive)
{
  if (md.unwind_check == unwind_check_warning)
    {
      as_warn (_(".%s outside of %s"), directive, region);
      return -1;
    }
  else
    {
      as_bad (_(".%s outside of %s"), directive, region);
      ignore_rest_of_line ();
      return 0;
    }
}

/* Return 1 if a directive is in a procedure, -1 if a directive isn't in
   a procedure but the unwind directive check is set to warning, 0 if
   a directive isn't in a procedure and the unwind directive check is
   set to error.  */

static int
in_procedure (const char *directive)
{
  /* Once a section switch has been saved (saved_text_seg non-null),
     only "endp" is still accepted for the pending procedure.  */
  if (unwind.proc_pending.sym
      && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
    return 1;
  return unwind_diagnostic ("procedure", directive);
}

/* Return 1 if a directive is in a prologue, -1 if a directive isn't in
   a prologue but the unwind directive check is set to warning, 0 if
   a directive isn't in a prologue and the unwind directive check is
   set to error.  */

static int
in_prologue (const char *directive)
{
  int in = in_procedure (directive);

  if (in > 0 && !unwind.prologue)
    in = unwind_diagnostic ("prologue", directive);

  /* Flush any deferred register-save bookkeeping.  */
  check_pending_save ();
  return in;
}

/* Return 1 if a directive is in a body, -1 if a directive isn't in a
   body but the unwind directive check is set to warning, 0 if a
   directive isn't in a body and the unwind directive check is set to
   error.
*/
static int
in_body (const char *directive)
{
  int in = in_procedure (directive);

  if (in > 0 && !unwind.body)
    in = unwind_diagnostic ("body region", directive);
  return in;
}

/* Append PTR (possibly a chain of records, possibly NULL) to the
   unwind record list, then consume an optional ",tag" operand and the
   rest of the line.  SEP is the separator the operand parser returned,
   or NOT_A_CHAR when the caller already consumed the whole line.
   Note: the original code repeated the current_entry update a second
   time after the if (ptr) block; that copy was a no-op (assigning NULL
   when ptr is NULL, redundant otherwise) and has been dropped.  */

static void
add_unwind_entry (unw_rec_list *ptr, int sep)
{
  if (ptr)
    {
      if (unwind.tail)
	unwind.tail->next = ptr;
      else
	unwind.list = ptr;
      unwind.tail = ptr;

      /* The current entry can in fact be a chain of unwind entries.  */
      if (unwind.current_entry == NULL)
	unwind.current_entry = ptr;
    }

  if (sep == ',')
    {
      char *name;
      /* Parse a tag permitted for the current directive.  */
      int ch;

      SKIP_WHITESPACE ();
      ch = get_symbol_name (&name);
      /* FIXME: For now, just issue a warning that this isn't implemented.  */
      {
	static int warned;

	if (!warned)
	  {
	    warned = 1;
	    as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
	  }
      }
      (void) restore_line_pointer (ch);
    }

  if (sep != NOT_A_CHAR)
    demand_empty_rest_of_line ();
}

/* Handle ".fframe size": fixed-size memory stack frame.  */

static void
dot_fframe (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e;
  int sep;

  if (!in_prologue ("fframe"))
    return;

  sep = parse_operand_and_eval (&e, ',');

  if (e.X_op != O_constant)
    {
      as_bad (_("First operand to .fframe must be a constant"));
      e.X_add_number = 0;
    }
  add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
}

/* Handle ".vframe gr": variable-size frame whose psp is kept in a
   general register.  */

static void
dot_vframe (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e;
  unsigned reg;
  int sep;

  if (!in_prologue ("vframe"))
    return;

  sep = parse_operand_and_eval (&e, ',');

  reg = e.X_add_number - REG_GR;
  if (e.X_op != O_register || reg > 127)
    {
      as_bad (_("First operand to .vframe must be a general register"));
      reg = 0;
    }
  add_unwind_entry (output_mem_stack_v (), sep);
  if (!
(unwind.prologue_mask & 2)) add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR); else if (reg != unwind.prologue_gr + (unsigned) popcount (unwind.prologue_mask & -(2 << 1))) as_warn (_("Operand of .vframe contradicts .prologue")); } static void dot_vframesp (int psp) { expressionS e; int sep; if (psp) as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant")); if (!in_prologue ("vframesp")) return; sep = parse_operand_and_eval (&e, ','); if (e.X_op != O_constant) { as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)")); e.X_add_number = 0; } add_unwind_entry (output_mem_stack_v (), sep); add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR); } static void dot_save (int dummy ATTRIBUTE_UNUSED) { expressionS e1, e2; unsigned reg1, reg2; int sep; if (!in_prologue ("save")) return; sep = parse_operand_and_eval (&e1, ','); if (sep == ',') sep = parse_operand_and_eval (&e2, ','); else e2.X_op = O_absent; reg1 = e1.X_add_number; /* Make sure its a valid ar.xxx reg, OR its br0, aka 'rp'. */ if (e1.X_op != O_register) { as_bad (_("First operand to .save not a register")); reg1 = REG_PR; /* Anything valid is good here. 
*/ } reg2 = e2.X_add_number - REG_GR; if (e2.X_op != O_register || reg2 > 127) { as_bad (_("Second operand to .save not a valid register")); reg2 = 0; } switch (reg1) { case REG_AR + AR_BSP: add_unwind_entry (output_bsp_when (), sep); add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR); break; case REG_AR + AR_BSPSTORE: add_unwind_entry (output_bspstore_when (), sep); add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR); break; case REG_AR + AR_RNAT: add_unwind_entry (output_rnat_when (), sep); add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR); break; case REG_AR + AR_UNAT: add_unwind_entry (output_unat_when (), sep); add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR); break; case REG_AR + AR_FPSR: add_unwind_entry (output_fpsr_when (), sep); add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR); break; case REG_AR + AR_PFS: add_unwind_entry (output_pfs_when (), sep); if (! (unwind.prologue_mask & 4)) add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR); else if (reg2 != unwind.prologue_gr + (unsigned) popcount (unwind.prologue_mask & -(4 << 1))) as_warn (_("Second operand of .save contradicts .prologue")); break; case REG_AR + AR_LC: add_unwind_entry (output_lc_when (), sep); add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR); break; case REG_BR: add_unwind_entry (output_rp_when (), sep); if (! (unwind.prologue_mask & 8)) add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR); else if (reg2 != unwind.prologue_gr) as_warn (_("Second operand of .save contradicts .prologue")); break; case REG_PR: add_unwind_entry (output_preds_when (), sep); if (! 
(unwind.prologue_mask & 1)) add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR); else if (reg2 != unwind.prologue_gr + (unsigned) popcount (unwind.prologue_mask & -(1 << 1))) as_warn (_("Second operand of .save contradicts .prologue")); break; case REG_PRIUNAT: add_unwind_entry (output_priunat_when_gr (), sep); add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR); break; default: as_bad (_("First operand to .save not a valid register")); add_unwind_entry (NULL, sep); break; } } static void dot_restore (int dummy ATTRIBUTE_UNUSED) { expressionS e1; unsigned long ecount; /* # of _additional_ regions to pop */ int sep; if (!in_body ("restore")) return; sep = parse_operand_and_eval (&e1, ','); if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12) as_bad (_("First operand to .restore must be stack pointer (sp)")); if (sep == ',') { expressionS e2; sep = parse_operand_and_eval (&e2, ','); if (e2.X_op != O_constant || e2.X_add_number < 0) { as_bad (_("Second operand to .restore must be a constant >= 0")); e2.X_add_number = 0; } ecount = e2.X_add_number; } else ecount = unwind.prologue_count - 1; if (ecount >= unwind.prologue_count) { as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"), ecount + 1, unwind.prologue_count); ecount = 0; } add_unwind_entry (output_epilogue (ecount), sep); if (ecount < unwind.prologue_count) unwind.prologue_count -= ecount + 1; else unwind.prologue_count = 0; } static void dot_restorereg (int pred) { unsigned int qp, ab, reg; expressionS e; int sep; const char * const po = pred ? "restorereg.p" : "restorereg"; if (!in_procedure (po)) return; if (pred) sep = parse_predicate_and_operand (&e, &qp, po); else { sep = parse_operand_and_eval (&e, ','); qp = 0; } convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred); add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep); } static char *special_linkonce_name[] = { ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi." 
}; static void start_unwind_section (const segT text_seg, int sec_index) { /* Use a slightly ugly scheme to derive the unwind section names from the text section name: text sect. unwind table sect. name: name: comments: ---------- ----------------- -------------------------------- .text .IA_64.unwind .text.foo .IA_64.unwind.text.foo .foo .IA_64.unwind.foo .gnu.linkonce.t.foo .gnu.linkonce.ia64unw.foo _info .IA_64.unwind_info gas issues error message (ditto) _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto) This mapping is done so that: (a) An object file with unwind info only in .text will use unwind section names .IA_64.unwind and .IA_64.unwind_info. This follows the letter of the ABI and also ensures backwards compatibility with older toolchains. (b) An object file with unwind info in multiple text sections will use separate unwind sections for each text section. This allows us to properly set the "sh_info" and "sh_link" fields in SHT_IA_64_UNWIND as required by the ABI and also lets GNU ld support programs with multiple segments containing unwind info (as might be the case for certain embedded applications). (c) An error is issued if there would be a name clash. */ const char *text_name, *sec_text_name; char *sec_name; const char *prefix = special_section_name [sec_index]; const char *suffix; size_t prefix_len, suffix_len, sec_name_len; sec_text_name = segment_name (text_seg); text_name = sec_text_name; if (strncmp (text_name, "_info", 5) == 0) { as_bad (_("Illegal section name `%s' (causes unwind section name clash)"), text_name); ignore_rest_of_line (); return; } if (strcmp (text_name, ".text") == 0) text_name = ""; /* Build the unwind section name by appending the (possibly stripped) text section name to the unwind prefix. 
*/ suffix = text_name; if (strncmp (text_name, ".gnu.linkonce.t.", sizeof (".gnu.linkonce.t.") - 1) == 0) { prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND]; suffix += sizeof (".gnu.linkonce.t.") - 1; } prefix_len = strlen (prefix); suffix_len = strlen (suffix); sec_name_len = prefix_len + suffix_len; sec_name = alloca (sec_name_len + 1); memcpy (sec_name, prefix, prefix_len); memcpy (sec_name + prefix_len, suffix, suffix_len); sec_name [sec_name_len] = '\0'; /* Handle COMDAT group. */ if ((text_seg->flags & SEC_LINK_ONCE) != 0 && (elf_section_flags (text_seg) & SHF_GROUP) != 0) { char *section; size_t len, group_name_len; const char *group_name = elf_group_name (text_seg); if (group_name == NULL) { as_bad (_("Group section `%s' has no group signature"), sec_text_name); ignore_rest_of_line (); return; } /* We have to construct a fake section directive. */ group_name_len = strlen (group_name); len = (sec_name_len + 16 /* ,"aG",@progbits, */ + group_name_len /* ,group_name */ + 7); /* ,comdat */ section = alloca (len + 1); memcpy (section, sec_name, sec_name_len); memcpy (section + sec_name_len, ",\"aG\",@progbits,", 16); memcpy (section + sec_name_len + 16, group_name, group_name_len); memcpy (section + len - 7, ",comdat", 7); section [len] = '\0'; set_section (section); } else { set_section (sec_name); bfd_set_section_flags (stdoutput, now_seg, SEC_LOAD | SEC_ALLOC | SEC_READONLY); } elf_linked_to_section (now_seg) = text_seg; } static void generate_unwind_image (const segT text_seg) { int size, pad; unw_rec_list *list; /* Mark the end of the unwind info, so that we can compute the size of the last unwind region. */ add_unwind_entry (output_endp (), NOT_A_CHAR); /* Force out pending instructions, to make sure all unwind records have a valid slot_number field. */ ia64_flush_insns (); /* Generate the unwind record. 
*/ list = optimize_unw_records (unwind.list); fixup_unw_records (list, 1); size = calc_record_size (list); if (size > 0 || unwind.force_unwind_entry) { unwind.force_unwind_entry = 0; /* pad to pointer-size boundary. */ pad = size % md.pointer_size; if (pad != 0) size += md.pointer_size - pad; /* Add 8 for the header. */ size += 8; /* Add a pointer for the personality offset. */ if (unwind.personality_routine) size += md.pointer_size; } /* If there are unwind records, switch sections, and output the info. */ if (size != 0) { expressionS exp; bfd_reloc_code_real_type reloc; start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO); /* Make sure the section has 4 byte alignment for ILP32 and 8 byte alignment for LP64. */ frag_align (md.pointer_size_shift, 0, 0); record_alignment (now_seg, md.pointer_size_shift); /* Set expression which points to start of unwind descriptor area. */ unwind.info = expr_build_dot (); frag_var (rs_machine_dependent, size, size, 0, 0, (offsetT) (long) unwind.personality_routine, (char *) list); /* Add the personality address to the image. 
*/ if (unwind.personality_routine != 0) { exp.X_op = O_symbol; exp.X_add_symbol = unwind.personality_routine; exp.X_add_number = 0; if (md.flags & EF_IA_64_BE) { if (md.flags & EF_IA_64_ABI64) reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB; else reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB; } else { if (md.flags & EF_IA_64_ABI64) reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB; else reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB; } fix_new_exp (frag_now, frag_now_fix () - md.pointer_size, md.pointer_size, &exp, 0, reloc); unwind.personality_routine = 0; } } free_saved_prologue_counts (); unwind.list = unwind.tail = unwind.current_entry = NULL; } static void dot_handlerdata (int dummy ATTRIBUTE_UNUSED) { if (!in_procedure ("handlerdata")) return; unwind.force_unwind_entry = 1; /* Remember which segment we're in so we can switch back after .endp */ unwind.saved_text_seg = now_seg; unwind.saved_text_subseg = now_subseg; /* Generate unwind info into unwind-info section and then leave that section as the currently active one so dataXX directives go into the language specific data area of the unwind info block. */ generate_unwind_image (now_seg); demand_empty_rest_of_line (); } static void dot_unwentry (int dummy ATTRIBUTE_UNUSED) { if (!in_procedure ("unwentry")) return; unwind.force_unwind_entry = 1; demand_empty_rest_of_line (); } static void dot_altrp (int dummy ATTRIBUTE_UNUSED) { expressionS e; unsigned reg; if (!in_prologue ("altrp")) return; parse_operand_and_eval (&e, 0); reg = e.X_add_number - REG_BR; if (e.X_op != O_register || reg > 7) { as_bad (_("First operand to .altrp not a valid branch register")); reg = 0; } add_unwind_entry (output_rp_br (reg), 0); } static void dot_savemem (int psprel) { expressionS e1, e2; int sep; int reg1, val; const char * const po = psprel ? 
"savepsp" : "savesp"; if (!in_prologue (po)) return; sep = parse_operand_and_eval (&e1, ','); if (sep == ',') sep = parse_operand_and_eval (&e2, ','); else e2.X_op = O_absent; reg1 = e1.X_add_number; val = e2.X_add_number; /* Make sure its a valid ar.xxx reg, OR its br0, aka 'rp'. */ if (e1.X_op != O_register) { as_bad (_("First operand to .%s not a register"), po); reg1 = REG_PR; /* Anything valid is good here. */ } if (e2.X_op != O_constant) { as_bad (_("Second operand to .%s not a constant"), po); val = 0; } switch (reg1) { case REG_AR + AR_BSP: add_unwind_entry (output_bsp_when (), sep); add_unwind_entry ((psprel ? output_bsp_psprel : output_bsp_sprel) (val), NOT_A_CHAR); break; case REG_AR + AR_BSPSTORE: add_unwind_entry (output_bspstore_when (), sep); add_unwind_entry ((psprel ? output_bspstore_psprel : output_bspstore_sprel) (val), NOT_A_CHAR); break; case REG_AR + AR_RNAT: add_unwind_entry (output_rnat_when (), sep); add_unwind_entry ((psprel ? output_rnat_psprel : output_rnat_sprel) (val), NOT_A_CHAR); break; case REG_AR + AR_UNAT: add_unwind_entry (output_unat_when (), sep); add_unwind_entry ((psprel ? output_unat_psprel : output_unat_sprel) (val), NOT_A_CHAR); break; case REG_AR + AR_FPSR: add_unwind_entry (output_fpsr_when (), sep); add_unwind_entry ((psprel ? output_fpsr_psprel : output_fpsr_sprel) (val), NOT_A_CHAR); break; case REG_AR + AR_PFS: add_unwind_entry (output_pfs_when (), sep); add_unwind_entry ((psprel ? output_pfs_psprel : output_pfs_sprel) (val), NOT_A_CHAR); break; case REG_AR + AR_LC: add_unwind_entry (output_lc_when (), sep); add_unwind_entry ((psprel ? output_lc_psprel : output_lc_sprel) (val), NOT_A_CHAR); break; case REG_BR: add_unwind_entry (output_rp_when (), sep); add_unwind_entry ((psprel ? output_rp_psprel : output_rp_sprel) (val), NOT_A_CHAR); break; case REG_PR: add_unwind_entry (output_preds_when (), sep); add_unwind_entry ((psprel ? 
output_preds_psprel : output_preds_sprel) (val), NOT_A_CHAR); break; case REG_PRIUNAT: add_unwind_entry (output_priunat_when_mem (), sep); add_unwind_entry ((psprel ? output_priunat_psprel : output_priunat_sprel) (val), NOT_A_CHAR); break; default: as_bad (_("First operand to .%s not a valid register"), po); add_unwind_entry (NULL, sep); break; } } static void dot_saveg (int dummy ATTRIBUTE_UNUSED) { expressionS e; unsigned grmask; int sep; if (!in_prologue ("save.g")) return; sep = parse_operand_and_eval (&e, ','); grmask = e.X_add_number; if (e.X_op != O_constant || e.X_add_number <= 0 || e.X_add_number > 0xf) { as_bad (_("First operand to .save.g must be a positive 4-bit constant")); grmask = 0; } if (sep == ',') { unsigned reg; int n = popcount (grmask); parse_operand_and_eval (&e, 0); reg = e.X_add_number - REG_GR; if (e.X_op != O_register || reg > 127) { as_bad (_("Second operand to .save.g must be a general register")); reg = 0; } else if (reg > 128U - n) { as_bad (_("Second operand to .save.g must be the first of %d general registers"), n); reg = 0; } add_unwind_entry (output_gr_gr (grmask, reg), 0); } else add_unwind_entry (output_gr_mem (grmask), 0); } static void dot_savef (int dummy ATTRIBUTE_UNUSED) { expressionS e; if (!in_prologue ("save.f")) return; parse_operand_and_eval (&e, 0); if (e.X_op != O_constant || e.X_add_number <= 0 || e.X_add_number > 0xfffff) { as_bad (_("Operand to .save.f must be a positive 20-bit constant")); e.X_add_number = 0; } add_unwind_entry (output_fr_mem (e.X_add_number), 0); } static void dot_saveb (int dummy ATTRIBUTE_UNUSED) { expressionS e; unsigned brmask; int sep; if (!in_prologue ("save.b")) return; sep = parse_operand_and_eval (&e, ','); brmask = e.X_add_number; if (e.X_op != O_constant || e.X_add_number <= 0 || e.X_add_number > 0x1f) { as_bad (_("First operand to .save.b must be a positive 5-bit constant")); brmask = 0; } if (sep == ',') { unsigned reg; int n = popcount (brmask); parse_operand_and_eval (&e, 0); reg 
= e.X_add_number - REG_GR; if (e.X_op != O_register || reg > 127) { as_bad (_("Second operand to .save.b must be a general register")); reg = 0; } else if (reg > 128U - n) { as_bad (_("Second operand to .save.b must be the first of %d general registers"), n); reg = 0; } add_unwind_entry (output_br_gr (brmask, reg), 0); } else add_unwind_entry (output_br_mem (brmask), 0); } static void dot_savegf (int dummy ATTRIBUTE_UNUSED) { expressionS e1, e2; if (!in_prologue ("save.gf")) return; if (parse_operand_and_eval (&e1, ',') == ',') parse_operand_and_eval (&e2, 0); else e2.X_op = O_absent; if (e1.X_op != O_constant || e1.X_add_number < 0 || e1.X_add_number > 0xf) { as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant")); e1.X_op = O_absent; e1.X_add_number = 0; } if (e2.X_op != O_constant || e2.X_add_number < 0 || e2.X_add_number > 0xfffff) { as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant")); e2.X_op = O_absent; e2.X_add_number = 0; } if (e1.X_op == O_constant && e2.X_op == O_constant && e1.X_add_number == 0 && e2.X_add_number == 0) as_bad (_("Operands to .save.gf may not be both zero")); add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0); } static void dot_spill (int dummy ATTRIBUTE_UNUSED) { expressionS e; if (!in_prologue ("spill")) return; parse_operand_and_eval (&e, 0); if (e.X_op != O_constant) { as_bad (_("Operand to .spill must be a constant")); e.X_add_number = 0; } add_unwind_entry (output_spill_base (e.X_add_number), 0); } static void dot_spillreg (int pred) { int sep; unsigned int qp, ab, xy, reg, treg; expressionS e; const char * const po = pred ? 
"spillreg.p" : "spillreg";

  /* Record a preserved register spilled to another register.  PO is
     the directive name used in diagnostics.  */
  if (!in_procedure (po))
    return;

  if (pred)
    /* Predicated form: first operand is the qualifying predicate.  */
    sep = parse_predicate_and_operand (&e, &qp, po);
  else
    {
      sep = parse_operand_and_eval (&e, ',');
      qp = 0;
    }

  /* First (non-predicate) operand: the preserved register being spilled.  */
  convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);

  /* Next operand: the target register of the spill.  */
  if (sep == ',')
    sep = parse_operand_and_eval (&e, ',');
  else
    e.X_op = O_absent;
  convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);

  add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
}

/* Handle .spillsp/.spillpsp and their predicated ".p" forms: a
   preserved register spilled to memory at an sp-relative (PSPREL == 0)
   or psp-relative offset.  A negative PSPREL encodes the predicated
   variant (bitwise complement of the real flag).  */

static void
dot_spillmem (int psprel)
{
  expressionS e;
  int pred = (psprel < 0), sep;
  unsigned int qp, ab, reg;
  const char * po;

  if (pred)
    {
      /* Recover the real psprel flag and pick the matching directive
	 name for diagnostics.  */
      psprel = ~psprel;
      po = psprel ? "spillpsp.p" : "spillsp.p";
    }
  else
    po = psprel ? "spillpsp" : "spillsp";

  if (!in_procedure (po))
    return;

  if (pred)
    sep = parse_predicate_and_operand (&e, &qp, po);
  else
    {
      sep = parse_operand_and_eval (&e, ',');
      qp = 0;
    }

  /* First (non-predicate) operand: the preserved register.  */
  convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);

  /* Next operand: the constant spill offset.  */
  if (sep == ',')
    sep = parse_operand_and_eval (&e, ',');
  else
    e.X_op = O_absent;
  if (e.X_op != O_constant)
    {
      as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
      e.X_add_number = 0;
    }

  if (psprel)
    add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
  else
    add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
}

/* Look up the prologue count saved for label LBL by ".label_state".
   Diagnoses and returns 1 if LBL was never recorded.  */

static unsigned int
get_saved_prologue_count (unsigned long lbl)
{
  label_prologue_count *lpc = unwind.saved_prologue_counts;

  while (lpc != NULL && lpc->label_number != lbl)
    lpc = lpc->next;
  if (lpc != NULL)
    return lpc->prologue_count;
  as_bad (_("Missing .label_state %ld"), lbl);
  return 1;
}

/* Record COUNT as the prologue count for label LBL, overwriting any
   earlier entry for the same label.  */

static void
save_prologue_count (unsigned long lbl, unsigned int count)
{
  label_prologue_count *lpc = unwind.saved_prologue_counts;

  while (lpc != NULL && lpc->label_number != lbl)
    lpc = lpc->next;
  if (lpc != NULL)
    lpc->prologue_count = count;
  else
    {
      label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc));

      new_lpc->next = unwind.saved_prologue_counts;
      new_lpc->label_number = lbl;
      new_lpc->prologue_count = count;
      unwind.saved_prologue_counts = new_lpc;
    }
}
/* Release the whole saved-prologue-count list built by
   save_prologue_count and reset the list head.  */
static void
free_saved_prologue_counts ()
{
  label_prologue_count *lpc = unwind.saved_prologue_counts;
  label_prologue_count *next;

  while (lpc != NULL)
    {
      next = lpc->next;
      free (lpc);
      lpc = next;
    }

  unwind.saved_prologue_counts = NULL;
}

/* Handle .label_state N: remember the current prologue count under
   label N so a later .copy_state can restore it.  */
static void
dot_label_state (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e;

  if (!in_body ("label_state"))
    return;

  parse_operand_and_eval (&e, 0);
  if (e.X_op == O_constant)
    save_prologue_count (e.X_add_number, unwind.prologue_count);
  else
    {
      as_bad (_("Operand to .label_state must be a constant"));
      e.X_add_number = 0;
    }
  add_unwind_entry (output_label_state (e.X_add_number), 0);
}

/* Handle .copy_state N: restore the prologue count saved under label N
   by a matching .label_state.  */
static void
dot_copy_state (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e;

  if (!in_body ("copy_state"))
    return;

  parse_operand_and_eval (&e, 0);
  if (e.X_op == O_constant)
    unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
  else
    {
      as_bad (_("Operand to .copy_state must be a constant"));
      e.X_add_number = 0;
    }
  add_unwind_entry (output_copy_state (e.X_add_number), 0);
}

/* Handle .unwabi abi, context: emit an unwind ABI marker.  Both
   operands must be constants.  */
static void
dot_unwabi (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e1, e2;
  unsigned char sep;

  if (!in_prologue ("unwabi"))
    return;

  sep = parse_operand_and_eval (&e1, ',');
  if (sep == ',')
    parse_operand_and_eval (&e2, 0);
  else
    e2.X_op = O_absent;

  if (e1.X_op != O_constant)
    {
      as_bad (_("First operand to .unwabi must be a constant"));
      e1.X_add_number = 0;
    }

  if (e2.X_op != O_constant)
    {
      as_bad (_("Second operand to .unwabi must be a constant"));
      e2.X_add_number = 0;
    }

  add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
}

/* Handle .personality NAME: record the personality routine symbol and
   force generation of an unwind table entry for this procedure.  */
static void
dot_personality (int dummy ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!in_procedure ("personality"))
    return;
  SKIP_WHITESPACE ();
  c = get_symbol_name (&name);
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  unwind.force_unwind_entry = 1;
  *p = c;
  SKIP_WHITESPACE_AFTER_NAME ();
  demand_empty_rest_of_line ();
}

/* Handle .proc [name [, name ...]]: open a procedure for unwind
   bookkeeping.  Each listed name is an entry point and is marked as a
   function symbol; with no names, the current location is used.  Also
   resets all per-procedure unwind state.  */
static void
dot_proc (int dummy ATTRIBUTE_UNUSED)
{
  char *name, *p, c;
  symbolS *sym;
  proc_pending *pending, *last_pending;

  /* An unterminated previous procedure means a missing .endp.  */
  if (unwind.proc_pending.sym)
    {
      (md.unwind_check == unwind_check_warning
       ? as_warn
       : as_bad) (_("Missing .endp after previous .proc"));
      while (unwind.proc_pending.next)
	{
	  pending = unwind.proc_pending.next;
	  unwind.proc_pending.next = pending->next;
	  free (pending);
	}
    }
  last_pending = NULL;

  /* Parse names of main and alternate entry points and mark them as
     function symbols: */
  while (1)
    {
      SKIP_WHITESPACE ();
      c = get_symbol_name (&name);
      p = input_line_pointer;
      if (!*name)
	as_bad (_("Empty argument of .proc"));
      else
	{
	  sym = symbol_find_or_make (name);
	  if (S_IS_DEFINED (sym))
	    as_bad (_("`%s' was already defined"), name);
	  else if (!last_pending)
	    {
	      unwind.proc_pending.sym = sym;
	      last_pending = &unwind.proc_pending;
	    }
	  else
	    {
	      pending = xmalloc (sizeof (*pending));
	      pending->sym = sym;
	      last_pending = last_pending->next = pending;
	    }
	  symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
	}
      *p = c;
      SKIP_WHITESPACE_AFTER_NAME ();
      if (*input_line_pointer != ',')
	break;
      ++input_line_pointer;
    }
  /* No names given: anchor the procedure at the current location.  */
  if (!last_pending)
    {
      unwind.proc_pending.sym = expr_build_dot ();
      last_pending = &unwind.proc_pending;
    }
  last_pending->next = NULL;
  demand_empty_rest_of_line ();
  ia64_do_align (16);

  unwind.prologue = 0;
  unwind.prologue_count = 0;
  unwind.body = 0;
  unwind.insn = 0;
  unwind.list = unwind.tail = unwind.current_entry = NULL;
  unwind.personality_routine = 0;
}

/* Handle .body: switch unwind state from prologue to body region.  */
static void
dot_body (int dummy ATTRIBUTE_UNUSED)
{
  if (!in_procedure ("body"))
    return;
  if (!unwind.prologue && !unwind.body && unwind.insn)
    as_warn (_("Initial .body should precede any instructions"));
  check_pending_save ();

  unwind.prologue = 0;
  unwind.prologue_mask = 0;
  unwind.body = 1;

  add_unwind_entry (output_body (), 0);
}

/* Handle .prologue [mask, grsave]: open a prologue region.  MASK is a
   4-bit save mask; GRSAVE names the first of the general registers
   holding the saved state (a constant second operand is accepted but
   deprecated).  */
static void
dot_prologue (int dummy ATTRIBUTE_UNUSED)
{
  unsigned mask = 0, grsave = 0;

  if (!in_procedure ("prologue"))
    return;
  if (unwind.prologue)
    {
      as_bad (_(".prologue within prologue"));
      ignore_rest_of_line ();
      return;
    }
  if (!unwind.body && unwind.insn)
    as_warn (_("Initial .prologue should precede any instructions"));

  if (!is_it_end_of_statement ())
    {
      expressionS e;
      int n, sep = parse_operand_and_eval (&e, ',');

      if (e.X_op != O_constant
	  || e.X_add_number < 0
	  || e.X_add_number > 0xf)
	as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
      else if (e.X_add_number == 0)
	as_warn (_("Pointless use of zero first operand to .prologue"));
      else
	mask = e.X_add_number;

      n = popcount (mask);

      if (sep == ',')
	parse_operand_and_eval (&e, 0);
      else
	e.X_op = O_absent;

      if (e.X_op == O_constant
	  && e.X_add_number >= 0
	  && e.X_add_number < 128)
	{
	  if (md.unwind_check == unwind_check_error)
	    as_warn (_("Using a constant as second operand to .prologue is deprecated"));
	  grsave = e.X_add_number;
	}
      else if (e.X_op != O_register
	       || (grsave = e.X_add_number - REG_GR) > 127)
	{
	  as_bad (_("Second operand to .prologue must be a general register"));
	  grsave = 0;
	}
      /* The N saved registers must all fit below r128.  */
      else if (grsave > 128U - n)
	{
	  as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
	  grsave = 0;
	}
    }

  if (mask)
    add_unwind_entry (output_prologue_gr (mask, grsave), 0);
  else
    add_unwind_entry (output_prologue (), 0);

  unwind.prologue = 1;
  unwind.prologue_mask = mask;
  unwind.prologue_gr = grsave;
  unwind.body = 0;
  ++unwind.prologue_count;
}

/* Handle .endp [name [, name ...]]: close the current procedure,
   generate its unwind image and unwind-table entry, set symbol sizes
   for all entry points, and cross-check the listed names against the
   ones given to .proc.  */
static void
dot_endp (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e;
  int bytes_per_address;
  long where;
  segT saved_seg;
  subsegT saved_subseg;
  proc_pending *pending;
  int unwind_check = md.unwind_check;

  /* Temporarily make "not inside a procedure" a hard error here.  */
  md.unwind_check = unwind_check_error;
  if (!in_procedure ("endp"))
    return;
  md.unwind_check = unwind_check;

  if (unwind.saved_text_seg)
    {
      saved_seg = unwind.saved_text_seg;
      saved_subseg = unwind.saved_text_subseg;
      unwind.saved_text_seg = NULL;
    }
  else
    {
      saved_seg = now_seg;
      saved_subseg = now_subseg;
    }

  insn_group_break (1, 0, 0);

  /* If there wasn't a .handlerdata, we haven't generated an image yet.
*/
  if (!unwind.info)
    generate_unwind_image (saved_seg);

  if (unwind.info || unwind.force_unwind_entry)
    {
      symbolS *proc_end;

      subseg_set (md.last_text_seg, 0);
      proc_end = expr_build_dot ();

      start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);

      /* Make sure that section has 4 byte alignment for ILP32 and
	 8 byte alignment for LP64.  */
      record_alignment (now_seg, md.pointer_size_shift);

      /* Need space for 3 pointers for procedure start, procedure end,
	 and unwind info.  */
      memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
      where = frag_now_fix () - (3 * md.pointer_size);
      bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;

      /* Issue the values of  a) Proc Begin, b) Proc End, c) Unwind Record.  */
      e.X_op = O_pseudo_fixup;
      e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
      e.X_add_number = 0;
      /* For a defined non-local start symbol, reference a fresh
	 temporary symbol at the same place so the table entry does not
	 create an external reference.  */
      if (!S_IS_LOCAL (unwind.proc_pending.sym)
	  && S_IS_DEFINED (unwind.proc_pending.sym))
	e.X_add_symbol
	  = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
			     S_GET_VALUE (unwind.proc_pending.sym),
			     symbol_get_frag (unwind.proc_pending.sym));
      else
	e.X_add_symbol = unwind.proc_pending.sym;
      ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
			 BFD_RELOC_NONE);

      e.X_op = O_pseudo_fixup;
      e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
      e.X_add_number = 0;
      e.X_add_symbol = proc_end;
      ia64_cons_fix_new (frag_now, where + bytes_per_address,
			 bytes_per_address, &e, BFD_RELOC_NONE);

      if (unwind.info)
	{
	  e.X_op = O_pseudo_fixup;
	  e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
	  e.X_add_number = 0;
	  e.X_add_symbol = unwind.info;
	  ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
			     bytes_per_address, &e, BFD_RELOC_NONE);
	}
    }
  subseg_set (saved_seg, saved_subseg);

  /* Set symbol sizes.
*/
  pending = &unwind.proc_pending;
  if (S_GET_NAME (pending->sym))
    {
      do
	{
	  symbolS *sym = pending->sym;

	  if (!S_IS_DEFINED (sym))
	    as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
	  else if (S_GET_SIZE (sym) == 0
		   && symbol_get_obj (sym)->size == NULL)
	    {
	      fragS *frag = symbol_get_frag (sym);

	      if (frag)
		{
		  /* Size can be computed now only if the symbol is in
		     the current frag of a normal section; otherwise
		     record a deferred end-minus-start expression.  */
		  if (frag == frag_now && SEG_NORMAL (now_seg))
		    S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
		  else
		    {
		      symbol_get_obj (sym)->size =
			(expressionS *) xmalloc (sizeof (expressionS));
		      symbol_get_obj (sym)->size->X_op = O_subtract;
		      symbol_get_obj (sym)->size->X_add_symbol
			= symbol_new (FAKE_LABEL_NAME, now_seg,
				      frag_now_fix (), frag_now);
		      symbol_get_obj (sym)->size->X_op_symbol = sym;
		      symbol_get_obj (sym)->size->X_add_number = 0;
		    }
		}
	    }
	} while ((pending = pending->next) != NULL);
    }

  /* Parse names of main and alternate entry points.  */
  while (1)
    {
      char *name, *p, c;

      SKIP_WHITESPACE ();
      c = get_symbol_name (&name);
      p = input_line_pointer;
      if (!*name)
	(md.unwind_check == unwind_check_warning
	 ? as_warn
	 : as_bad) (_("Empty argument of .endp"));
      else
	{
	  symbolS *sym = symbol_find (name);

	  /* Clear matching pending entries so leftovers can be
	     diagnosed below.  */
	  for (pending = &unwind.proc_pending; pending; pending = pending->next)
	    {
	      if (sym == pending->sym)
		{
		  pending->sym = NULL;
		  break;
		}
	    }
	  if (!sym || !pending)
	    as_warn (_("`%s' was not specified with previous .proc"), name);
	}
      *p = c;
      SKIP_WHITESPACE_AFTER_NAME ();
      if (*input_line_pointer != ',')
	break;
      ++input_line_pointer;
    }
  demand_empty_rest_of_line ();

  /* Deliberately only checking for the main entry point here; the
     language spec even says all arguments to .endp are ignored.
*/
  if (unwind.proc_pending.sym
      && S_GET_NAME (unwind.proc_pending.sym)
      && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
    as_warn (_("`%s' should be an operand to this .endp"),
	     S_GET_NAME (unwind.proc_pending.sym));

  while (unwind.proc_pending.next)
    {
      pending = unwind.proc_pending.next;
      unwind.proc_pending.next = pending->next;
      free (pending);
    }

  unwind.proc_pending.sym = unwind.info = NULL;
}

/* Handle the explicit bundle template pseudo-ops (.mii, .mmi, ...):
   record the user-requested template for the current bundle.  */
static void
dot_template (int template_val)
{
  CURR_SLOT.user_template = template_val;
}

/* Handle .regstk ins, locs, outs, rots (or no operands, meaning all
   zero): declare the register-stack frame layout.  */
static void
dot_regstk (int dummy ATTRIBUTE_UNUSED)
{
  int ins, locs, outs, rots;

  if (is_it_end_of_statement ())
    ins = locs = outs = rots = 0;
  else
    {
      ins = get_absolute_expression ();
      if (*input_line_pointer++ != ',')
	goto err;
      locs = get_absolute_expression ();
      if (*input_line_pointer++ != ',')
	goto err;
      outs = get_absolute_expression ();
      if (*input_line_pointer++ != ',')
	goto err;
      rots = get_absolute_expression ();
    }
  set_regstack (ins, locs, outs, rots);
  return;

 err:
  as_bad (_("Comma expected"));
  ignore_rest_of_line ();
}

/* Handle .rotr/.rotf/.rotp name[count] [, name[count] ...]: declare
   named sets of rotating general, floating-point, or predicate
   registers, entering each name in the dynamic-register hash.  */
static void
dot_rot (int type)
{
  offsetT num_regs;
  valueT num_alloced = 0;
  struct dynreg **drpp, *dr;
  int ch, base_reg = 0;
  char *name, *start;
  size_t len;

  switch (type)
    {
    case DYNREG_GR: base_reg = REG_GR + 32; break;
    case DYNREG_FR: base_reg = REG_FR + 32; break;
    case DYNREG_PR: base_reg = REG_P + 16; break;
    default: break;
    }

  /* First, remove existing names from hash table.  */
  for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
    {
      hash_delete (md.dynreg_hash, dr->name, FALSE);
      /* FIXME: Free dr->name.
*/
      dr->num_regs = 0;
    }

  drpp = &md.dynreg[type];
  while (1)
    {
      ch = get_symbol_name (&start);
      len = strlen (ia64_canonicalize_symbol_name (start));
      *input_line_pointer = ch;

      SKIP_WHITESPACE_AFTER_NAME ();
      if (*input_line_pointer != '[')
	{
	  as_bad (_("Expected '['"));
	  goto err;
	}
      ++input_line_pointer;	/* skip '[' */

      num_regs = get_absolute_expression ();

      if (*input_line_pointer++ != ']')
	{
	  as_bad (_("Expected ']'"));
	  goto err;
	}
      if (num_regs <= 0)
	{
	  as_bad (_("Number of elements must be positive"));
	  goto err;
	}
      SKIP_WHITESPACE ();

      /* Check the running total against the per-class limits.  */
      num_alloced += num_regs;
      switch (type)
	{
	case DYNREG_GR:
	  if (num_alloced > md.rot.num_regs)
	    {
	      as_bad (_("Used more than the declared %d rotating registers"),
		      md.rot.num_regs);
	      goto err;
	    }
	  break;
	case DYNREG_FR:
	  if (num_alloced > 96)
	    {
	      as_bad (_("Used more than the available 96 rotating registers"));
	      goto err;
	    }
	  break;
	case DYNREG_PR:
	  if (num_alloced > 48)
	    {
	      as_bad (_("Used more than the available 48 rotating registers"));
	      goto err;
	    }
	  break;

	default:
	  break;
	}

      /* Reuse list nodes from a previous .rot where possible.  */
      if (!*drpp)
	{
	  *drpp = obstack_alloc (&notes, sizeof (*dr));
	  memset (*drpp, 0, sizeof (*dr));
	}

      name = obstack_alloc (&notes, len + 1);
      memcpy (name, start, len);
      name[len] = '\0';

      dr = *drpp;
      dr->name = name;
      dr->num_regs = num_regs;
      dr->base = base_reg;
      drpp = &dr->next;
      base_reg += num_regs;

      if (hash_insert (md.dynreg_hash, name, dr))
	{
	  as_bad (_("Attempt to redefine register set `%s'"), name);
	  obstack_free (&notes, name);
	  goto err;
	}

      if (*input_line_pointer != ',')
	break;
      ++input_line_pointer;	/* skip comma */
      SKIP_WHITESPACE ();
    }
  demand_empty_rest_of_line ();
  return;

 err:
  ignore_rest_of_line ();
}

/* Handle .lsb/.msb (BYTEORDER 0/1) or a segment change (-1): track the
   per-segment endianness and switch the number/float emitters to
   match.  */
static void
dot_byteorder (int byteorder)
{
  segment_info_type *seginfo = seg_info (now_seg);

  if (byteorder == -1)
    {
      if (seginfo->tc_segment_info_data.endian == 0)
	seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
      byteorder = seginfo->tc_segment_info_data.endian == 1;
    }
  else
    seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;

  if (target_big_endian != byteorder)
    {
      target_big_endian = byteorder;
      if (target_big_endian)
	{
	  ia64_number_to_chars = number_to_chars_bigendian;
	  ia64_float_to_chars = ia64_float_to_chars_bigendian;
	}
      else
	{
	  ia64_number_to_chars = number_to_chars_littleendian;
	  ia64_float_to_chars = ia64_float_to_chars_littleendian;
	}
    }
}

/* Handle .psr option [, option ...]: set or clear ELF header flags for
   byte order (lsb/msb) and ABI width (abi32/abi64).  */
static void
dot_psr (int dummy ATTRIBUTE_UNUSED)
{
  char *option;
  int ch;

  while (1)
    {
      ch = get_symbol_name (&option);
      if (strcmp (option, "lsb") == 0)
	md.flags &= ~EF_IA_64_BE;
      else if (strcmp (option, "msb") == 0)
	md.flags |= EF_IA_64_BE;
      else if (strcmp (option, "abi32") == 0)
	md.flags &= ~EF_IA_64_ABI64;
      else if (strcmp (option, "abi64") == 0)
	md.flags |= EF_IA_64_ABI64;
      else
	as_bad (_("Unknown psr option `%s'"), option);
      *input_line_pointer = ch;

      SKIP_WHITESPACE_AFTER_NAME ();
      if (*input_line_pointer != ',')
	break;

      ++input_line_pointer;
      SKIP_WHITESPACE ();
    }
  demand_empty_rest_of_line ();
}

/* Handle .ln N: set the logical source line number for debugging.  */
static void
dot_ln (int dummy ATTRIBUTE_UNUSED)
{
  new_logical_line (0, get_absolute_expression ());
  demand_empty_rest_of_line ();
}

/* Parse "<section>, <data...>" and run BUILDER(REF) with the named
   section temporarily selected; UA nonzero suppresses auto-alignment
   while building.  Backs the .xdataN/.xrealN/.xstring* pseudo-ops.  */
static void
cross_section (int ref, void (*builder) (int), int ua)
{
  char *start, *end;
  int saved_auto_align;
  unsigned int section_count;
  char *name;
  char c;

  SKIP_WHITESPACE ();
  start = input_line_pointer;
  c = get_symbol_name (&name);
  if (input_line_pointer == start)
    {
      as_bad (_("Missing section name"));
      ignore_rest_of_line ();
      return;
    }
  * input_line_pointer = c;
  SKIP_WHITESPACE_AFTER_NAME ();
  end = input_line_pointer;
  if (*input_line_pointer != ',')
    {
      as_bad (_("Comma expected after section name"));
      ignore_rest_of_line ();
      return;
    }
  *end = '\0';
  end = input_line_pointer + 1;		/* skip comma */
  /* Re-scan just the section name through obj_elf_section.  */
  input_line_pointer = start;
  md.keep_pending_output = 1;
  section_count = bfd_count_sections (stdoutput);
  obj_elf_section (0);
  /* A grown section count means the directive created the section.  */
  if (section_count != bfd_count_sections (stdoutput))
    as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
  input_line_pointer = end;
  saved_auto_align = md.auto_align;
  if (ua)
    md.auto_align = 0;
  (*builder) (ref);
  if (ua)
    md.auto_align = saved_auto_align;
  obj_elf_previous (0);
  md.keep_pending_output = 0;
}

/* Handle .xdataN: emit N-byte data into a named section.  */
static void
dot_xdata (int size)
{
  cross_section (size, cons, 0);
}

/* Why doesn't float_cons () call md_cons_align () the way cons () does?  */

/* Emit a float constant of the given KIND ('f'/'d'/'x'/'X') with the
   natural alignment for that kind.  */
static void
stmt_float_cons (int kind)
{
  size_t alignment;

  switch (kind)
    {
    case 'd':
      alignment = 8;
      break;

    case 'x':
    case 'X':
      alignment = 16;
      break;

    case 'f':
    default:
      alignment = 4;
      break;
    }
  ia64_do_align (alignment);
  float_cons (kind);
}

/* Emit SIZE-byte data with auto-alignment temporarily disabled
   (unaligned data2/data4/... variants).  */
static void
stmt_cons_ua (int size)
{
  int saved_auto_align = md.auto_align;

  md.auto_align = 0;
  cons (size);
  md.auto_align = saved_auto_align;
}

/* Handle .xrealN: emit float data into a named section.  */
static void
dot_xfloat_cons (int kind)
{
  cross_section (kind, stmt_float_cons, 0);
}

/* Handle .xstring/.xstringz: emit string data into a named section.  */
static void
dot_xstringer (int zero)
{
  cross_section (zero, stringer, 0);
}

/* Handle .xdataN.ua: emit unaligned data into a named section.  */
static void
dot_xdata_ua (int size)
{
  cross_section (size, cons, 1);
}

/* Handle .xrealN.ua: emit unaligned float data into a named section.  */
static void
dot_xfloat_cons_ua (int kind)
{
  cross_section (kind, float_cons, 1);
}

/* .reg.val <regname>,value */

/* Handle .reg.val: annotate a general register as holding a known
   constant value for dependency-violation analysis.  */
static void
dot_reg_val (int dummy ATTRIBUTE_UNUSED)
{
  expressionS reg;

  expression_and_evaluate (&reg);
  if (reg.X_op != O_register)
    {
      as_bad (_("Register name expected"));
      ignore_rest_of_line ();
    }
  else if (*input_line_pointer++ != ',')
    {
      as_bad (_("Comma expected"));
      ignore_rest_of_line ();
    }
  else
    {
      valueT value = get_absolute_expression ();
      int regno = reg.X_add_number;

      /* Only r1..r127 can usefully carry value annotations.  */
      if (regno <= REG_GR || regno > REG_GR + 127)
	as_warn (_("Register value annotation ignored"));
      else
	{
	  gr_values[regno - REG_GR].known = 1;
	  gr_values[regno - REG_GR].value = value;
	  gr_values[regno - REG_GR].path = md.path;
	}
    }
  demand_empty_rest_of_line ();
}

/* .serialize.data
   .serialize.instruction */

/* Handle the serialization directives, bracketing them with
   instruction-group breaks.  */
static void
dot_serialize (int type)
{
  insn_group_break (0, 0, 0);
  if (type)
    instruction_serialization ();
  else
    data_serialization ();
  insn_group_break (0, 0, 0);
  demand_empty_rest_of_line ();
}

/* select dv checking mode
   .auto
   .explicit
   .default

   A stop is inserted when changing modes.  */

static void
dot_dv_mode (int type)
{
  if
(md.manual_bundling)
    as_warn (_("Directive invalid within a bundle"));

  /* Uppercase type letters mean the mode was not explicitly chosen by
     the user (e.g. set from the command line).  */
  if (type == 'E' || type == 'A')
    md.mode_explicitly_set = 0;
  else
    md.mode_explicitly_set = 1;

  md.detect_dv = 1;
  switch (type)
    {
    case 'A':
    case 'a':
      if (md.explicit_mode)
	insn_group_break (1, 0, 0);
      md.explicit_mode = 0;
      break;
    case 'E':
    case 'e':
      if (!md.explicit_mode)
	insn_group_break (1, 0, 0);
      md.explicit_mode = 1;
      break;
    default:
    case 'd':
      if (md.explicit_mode != md.default_explicit_mode)
	insn_group_break (1, 0, 0);
      md.explicit_mode = md.default_explicit_mode;
      md.mode_explicitly_set = 0;
      break;
    }
}

/* Print the predicate registers selected by MASK to stderr as a
   comma-separated "pN" list (debug aid).  */
static void
print_prmask (valueT mask)
{
  int regno;
  char *comma = "";

  for (regno = 0; regno < 64; regno++)
    {
      if (mask & ((valueT) 1 << regno))
	{
	  fprintf (stderr, "%s p%d", comma, regno);
	  comma = ",";
	}
    }
}

/*
  .pred.rel.clear [p1 [,p2 [,...]]]     (also .pred.rel "clear" or @clear)
  .pred.rel.imply p1, p2                (also .pred.rel "imply" or @imply)
  .pred.rel.mutex p1, p2 [,...]         (also .pred.rel "mutex" or @mutex)
  .pred.safe_across_calls p1 [, p2 [,...]]
 */

static void
dot_pred_rel (int type)
{
  valueT mask = 0;
  int count = 0;
  int p1 = -1, p2 = -1;

  /* TYPE 0 means the relation kind follows as "string" or @symbol.  */
  if (type == 0)
    {
      if (*input_line_pointer == '"')
	{
	  int len;
	  char *form = demand_copy_C_string (&len);

	  if (strcmp (form, "mutex") == 0)
	    type = 'm';
	  else if (strcmp (form, "clear") == 0)
	    type = 'c';
	  else if (strcmp (form, "imply") == 0)
	    type = 'i';
	  obstack_free (&notes, form);
	}
      else if (*input_line_pointer == '@')
	{
	  char *form;
	  char c;

	  ++input_line_pointer;
	  c = get_symbol_name (&form);

	  if (strcmp (form, "mutex") == 0)
	    type = 'm';
	  else if (strcmp (form, "clear") == 0)
	    type = 'c';
	  else if (strcmp (form, "imply") == 0)
	    type = 'i';
	  (void) restore_line_pointer (c);
	}
      else
	{
	  as_bad (_("Missing predicate relation type"));
	  ignore_rest_of_line ();
	  return;
	}
      if (type == 0)
	{
	  as_bad (_("Unrecognized predicate relation type"));
	  ignore_rest_of_line ();
	  return;
	}
      if (*input_line_pointer == ',')
	++input_line_pointer;
      SKIP_WHITESPACE ();
    }

  /* Collect the predicate operands (single registers or, except for
     "imply", pN-pM ranges) into MASK.  */
  while (1)
    {
      valueT bits = 1;
      int sep, regno;
      expressionS pr, *pr1, *pr2;

      sep = parse_operand_and_eval (&pr, ',');
      if (pr.X_op == O_register
	  && pr.X_add_number >= REG_P
	  && pr.X_add_number <= REG_P + 63)
	{
	  regno = pr.X_add_number - REG_P;
	  bits <<= regno;
	  count++;
	  if (p1 == -1)
	    p1 = regno;
	  else if (p2 == -1)
	    p2 = regno;
	}
      else if (type != 'i'
	       && pr.X_op == O_subtract
	       && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
	       && pr1->X_op == O_register
	       && pr1->X_add_number >= REG_P
	       && pr1->X_add_number <= REG_P + 63
	       && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
	       && pr2->X_op == O_register
	       && pr2->X_add_number >= REG_P
	       && pr2->X_add_number <= REG_P + 63)
	{
	  /* It's a range.  */
	  int stop;

	  regno = pr1->X_add_number - REG_P;
	  stop = pr2->X_add_number - REG_P;
	  if (regno >= stop)
	    {
	      as_bad (_("Bad register range"));
	      ignore_rest_of_line ();
	      return;
	    }
	  bits = ((bits << stop) << 1) - (bits << regno);
	  count += stop - regno + 1;
	}
      else
	{
	  as_bad (_("Predicate register expected"));
	  ignore_rest_of_line ();
	  return;
	}
      if (mask & bits)
	as_warn (_("Duplicate predicate register ignored"));
      mask |= bits;
      if (sep != ',')
	break;
    }

  switch (type)
    {
    case 'c':
      if (count == 0)
	mask = ~(valueT) 0;
      clear_qp_mutex (mask);
      clear_qp_implies (mask, (valueT) 0);
      break;
    case 'i':
      if (count != 2 || p1 == -1 || p2 == -1)
	as_bad (_("Predicate source and target required"));
      else if (p1 == 0 || p2 == 0)
	as_bad (_("Use of p0 is not valid in this context"));
      else
	add_qp_imply (p1, p2);
      break;
    case 'm':
      if (count < 2)
	{
	  as_bad (_("At least two PR arguments expected"));
	  break;
	}
      else if (mask & 1)
	{
	  as_bad (_("Use of p0 is not valid in this context"));
	  break;
	}
      add_qp_mutex (mask);
      break;
    case 's':
      /* note that we don't override any existing relations */
      if (count == 0)
	{
	  as_bad (_("At least one PR argument expected"));
	  break;
	}
      if (md.debug_dv)
	{
	  fprintf (stderr, "Safe across calls: ");
	  print_prmask (mask);
	  fprintf (stderr, "\n");
	}
      qp_safe_across_calls = mask;
      break;
    }
  demand_empty_rest_of_line ();
}

/* .entry label [, label [, ...]]

   Hint to DV code that the given labels are to be considered entry
   points.  Otherwise, only global labels are considered entry points.  */

static void
dot_entry (int dummy ATTRIBUTE_UNUSED)
{
  const char *err;
  char *name;
  int c;
  symbolS *symbolP;

  do
    {
      c = get_symbol_name (&name);
      symbolP = symbol_find_or_make (name);

      err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP);
      if (err)
	as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
		  name, err);

      *input_line_pointer = c;
      SKIP_WHITESPACE_AFTER_NAME ();
      c = *input_line_pointer;
      if (c == ',')
	{
	  input_line_pointer++;
	  SKIP_WHITESPACE ();
	  if (*input_line_pointer == '\n')
	    c = '\n';
	}
    }
  while (c == ',');

  demand_empty_rest_of_line ();
}

/* .mem.offset offset, base
   "base" is used to distinguish between offsets from a different base.  */

static void
dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
{
  md.mem_offset.hint = 1;
  md.mem_offset.offset = get_absolute_expression ();
  if (*input_line_pointer != ',')
    {
      as_bad (_("Comma expected"));
      ignore_rest_of_line ();
      return;
    }
  ++input_line_pointer;
  md.mem_offset.base = get_absolute_expression ();
  demand_empty_rest_of_line ();
}

/* ia64-specific pseudo-ops: */
const pseudo_typeS md_pseudo_table[] =
  {
    { "radix", dot_radix, 0 },
    { "lcomm", s_lcomm_bytes, 1 },
    { "loc", dot_loc, 0 },
    { "bss", dot_special_section, SPECIAL_SECTION_BSS },
    { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
    { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
    { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
    { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
    { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
    { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
    { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
    { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
    { "proc", dot_proc, 0 },
    { "body", dot_body, 0 },
    { "prologue", dot_prologue, 0 },
    { "endp", dot_endp, 0 },
    { "fframe", dot_fframe, 0 },
    { "vframe",
dot_vframe, 0 },
    { "vframesp", dot_vframesp, 0 },
    { "vframepsp", dot_vframesp, 1 },
    { "save", dot_save, 0 },
    { "restore", dot_restore, 0 },
    { "restorereg", dot_restorereg, 0 },
    { "restorereg.p", dot_restorereg, 1 },
    { "handlerdata", dot_handlerdata, 0 },
    { "unwentry", dot_unwentry, 0 },
    { "altrp", dot_altrp, 0 },
    { "savesp", dot_savemem, 0 },
    { "savepsp", dot_savemem, 1 },
    { "save.g", dot_saveg, 0 },
    { "save.f", dot_savef, 0 },
    { "save.b", dot_saveb, 0 },
    { "save.gf", dot_savegf, 0 },
    { "spill", dot_spill, 0 },
    { "spillreg", dot_spillreg, 0 },
    { "spillsp", dot_spillmem, 0 },
    { "spillpsp", dot_spillmem, 1 },
    { "spillreg.p", dot_spillreg, 1 },
    /* Predicated spill-to-memory forms pass the bit-inverted selector;
       dot_spillmem decodes a negative argument as "predicated".  */
    { "spillsp.p", dot_spillmem, ~0 },
    { "spillpsp.p", dot_spillmem, ~1 },
    { "label_state", dot_label_state, 0 },
    { "copy_state", dot_copy_state, 0 },
    { "unwabi", dot_unwabi, 0 },
    { "personality", dot_personality, 0 },
    { "mii", dot_template, 0x0 },
    { "mli", dot_template, 0x2 },	/* old format, for compatibility */
    { "mlx", dot_template, 0x2 },
    { "mmi", dot_template, 0x4 },
    { "mfi", dot_template, 0x6 },
    { "mmf", dot_template, 0x7 },
    { "mib", dot_template, 0x8 },
    { "mbb", dot_template, 0x9 },
    { "bbb", dot_template, 0xb },
    { "mmb", dot_template, 0xc },
    { "mfb", dot_template, 0xe },
    { "align", dot_align, 0 },
    { "regstk", dot_regstk, 0 },
    { "rotr", dot_rot, DYNREG_GR },
    { "rotf", dot_rot, DYNREG_FR },
    { "rotp", dot_rot, DYNREG_PR },
    { "lsb", dot_byteorder, 0 },
    { "msb", dot_byteorder, 1 },
    { "psr", dot_psr, 0 },
    { "alias", dot_alias, 0 },
    { "secalias", dot_alias, 1 },
    { "ln", dot_ln, 0 },		/* source line info (for debugging) */

    { "xdata1", dot_xdata, 1 },
    { "xdata2", dot_xdata, 2 },
    { "xdata4", dot_xdata, 4 },
    { "xdata8", dot_xdata, 8 },
    { "xdata16", dot_xdata, 16 },
    { "xreal4", dot_xfloat_cons, 'f' },
    { "xreal8", dot_xfloat_cons, 'd' },
    { "xreal10", dot_xfloat_cons, 'x' },
    { "xreal16", dot_xfloat_cons, 'X' },
    { "xstring", dot_xstringer, 8 + 0 },
    { "xstringz", dot_xstringer, 8 + 1 },

    /* unaligned versions: */
    { "xdata2.ua", dot_xdata_ua, 2 },
    { "xdata4.ua", dot_xdata_ua, 4 },
    { "xdata8.ua", dot_xdata_ua, 8 },
    { "xdata16.ua", dot_xdata_ua, 16 },
    { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
    { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
    { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
    { "xreal16.ua", dot_xfloat_cons_ua, 'X' },

    /* annotations/DV checking support */
    { "entry", dot_entry, 0 },
    { "mem.offset", dot_mem_offset, 0 },
    { "pred.rel", dot_pred_rel, 0 },
    { "pred.rel.clear", dot_pred_rel, 'c' },
    { "pred.rel.imply", dot_pred_rel, 'i' },
    { "pred.rel.mutex", dot_pred_rel, 'm' },
    { "pred.safe_across_calls", dot_pred_rel, 's' },
    { "reg.val", dot_reg_val, 0 },
    { "serialize.data", dot_serialize, 0 },
    { "serialize.instruction", dot_serialize, 1 },
    { "auto", dot_dv_mode, 'a' },
    { "explicit", dot_dv_mode, 'e' },
    { "default", dot_dv_mode, 'd' },
    /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
       IA-64 aligns data allocation pseudo-ops by default, so we have to
       tell it that these ones are supposed to be unaligned.  Long term,
       should rewrite so that only IA-64 specific data allocation
       pseudo-ops are aligned by default.
*/
    {"2byte", stmt_cons_ua, 2},
    {"4byte", stmt_cons_ua, 4},
    {"8byte", stmt_cons_ua, 8},

#ifdef TE_VMS
    {"vms_common", obj_elf_vms_common, 0},
#endif

    { NULL, 0, 0 }
  };

/* Pseudo-op-like mnemonics that do not start with a dot: sizes/kinds of
   data and string emission, dispatched by name to the handlers above.  */
static const struct pseudo_opcode
  {
    const char *name;
    void (*handler) (int);
    int arg;
  }
pseudo_opcode[] =
  {
    /* these are more like pseudo-ops, but don't start with a dot */
    { "data1", cons, 1 },
    { "data2", cons, 2 },
    { "data4", cons, 4 },
    { "data8", cons, 8 },
    { "data16", cons, 16 },
    { "real4", stmt_float_cons, 'f' },
    { "real8", stmt_float_cons, 'd' },
    { "real10", stmt_float_cons, 'x' },
    { "real16", stmt_float_cons, 'X' },
    { "string", stringer, 8 + 0 },
    { "stringz", stringer, 8 + 1 },

    /* unaligned versions: */
    { "data2.ua", stmt_cons_ua, 2 },
    { "data4.ua", stmt_cons_ua, 4 },
    { "data8.ua", stmt_cons_ua, 8 },
    { "data16.ua", stmt_cons_ua, 16 },
    { "real4.ua", float_cons, 'f' },
    { "real8.ua", float_cons, 'd' },
    { "real10.ua", float_cons, 'x' },
    { "real16.ua", float_cons, 'X' },
  };

/* Declare a register by creating a symbol for it and entering it in
   the symbol table.
*/
static symbolS *
declare_register (const char *name, unsigned int regnum)
{
  const char *err;
  symbolS *sym;

  sym = symbol_create (name, reg_section, regnum, &zero_address_frag);

  err = hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym);
  if (err)
    as_fatal ("Inserting \"%s\" into register table failed: %s",
	      name, err);

  return sym;
}

/* Declare NUM_REGS registers named PREFIX0..PREFIXn, numbered from
   BASE_REGNUM.  */
static void
declare_register_set (const char *prefix,
		      unsigned int num_regs,
		      unsigned int base_regnum)
{
  char name[8];
  unsigned int i;

  for (i = 0; i < num_regs; ++i)
    {
      snprintf (name, sizeof (name), "%s%u", prefix, i);
      declare_register (name, base_regnum + i);
    }
}

/* Return the total encoded bit width of operand kind OPND, summed over
   its instruction fields.  */
static unsigned int
operand_width (enum ia64_opnd opnd)
{
  const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
  unsigned int bits = 0;
  int i;

  /* NOTE(review): BITS is initialized both in its declaration and
     here; the second assignment is redundant but harmless.  */
  bits = 0;
  for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
    bits += odesc->field[i].bits;

  return bits;
}

/* Check whether expression E matches operand RES_INDEX of opcode
   IDESC; returns OPERAND_MATCH, OPERAND_MISMATCH, or (for a matching
   kind whose value does not fit) OPERAND_OUT_OF_RANGE.  As a side
   effect, relocatable operands append a fixup to the current slot and
   some immediates are normalized in place in *E.  */
static enum operand_match_result
operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
{
  enum ia64_opnd opnd = idesc->operands[res_index];
  int bits, relocatable = 0;
  struct insn_fix *fix;
  bfd_signed_vma val;

  switch (opnd)
    {
      /* constants: */

    case IA64_OPND_AR_CCV:
      if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_AR_CSD:
      if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_AR_PFS:
      if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_GR0:
      if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_IP:
      if (e->X_op == O_register && e->X_add_number == REG_IP)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_PR:
      if (e->X_op == O_register && e->X_add_number == REG_PR)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_PR_ROT:
      if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_PSR:
      if (e->X_op == O_register && e->X_add_number == REG_PSR)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_PSR_L:
      if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_PSR_UM:
      if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_C1:
      if (e->X_op == O_constant)
	{
	  if (e->X_add_number == 1)
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_C8:
      if (e->X_op == O_constant)
	{
	  if (e->X_add_number == 8)
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_C16:
      if (e->X_op == O_constant)
	{
	  if (e->X_add_number == 16)
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

      /* register operands: */

    case IA64_OPND_AR3:
      if (e->X_op == O_register && e->X_add_number >= REG_AR
	  && e->X_add_number < REG_AR + 128)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_B1:
    case IA64_OPND_B2:
      if (e->X_op == O_register && e->X_add_number >= REG_BR
	  && e->X_add_number < REG_BR + 8)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_CR3:
      if (e->X_op == O_register && e->X_add_number >= REG_CR
	  && e->X_add_number < REG_CR + 128)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_DAHR3:
      if (e->X_op == O_register && e->X_add_number >= REG_DAHR
	  && e->X_add_number < REG_DAHR + 8)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_F1:
    case IA64_OPND_F2:
    case IA64_OPND_F3:
    case IA64_OPND_F4:
      if (e->X_op == O_register && e->X_add_number >= REG_FR
	  && e->X_add_number < REG_FR + 128)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_P1:
    case IA64_OPND_P2:
      if (e->X_op == O_register && e->X_add_number >= REG_P
	  && e->X_add_number < REG_P + 64)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_R1:
    case IA64_OPND_R2:
    case IA64_OPND_R3:
      if (e->X_op == O_register && e->X_add_number >= REG_GR
	  && e->X_add_number < REG_GR + 128)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_R3_2:
      /* Only r0..r3 encode; r4..r127 are the right kind but out of
	 range for this operand.  */
      if (e->X_op == O_register && e->X_add_number >= REG_GR)
	{
	  if (e->X_add_number < REG_GR + 4)
	    return OPERAND_MATCH;
	  else if (e->X_add_number < REG_GR + 128)
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

      /* indirect operands: */
    case IA64_OPND_CPUID_R3:
    case IA64_OPND_DBR_R3:
    case IA64_OPND_DTR_R3:
    case IA64_OPND_ITR_R3:
    case IA64_OPND_IBR_R3:
    case IA64_OPND_MSR_R3:
    case IA64_OPND_PKR_R3:
    case IA64_OPND_PMC_R3:
    case IA64_OPND_PMD_R3:
    case IA64_OPND_DAHR_R3:
    case IA64_OPND_RR_R3:
      /* The indirect-register file must correspond to the operand kind
	 (both lists are in the same order).  */
      if (e->X_op == O_index && e->X_op_symbol
	  && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
	      == opnd - IA64_OPND_CPUID_R3))
	return OPERAND_MATCH;
      break;

    case IA64_OPND_MR3:
      if (e->X_op == O_index && !e->X_op_symbol)
	return OPERAND_MATCH;
      break;

      /* immediate operands: */
    case IA64_OPND_CNT2a:
    case IA64_OPND_LEN4:
    case IA64_OPND_LEN6:
      bits = operand_width (idesc->operands[res_index]);
      if (e->X_op == O_constant)
	{
	  /* Value is biased by one: 1..2^bits encode as 0..2^bits-1.  */
	  if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_CNT2b:
      if (e->X_op == O_constant)
	{
	  if ((bfd_vma) (e->X_add_number - 1) < 3)
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_CNT2c:
      val = e->X_add_number;
      if (e->X_op == O_constant)
	{
	  if ((val == 0 || val == 7 || val == 15 || val == 16))
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_SOR:
      /* SOR must be an integer multiple of 8 */
      if (e->X_op == O_constant && e->X_add_number & 0x7)
	return OPERAND_OUT_OF_RANGE;
      /* Fall through to the shared 0..96 range check.  */
    case IA64_OPND_SOF:
    case IA64_OPND_SOL:
      if (e->X_op == O_constant)
	{
	  if ((bfd_vma) e->X_add_number <= 96)
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_IMMU62:
      if (e->X_op == O_constant)
	{
	  if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      else
	{
	  /* FIXME -- need 62-bit relocation type */
	  as_bad (_("62-bit relocation not yet implemented"));
	}
      break;

    case IA64_OPND_IMMU64:
      if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
	  || e->X_op == O_subtract)
	{
	  /* Non-constant: record a 64-bit immediate fixup on the
	     current slot.  */
	  fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
	  fix->code = BFD_RELOC_IA64_IMM64;
	  if (e->X_op != O_subtract)
	    {
	      fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
	      if (e->X_op == O_pseudo_fixup)
		e->X_op = O_symbol;
	    }

	  fix->opnd = idesc->operands[res_index];
	  fix->expr = *e;
	  fix->is_pcrel = 0;
	  ++CURR_SLOT.num_fixups;
	  return OPERAND_MATCH;
	}
      else if (e->X_op == O_constant)
	return OPERAND_MATCH;
      break;

    case IA64_OPND_IMMU5b:
      if (e->X_op == O_constant)
	{
	  val = e->X_add_number;
	  if (val >= 32 && val <= 63)
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_CCNT5:
    case IA64_OPND_CNT5:
    case IA64_OPND_CNT6:
    case IA64_OPND_CPOS6a:
    case IA64_OPND_CPOS6b:
    case IA64_OPND_CPOS6c:
    case IA64_OPND_IMMU2:
    case IA64_OPND_IMMU7a:
    case IA64_OPND_IMMU7b:
    case IA64_OPND_IMMU16:
    case IA64_OPND_IMMU19:
    case IA64_OPND_IMMU21:
    case IA64_OPND_IMMU24:
    case IA64_OPND_MBTYPE4:
    case IA64_OPND_MHTYPE8:
    case IA64_OPND_POS6:
      /* Plain unsigned immediates: must fit in the operand's width.  */
      bits = operand_width (idesc->operands[res_index]);
      if (e->X_op == O_constant)
	{
	  if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
	    return OPERAND_MATCH;
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_IMMU9:
      bits = operand_width (idesc->operands[res_index]);
      if (e->X_op == O_constant)
	{
	  if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
	    {
	      int lobits = e->X_add_number & 0x3;

	      if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
		e->X_add_number |= (bfd_vma) 0x3;
	      return OPERAND_MATCH;
	    }
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_IMM44:
      /* least 16 bits must be zero */
      if ((e->X_add_number & 0xffff) != 0)
	/* XXX technically, this is wrong: we should not be issuing
	   warning messages until we're sure this instruction pattern
	   is going to be used!
*/
	as_warn (_("lower 16 bits of mask ignored"));

      if (e->X_op == O_constant)
	{
	  if (((e->X_add_number >= 0
		&& (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
	       || (e->X_add_number < 0
		   && (bfd_vma) -e->X_add_number
		   <= ((bfd_vma) 1 << 44))))
	    {
	      /* sign-extend */
	      if (e->X_add_number >= 0
		  && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
		{
		  e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
		}
	      return OPERAND_MATCH;
	    }
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_IMM17:
      /* bit 0 is a don't care (pr0 is hardwired to 1) */
      if (e->X_op == O_constant)
	{
	  if (((e->X_add_number >= 0
		&& (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
	       || (e->X_add_number < 0
		   && (bfd_vma) -e->X_add_number
		   <= ((bfd_vma) 1 << 17))))
	    {
	      /* sign-extend */
	      if (e->X_add_number >= 0
		  && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
		{
		  e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
		}
	      return OPERAND_MATCH;
	    }
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_IMM14:
    case IA64_OPND_IMM22:
      relocatable = 1;
      /* Fall through: shares handling with the non-relocatable forms.  */
    case IA64_OPND_IMM1:
    case IA64_OPND_IMM8:
    case IA64_OPND_IMM8U4:
    case IA64_OPND_IMM8M1:
    case IA64_OPND_IMM8M1U4:
    case IA64_OPND_IMM8M1U8:
    case IA64_OPND_IMM9a:
    case IA64_OPND_IMM9b:
      bits = operand_width (idesc->operands[res_index]);
      if (relocatable && (e->X_op == O_symbol
			  || e->X_op == O_subtract
			  || e->X_op == O_pseudo_fixup))
	{
	  fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;

	  if (idesc->operands[res_index] == IA64_OPND_IMM14)
	    fix->code = BFD_RELOC_IA64_IMM14;
	  else
	    fix->code = BFD_RELOC_IA64_IMM22;

	  if (e->X_op != O_subtract)
	    {
	      fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
	      if (e->X_op == O_pseudo_fixup)
		e->X_op = O_symbol;
	    }

	  fix->opnd = idesc->operands[res_index];
	  fix->expr = *e;
	  fix->is_pcrel = 0;
	  ++CURR_SLOT.num_fixups;
	  return OPERAND_MATCH;
	}
      else if (e->X_op != O_constant
	       && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
	return OPERAND_MISMATCH;

      if (opnd == IA64_OPND_IMM8M1U4)
	{
	  /* Zero is not valid for unsigned compares that take an
	     adjusted constant immediate range.
*/ if (e->X_add_number == 0) return OPERAND_OUT_OF_RANGE; /* Sign-extend 32-bit unsigned numbers, so that the following range checks will work. */ val = e->X_add_number; if (((val & (~(bfd_vma) 0 << 32)) == 0) && ((val & ((bfd_vma) 1 << 31)) != 0)) val = ((val << 32) >> 32); /* Check for 0x100000000. This is valid because 0x100000000-1 is the same as ((uint32_t) -1). */ if (val == ((bfd_signed_vma) 1 << 32)) return OPERAND_MATCH; val = val - 1; } else if (opnd == IA64_OPND_IMM8M1U8) { /* Zero is not valid for unsigned compares that take an adjusted constant immediate range. */ if (e->X_add_number == 0) return OPERAND_OUT_OF_RANGE; /* Check for 0x10000000000000000. */ if (e->X_op == O_big) { if (generic_bignum[0] == 0 && generic_bignum[1] == 0 && generic_bignum[2] == 0 && generic_bignum[3] == 0 && generic_bignum[4] == 1) return OPERAND_MATCH; else return OPERAND_OUT_OF_RANGE; } else val = e->X_add_number - 1; } else if (opnd == IA64_OPND_IMM8M1) val = e->X_add_number - 1; else if (opnd == IA64_OPND_IMM8U4) { /* Sign-extend 32-bit unsigned numbers, so that the following range checks will work. 
*/ val = e->X_add_number; if (((val & (~(bfd_vma) 0 << 32)) == 0) && ((val & ((bfd_vma) 1 << 31)) != 0)) val = ((val << 32) >> 32); } else val = e->X_add_number; if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1))) || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1)))) return OPERAND_MATCH; else return OPERAND_OUT_OF_RANGE; case IA64_OPND_INC3: /* +/- 1, 4, 8, 16 */ val = e->X_add_number; if (val < 0) val = -val; if (e->X_op == O_constant) { if ((val == 1 || val == 4 || val == 8 || val == 16)) return OPERAND_MATCH; else return OPERAND_OUT_OF_RANGE; } break; case IA64_OPND_TGT25: case IA64_OPND_TGT25b: case IA64_OPND_TGT25c: case IA64_OPND_TGT64: if (e->X_op == O_symbol) { fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups; if (opnd == IA64_OPND_TGT25) fix->code = BFD_RELOC_IA64_PCREL21F; else if (opnd == IA64_OPND_TGT25b) fix->code = BFD_RELOC_IA64_PCREL21M; else if (opnd == IA64_OPND_TGT25c) fix->code = BFD_RELOC_IA64_PCREL21B; else if (opnd == IA64_OPND_TGT64) fix->code = BFD_RELOC_IA64_PCREL60B; else abort (); fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code); fix->opnd = idesc->operands[res_index]; fix->expr = *e; fix->is_pcrel = 1; ++CURR_SLOT.num_fixups; return OPERAND_MATCH; } case IA64_OPND_TAG13: case IA64_OPND_TAG13b: switch (e->X_op) { case O_constant: return OPERAND_MATCH; case O_symbol: fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups; /* There are no external relocs for TAG13/TAG13b fields, so we create a dummy reloc. This will not live past md_apply_fix. 
*/
	  fix->code = BFD_RELOC_UNUSED;
	  fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
	  fix->opnd = idesc->operands[res_index];
	  fix->expr = *e;
	  fix->is_pcrel = 1;
	  ++CURR_SLOT.num_fixups;
	  return OPERAND_MATCH;

	default:
	  break;
	}
      break;

    case IA64_OPND_LDXMOV:
      /* Always matches: emit an LDXMOV relocation for the linker.  */
      fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
      fix->code = BFD_RELOC_IA64_LDXMOV;
      fix->opnd = idesc->operands[res_index];
      fix->expr = *e;
      fix->is_pcrel = 0;
      ++CURR_SLOT.num_fixups;
      return OPERAND_MATCH;

    case IA64_OPND_STRD5b:
      if (e->X_op == O_constant)
	{
	  /* 5-bit signed scaled by 64 */
	  if ((e->X_add_number <= ( 0xf << 6 )) && (e->X_add_number >= -( 0x10 << 6 )))
	    {
	      /* Must be a multiple of 64 */
	      if ((e->X_add_number & 0x3f) != 0)
		as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
	      e->X_add_number &= ~ 0x3f;
	      return OPERAND_MATCH;
	    }
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    case IA64_OPND_CNT6a:
      if (e->X_op == O_constant)
	{
	  /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
	  if ((e->X_add_number <= 64) && (e->X_add_number > 0) )
	    {
	      return OPERAND_MATCH;
	    }
	  else
	    return OPERAND_OUT_OF_RANGE;
	}
      break;

    default:
      break;
    }
  return OPERAND_MISMATCH;
}

/* Parse one operand expression into *E.  *E is zeroed and marked
   O_absent first, so an empty operand is detectable by the caller.
   Returns the separator character that followed the expression; if
   MORE is nonzero and that separator is ',' or equal to MORE, the
   input pointer is advanced past it.  */

static int
parse_operand (expressionS *e, int more)
{
  int sep = '\0';

  memset (e, 0, sizeof (*e));
  e->X_op = O_absent;
  SKIP_WHITESPACE ();
  expression (e);
  sep = *input_line_pointer;
  if (more && (sep == ',' || sep == more))
    ++input_line_pointer;
  return sep;
}

/* Like parse_operand, but always fold the parsed expression with
   resolve_expression.  */

static int
parse_operand_and_eval (expressionS *e, int more)
{
  int sep = parse_operand (e, more);

  resolve_expression (e);
  return sep;
}

/* Like parse_operand, but resolve the expression only when the target
   operand kind OP is not one of the kinds that operand_match may turn
   into a fixup (relocatable immediates, branch targets, tags,
   LDXMOV) -- those must stay symbolic until relocation time.  */

static int
parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
{
  int sep = parse_operand (e, more);

  switch (op)
    {
    case IA64_OPND_IMM14:
    case IA64_OPND_IMM22:
    case IA64_OPND_IMMU64:
    case IA64_OPND_TGT25:
    case IA64_OPND_TGT25b:
    case IA64_OPND_TGT25c:
    case IA64_OPND_TGT64:
    case IA64_OPND_TAG13:
    case IA64_OPND_TAG13b:
    case IA64_OPND_LDXMOV:
      break;
    default:
      resolve_expression (e);
      break;
    }
  return sep;
}

/* Returns the next entry in the opcode
table that matches the one in IDESC, and frees the entry in IDESC.
   If no matching entry is found, NULL is returned instead.  */

static struct ia64_opcode *
get_next_opcode (struct ia64_opcode *idesc)
{
  struct ia64_opcode *next = ia64_find_next_opcode (idesc);

  ia64_free_opcode (idesc);
  return next;
}

/* Parse the operands for the opcode and find the opcode variant that
   matches the specified operands, or NULL if no match is possible.  */

static struct ia64_opcode *
parse_operands (struct ia64_opcode *idesc)
{
  int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
  int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
  int reg1, reg2;
  char reg_class;
  enum ia64_opnd expected_operand = IA64_OPND_NIL;
  enum operand_match_result result;
  char mnemonic[129];
  char *first_arg = 0, *end, *saved_input_pointer;
  unsigned int sof;

  /* mnemonic[] is sized 129, so the name must fit in 128 chars.  */
  gas_assert (strlen (idesc->name) <= 128);

  strcpy (mnemonic, idesc->name);
  if (idesc->operands[2] == IA64_OPND_SOF
      || idesc->operands[1] == IA64_OPND_SOF)
    {
      /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
	 can't parse the first operand until we have parsed the
	 remaining operands of the "alloc" instruction.
*/
      SKIP_WHITESPACE ();
      first_arg = input_line_pointer;
      end = strchr (input_line_pointer, '=');
      if (!end)
	{
	  as_bad (_("Expected separator `='"));
	  return 0;
	}
      /* Skip the first (output) operand for now; it is re-parsed below
	 once the frame sizes are known.  */
      input_line_pointer = end + 1;
      ++i;
      ++num_outputs;
    }

  for (; ; ++i)
    {
      if (i < NELEMS (CURR_SLOT.opnd))
	{
	  sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=',
					  idesc->operands[i]);
	  if (CURR_SLOT.opnd[i].X_op == O_absent)
	    break;
	}
      else
	{
	  /* Too many operands: parse into a dummy just to keep the scan
	     going and count them for the error path.  */
	  expressionS dummy;

	  sep = parse_operand (&dummy, '=');
	  if (dummy.X_op == O_absent)
	    break;
	}
      ++num_operands;

      if (sep != '=' && sep != ',')
	break;

      if (sep == '=')
	{
	  if (num_outputs > 0)
	    as_bad (_("Duplicate equal sign (=) in instruction"));
	  else
	    num_outputs = i + 1;
	}
    }
  if (sep != '\0')
    {
      as_bad (_("Illegal operand separator `%c'"), sep);
      return 0;
    }

  if (idesc->operands[2] == IA64_OPND_SOF
      || idesc->operands[1] == IA64_OPND_SOF)
    {
      /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
	 Note, however, that due to that mapping operand numbers in error
	 messages for any of the constant operands will not be correct.  */
      know (strcmp (idesc->name, "alloc") == 0);
      /* The first operand hasn't been parsed/initialized, yet (but
	 num_operands intentionally doesn't account for that).  */
      i = num_operands > 4 ? 2 : 1;
#define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
			? CURR_SLOT.opnd[n].X_add_number \
			: 0)
      sof = set_regstack (FORCE_CONST(i),
			  FORCE_CONST(i + 1),
			  FORCE_CONST(i + 2),
			  FORCE_CONST(i + 3));
#undef FORCE_CONST

      /* now we can parse the first arg:  */
      saved_input_pointer = input_line_pointer;
      input_line_pointer = first_arg;
      sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
				      idesc->operands[0]);
      if (sep != '=')
	--num_outputs;	/* force error */
      input_line_pointer = saved_input_pointer;

      CURR_SLOT.opnd[i].X_add_number = sof;
      if (CURR_SLOT.opnd[i + 1].X_op == O_constant
	  && CURR_SLOT.opnd[i + 2].X_op == O_constant)
	CURR_SLOT.opnd[i + 1].X_add_number
	  = sof - CURR_SLOT.opnd[i + 2].X_add_number;
      else
	CURR_SLOT.opnd[i + 1].X_op = O_illegal;
      CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
    }

  /* Bits 1 and 2 of highest_unmatched_operand record whether any
     candidate passed the output-count and operand-count checks; the
     negative start value keeps those flags distinguishable from real
     operand indices.  */
  highest_unmatched_operand = -4;
  curr_out_of_range_pos = -1;
  error_pos = 0;
  for (; idesc; idesc = get_next_opcode (idesc))
    {
      if (num_outputs != idesc->num_outputs)
	continue;		/* mismatch in # of outputs */
      if (highest_unmatched_operand < 0)
	highest_unmatched_operand |= 1;

      if (num_operands > NELEMS (idesc->operands)
	  || (num_operands < NELEMS (idesc->operands)
	      && idesc->operands[num_operands])
	  || (num_operands > 0 && !idesc->operands[num_operands - 1]))
	continue;		/* mismatch in number of arguments */
      if (highest_unmatched_operand < 0)
	highest_unmatched_operand |= 2;

      CURR_SLOT.num_fixups = 0;

      /* Try to match all operands.  If we see an out-of-range operand,
	 then continue trying to match the rest of the operands, since if
	 the rest match, then this idesc will give the best error message.  */

      out_of_range_pos = -1;
      for (i = 0; i < num_operands && idesc->operands[i]; ++i)
	{
	  result = operand_match (idesc, i, CURR_SLOT.opnd + i);
	  if (result != OPERAND_MATCH)
	    {
	      if (result != OPERAND_OUT_OF_RANGE)
		break;
	      if (out_of_range_pos < 0)
		/* remember position of the first out-of-range operand: */
		out_of_range_pos = i;
	    }
	}

      /* If we did not match all operands, or if at least one operand was
	 out-of-range, then this idesc does not match.  Keep track of which
	 idesc matched the most operands before failing.  If we have two
	 idescs that failed at the same position, and one had an out-of-range
	 operand, then prefer the out-of-range operand.  Thus if we have
	 "add r0=0x1000000,r1" we get an error saying the constant is out
	 of range instead of an error saying that the constant should have been
	 a register.  */

      if (i != num_operands || out_of_range_pos >= 0)
	{
	  if (i > highest_unmatched_operand
	      || (i == highest_unmatched_operand
		  && out_of_range_pos > curr_out_of_range_pos))
	    {
	      highest_unmatched_operand = i;
	      if (out_of_range_pos >= 0)
		{
		  expected_operand = idesc->operands[out_of_range_pos];
		  error_pos = out_of_range_pos;
		}
	      else
		{
		  expected_operand = idesc->operands[i];
		  error_pos = i;
		}
	      curr_out_of_range_pos = out_of_range_pos;
	    }
	  continue;
	}

      break;
    }
  if (!idesc)
    {
      if (expected_operand)
	as_bad (_("Operand %u of `%s' should be %s"),
		error_pos + 1, mnemonic,
		elf64_ia64_operands[expected_operand].desc);
      else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
	as_bad (_("Wrong number of output operands"));
      else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
	as_bad (_("Wrong number of input operands"));
      else
	as_bad (_("Operand mismatch"));
      return 0;
    }

  /* Check that the instruction doesn't use
     - r0, f0, or f1 as output operands
     - the same predicate twice as output operands
     - r0 as address of a base update load or store
     - the same GR as output and address of a base update load
     - two even- or two odd-numbered FRs as output operands of a floating
       point parallel load.
     At most two (conflicting) output (or output-like) operands can exist,
     (floating point parallel loads have three outputs, but the base register,
     if updated, cannot conflict with the actual outputs).  */
  reg2 = reg1 = -1;
  for (i = 0; i < num_operands; ++i)
    {
      int regno = 0;

      reg_class = 0;
      switch (idesc->operands[i])
	{
	case IA64_OPND_R1:
	case IA64_OPND_R2:
	case IA64_OPND_R3:
	  if (i < num_outputs)
	    {
	      if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
		reg_class = 'r';
	      else if (reg1 < 0)
		reg1 = CURR_SLOT.opnd[i].X_add_number;
	      else if (reg2 < 0)
		reg2 = CURR_SLOT.opnd[i].X_add_number;
	    }
	  break;
	case IA64_OPND_P1:
	case IA64_OPND_P2:
	  if (i < num_outputs)
	    {
	      if (reg1 < 0)
		reg1 = CURR_SLOT.opnd[i].X_add_number;
	      else if (reg2 < 0)
		reg2 = CURR_SLOT.opnd[i].X_add_number;
	    }
	  break;
	case IA64_OPND_F1:
	case IA64_OPND_F2:
	case IA64_OPND_F3:
	case IA64_OPND_F4:
	  if (i < num_outputs)
	    {
	      if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
		  && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
		{
		  reg_class = 'f';
		  regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
		}
	      else if (reg1 < 0)
		reg1 = CURR_SLOT.opnd[i].X_add_number;
	      else if (reg2 < 0)
		reg2 = CURR_SLOT.opnd[i].X_add_number;
	    }
	  break;
	case IA64_OPND_MR3:
	  if (idesc->flags & IA64_OPCODE_POSTINC)
	    {
	      if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
		reg_class = 'm';
	      else if (reg1 < 0)
		reg1 = CURR_SLOT.opnd[i].X_add_number;
	      else if (reg2 < 0)
		reg2 = CURR_SLOT.opnd[i].X_add_number;
	    }
	  break;
	default:
	  break;
	}
      switch (reg_class)
	{
	case 0:
	  break;
	default:
	  as_warn (_("Invalid use of `%c%d' as output operand"),
		   reg_class, regno);
	  break;
	case 'm':
	  as_warn (_("Invalid use of `r%d' as base update address operand"),
		   regno);
	  break;
	}
    }
  if (reg1 == reg2)
    {
      if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
	{
	  reg1 -= REG_GR;
	  reg_class = 'r';
	}
      else if (reg1 >= REG_P && reg1 <= REG_P + 63)
	{
	  reg1 -= REG_P;
	  reg_class = 'p';
	}
      else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
	{
	  reg1 -= REG_FR;
	  reg_class = 'f';
	}
      else
	reg_class = 0;
      if (reg_class)
	as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
    }
  else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
	     && reg2 >= REG_FR && reg2 <= REG_FR + 31)
	    || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
		&& reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
	   && ! ((reg1 ^ reg2) & 1))
    as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
	     reg1 - REG_FR, reg2 - REG_FR);
  else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
	    && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
	   || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
	       && reg2 >= REG_FR && reg2 <= REG_FR + 31))
    as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
	     reg1 - REG_FR, reg2 - REG_FR);
  return idesc;
}

/* Encode the instruction in *SLOT into the instruction word at *INSNP,
   inserting each operand through the elf64_ia64_operands insert
   handlers.  The extra-long operand kinds (IMMU64, IMMU62, TGT64) also
   write an extension word into the following slot via *insnp++.  */

static void
build_insn (struct slot *slot, bfd_vma *insnp)
{
  const struct ia64_operand *odesc, *o2desc;
  struct ia64_opcode *idesc = slot->idesc;
  bfd_vma insn;
  bfd_signed_vma val;
  const char *err;
  int i;

  insn = idesc->opcode | slot->qp_regno;

  for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
    {
      if (slot->opnd[i].X_op == O_register
	  || slot->opnd[i].X_op == O_constant
	  || slot->opnd[i].X_op == O_index)
	val = slot->opnd[i].X_add_number;
      else if (slot->opnd[i].X_op == O_big)
	{
	  /* This must be the value 0x10000000000000000.  */
	  gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
	  val = 0;
	}
      else
	val = 0;

      switch (idesc->operands[i])
	{
	case IA64_OPND_IMMU64:
	  *insnp++ = (val >> 22) & 0x1ffffffffffLL;
	  insn |= (((val & 0x7f) << 13)
		   | (((val >> 7) & 0x1ff) << 27)
		   | (((val >> 16) & 0x1f) << 22)
		   | (((val >> 21) & 0x1) << 21)
		   | (((val >> 63) & 0x1) << 36));
	  continue;

	case IA64_OPND_IMMU62:
	  val &= 0x3fffffffffffffffULL;
	  if (val != slot->opnd[i].X_add_number)
	    as_warn (_("Value truncated to 62 bits"));
	  *insnp++ = (val >> 21) & 0x1ffffffffffLL;
	  insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
	  continue;

	case IA64_OPND_TGT64:
	  val >>= 4;
	  *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
	  insn |= ((((val >> 59) & 0x1) << 36)
		   | (((val >> 0) & 0xfffff) << 13));
	  continue;

	case IA64_OPND_AR3:
	  val -= REG_AR;
	  break;

	case IA64_OPND_B1:
	case IA64_OPND_B2:
	  val -= REG_BR;
	  break;

	case IA64_OPND_CR3:
	  val -= REG_CR;
	  break;

	case IA64_OPND_DAHR3:
	  val -= REG_DAHR;
	  break;

	case IA64_OPND_F1:
	case IA64_OPND_F2:
	case IA64_OPND_F3:
	case IA64_OPND_F4:
	  val -= REG_FR;
break;

	case IA64_OPND_P1:
	case IA64_OPND_P2:
	  val -= REG_P;
	  break;

	case IA64_OPND_R1:
	case IA64_OPND_R2:
	case IA64_OPND_R3:
	case IA64_OPND_R3_2:
	case IA64_OPND_CPUID_R3:
	case IA64_OPND_DBR_R3:
	case IA64_OPND_DTR_R3:
	case IA64_OPND_ITR_R3:
	case IA64_OPND_IBR_R3:
	case IA64_OPND_MR3:
	case IA64_OPND_MSR_R3:
	case IA64_OPND_PKR_R3:
	case IA64_OPND_PMC_R3:
	case IA64_OPND_PMD_R3:
	case IA64_OPND_DAHR_R3:
	case IA64_OPND_RR_R3:
	  val -= REG_GR;
	  break;

	default:
	  break;
	}

      odesc = elf64_ia64_operands + idesc->operands[i];
      err = (*odesc->insert) (odesc, val, &insn);
      if (err)
	as_bad_where (slot->src_file, slot->src_line,
		      _("Bad operand value: %s"), err);
      if (idesc->flags & IA64_OPCODE_PSEUDO)
	{
	  /* Pseudo-ops may mirror one operand into a second field.  */
	  if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
	      && odesc == elf64_ia64_operands + IA64_OPND_F3)
	    {
	      o2desc = elf64_ia64_operands + IA64_OPND_F2;
	      (*o2desc->insert) (o2desc, val, &insn);
	    }
	  if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
	      && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
		  || odesc == elf64_ia64_operands + IA64_OPND_POS6))
	    {
	      o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
	      (*o2desc->insert) (o2desc, 64 - val, &insn);
	    }
	}
    }
  *insnp = insn;
}

/* Emit the current 16-byte bundle: pick a template (the user-supplied
   one if any, best match otherwise), fill the three slots with queued
   instructions and nops, fix up labels, unwind records and fixups for
   each placed insn, and write the two 64-bit bundle halves to the frag
   little-endian.  */

static void
emit_one_bundle (void)
{
  int manual_bundling_off = 0, manual_bundling = 0;
  enum ia64_unit required_unit, insn_unit = 0;
  enum ia64_insn_type type[3], insn_type;
  unsigned int template_val, orig_template;
  bfd_vma insn[3] = { -1, -1, -1 };
  struct ia64_opcode *idesc;
  int end_of_insn_group = 0, user_template = -1;
  int n, i, j, first, curr, last_slot;
  bfd_vma t0 = 0, t1 = 0;
  struct label_fix *lfix;
  bfd_boolean mark_label;
  struct insn_fix *ifix;
  char mnemonic[16];
  fixS *fix;
  char *f;
  int addr_mod;

  first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
  know (first >= 0 && first < NUM_SLOTS);
  n = MIN (3, md.num_slots_in_use);

  /* Determine template: use user_template if specified, best match
     otherwise:  */

  if (md.slot[first].user_template >= 0)
    user_template = template_val = md.slot[first].user_template;
  else
    {
      /* Auto select appropriate template.  */
      memset (type, 0, sizeof (type));
      curr = first;
      for (i = 0; i < n; ++i)
	{
	  if (md.slot[curr].label_fixups && i != 0)
	    break;
	  type[i] = md.slot[curr].idesc->type;
	  curr = (curr + 1) % NUM_SLOTS;
	}
      template_val = best_template[type[0]][type[1]][type[2]];
    }

  /* initialize instructions with appropriate nops:  */
  for (i = 0; i < 3; ++i)
    insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];

  f = frag_more (16);

  /* Check to see if this bundle is at an offset that is a multiple of
     16-bytes from the start of the frag.  */
  addr_mod = frag_now_fix () & 15;
  if (frag_now->has_code && frag_now->insn_addr != addr_mod)
    as_bad (_("instruction address is not a multiple of 16"));
  frag_now->insn_addr = addr_mod;
  frag_now->has_code = 1;

  /* now fill in slots with as many insns as possible:  */
  curr = first;
  idesc = md.slot[curr].idesc;
  end_of_insn_group = 0;
  last_slot = -1;
  for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
    {
      /* If we have unwind records, we may need to update some now.  */
      unw_rec_list *ptr = md.slot[curr].unwind_record;
      unw_rec_list *end_ptr = NULL;

      if (ptr)
	{
	  /* Find the last prologue/body record in the list for the
	     current insn, and set the slot number for all records up to
	     that point.  This needs to be done now, because prologue/body
	     records refer to the current point, not the point after the
	     instruction has been issued.  This matters because there may
	     have been nops emitted meanwhile.  Any non-prologue non-body
	     record followed by a prologue/body record must also refer to
	     the current point.  */
	  unw_rec_list *last_ptr;

	  for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
	    end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
	  for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
	    if (ptr->r.type == prologue || ptr->r.type == prologue_gr
		|| ptr->r.type == body)
	      last_ptr = ptr;
	  if (last_ptr)
	    {
	      /* Make last_ptr point one after the last prologue/body
		 record.  */
	      last_ptr = last_ptr->next;
	      for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
		   ptr = ptr->next)
		{
		  ptr->slot_number = (unsigned long) f + i;
		  ptr->slot_frag = frag_now;
		}
	      /* Remove the initialized records, so that we won't
		 accidentally update them again if we insert a nop and
		 continue.  */
	      md.slot[curr].unwind_record = last_ptr;
	    }
	}

      manual_bundling_off = md.slot[curr].manual_bundling_off;
      if (md.slot[curr].manual_bundling_on)
	{
	  if (curr == first)
	    manual_bundling = 1;
	  else
	    break;			/* Need to start a new bundle.  */
	}

      /* If this instruction specifies a template, then it must be the first
	 instruction of a bundle.  */
      if (curr != first && md.slot[curr].user_template >= 0)
	break;

      if (idesc->flags & IA64_OPCODE_SLOT2)
	{
	  if (manual_bundling && !manual_bundling_off)
	    {
	      as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
			    _("`%s' must be last in bundle"), idesc->name);
	      if (i < 2)
		manual_bundling = -1; /* Suppress meaningless post-loop errors.  */
	    }
	  i = 2;
	}
      if (idesc->flags & IA64_OPCODE_LAST)
	{
	  int required_slot;
	  unsigned int required_template;

	  /* If we need a stop bit after an M slot, our only choice is
	     template 5 (M;;MI).  If we need a stop bit after a B
	     slot, our only choice is to place it at the end of the
	     bundle, because the only available templates are MIB,
	     MBB, BBB, MMB, and MFB.  We don't handle anything other
	     than M and B slots because these are the only kind of
	     instructions that can have the IA64_OPCODE_LAST bit set.  */
	  required_template = template_val;
	  switch (idesc->type)
	    {
	    case IA64_TYPE_M:
	      required_slot = 0;
	      required_template = 5;
	      break;

	    case IA64_TYPE_B:
	      required_slot = 2;
	      break;

	    default:
	      as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
			    _("Internal error: don't know how to force %s to end of instruction group"),
			    idesc->name);
	      required_slot = i;
	      break;
	    }
	  if (manual_bundling
	      && (i > required_slot
		  || (required_slot == 2 && !manual_bundling_off)
		  || (user_template >= 0
		      /* Changing from MMI to M;MI is OK.  */
		      && (template_val ^ required_template) > 1)))
	    {
	      as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
			    _("`%s' must be last in instruction group"),
			    idesc->name);
	      if (i < 2 && required_slot == 2 && !manual_bundling_off)
		manual_bundling = -1; /* Suppress meaningless post-loop errors.  */
	    }
	  if (required_slot < i)
	    /* Can't fit this instruction.  */
	    break;

	  i = required_slot;
	  if (required_template != template_val)
	    {
	      /* If we switch the template, we need to reset the NOPs
		 after slot i.  The slot-types of the instructions ahead
		 of i never change, so we don't need to worry about
		 changing NOPs in front of this slot.  */
	      for (j = i; j < 3; ++j)
		insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];

	      /* We just picked a template that includes the stop bit in the
		 middle, so we don't need another one emitted later.  */
	      md.slot[curr].end_of_insn_group = 0;
	    }
	  template_val = required_template;
	}
      if (curr != first && md.slot[curr].label_fixups)
	{
	  if (manual_bundling)
	    {
	      as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
			    _("Label must be first in a bundle"));
	      manual_bundling = -1; /* Suppress meaningless post-loop errors.  */
	    }
	  /* This insn must go into the first slot of a bundle.  */
	  break;
	}

      if (end_of_insn_group && md.num_slots_in_use >= 1)
	{
	  /* We need an instruction group boundary in the middle of a
	     bundle.  See if we can switch to an other template with
	     an appropriate boundary.  */

	  orig_template = template_val;
	  if (i == 1
	      && (user_template == 4
		  || (user_template < 0
		      && (ia64_templ_desc[template_val].exec_unit[0]
			  == IA64_UNIT_M))))
	    {
	      template_val = 5;
	      end_of_insn_group = 0;
	    }
	  else if (i == 2
		   && (user_template == 0
		       || (user_template < 0
			   && (ia64_templ_desc[template_val].exec_unit[1]
			       == IA64_UNIT_I)))
		   /* This test makes sure we don't switch the template if
		      the next instruction is one that needs to be first in
		      an instruction group.  Since all those instructions are
		      in the M group, there is no way such an instruction can
		      fit in this bundle even if we switch the template.  The
		      reason we have to check for this is that otherwise we
		      may end up generating "MI;;I M.." which has the deadly
		      effect that the second M instruction is no longer the
		      first in the group! --davidm 99/12/16  */
		   && (idesc->flags & IA64_OPCODE_FIRST) == 0)
	    {
	      template_val = 1;
	      end_of_insn_group = 0;
	    }
	  else if (i == 1
		   && user_template == 0
		   && !(idesc->flags & IA64_OPCODE_FIRST))
	    /* Use the next slot.  */
	    continue;
	  else if (curr != first)
	    /* can't fit this insn */
	    break;

	  if (template_val != orig_template)
	    /* if we switch the template, we need to reset the NOPs
	       after slot i.  The slot-types of the instructions ahead
	       of i never change, so we don't need to worry about
	       changing NOPs in front of this slot.  */
	    for (j = i; j < 3; ++j)
	      insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
	}
      required_unit = ia64_templ_desc[template_val].exec_unit[i];

      /* resolve dynamic opcodes such as "break", "hint", and "nop":  */
      if (idesc->type == IA64_TYPE_DYN)
	{
	  enum ia64_opnd opnd1, opnd2;

	  if ((strcmp (idesc->name, "nop") == 0)
	      || (strcmp (idesc->name, "break") == 0))
	    insn_unit = required_unit;
	  else if (strcmp (idesc->name, "hint") == 0)
	    {
	      insn_unit = required_unit;
	      if (required_unit == IA64_UNIT_B)
		{
		  switch (md.hint_b)
		    {
		    case hint_b_ok:
		      break;
		    case hint_b_warning:
		      as_warn (_("hint in B unit may be treated as nop"));
		      break;
		    case hint_b_error:
		      /* When manual bundling is off and there is no
			 user template, we choose a different unit so
			 that hint won't go into the current slot. We
			 will fill the current bundle with nops and
			 try to put hint into the next bundle.  */
		      if (!manual_bundling && user_template < 0)
			insn_unit = IA64_UNIT_I;
		      else
			as_bad (_("hint in B unit can't be used"));
		      break;
		    }
		}
	    }
	  else if (strcmp (idesc->name, "chk.s") == 0
		   || strcmp (idesc->name, "mov") == 0)
	    {
	      insn_unit = IA64_UNIT_M;
	      if (required_unit == IA64_UNIT_I
		  || (required_unit == IA64_UNIT_F && template_val == 6))
		insn_unit = IA64_UNIT_I;
	    }
	  else
	    as_fatal (_("emit_one_bundle: unexpected dynamic op"));

	  /* Re-look-up the opcode with the unit suffix appended.  */
	  snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
		    idesc->name, "?imbfxx"[insn_unit]);
	  opnd1 = idesc->operands[0];
	  opnd2 = idesc->operands[1];
	  ia64_free_opcode (idesc);
	  idesc = ia64_find_opcode (mnemonic);
	  /* moves to/from ARs have collisions */
	  if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
	    {
	      while (idesc != NULL
		     && (idesc->operands[0] != opnd1
			 || idesc->operands[1] != opnd2))
		idesc = get_next_opcode (idesc);
	    }
	  md.slot[curr].idesc = idesc;
	}
      else
	{
	  insn_type = idesc->type;
	  insn_unit = IA64_UNIT_NIL;
	  switch (insn_type)
	    {
	    case IA64_TYPE_A:
	      if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
		insn_unit = required_unit;
	      break;
	    case IA64_TYPE_X:
	      insn_unit = IA64_UNIT_L;
	      break;
	    case IA64_TYPE_I:
	      insn_unit = IA64_UNIT_I;
	      break;
	    case IA64_TYPE_M:
	      insn_unit = IA64_UNIT_M;
	      break;
	    case IA64_TYPE_B:
	      insn_unit = IA64_UNIT_B;
	      break;
	    case IA64_TYPE_F:
	      insn_unit = IA64_UNIT_F;
	      break;
	    default:
	      break;
	    }
	}

      if (insn_unit != required_unit)
	continue;		/* Try next slot.  */

      /* Now is a good time to fix up the labels for this insn.  */
      mark_label = FALSE;
      for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
	{
	  S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
	  symbol_set_frag (lfix->sym, frag_now);
	  mark_label |= lfix->dw2_mark_labels;
	}
      for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
	{
	  S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
	  symbol_set_frag (lfix->sym, frag_now);
	}

      if (debug_type == DEBUG_DWARF2
	  || md.slot[curr].loc_directive_seen
	  || mark_label)
	{
	  bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;

	  md.slot[curr].loc_directive_seen = 0;
	  if (mark_label)
	    md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;

	  dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
	}

      build_insn (md.slot + curr, insn + i);

      ptr = md.slot[curr].unwind_record;
      if (ptr)
	{
	  /* Set slot numbers for all remaining unwind records belonging to the
	     current insn.  There can not be any prologue/body unwind records
	     here.  */
	  for (; ptr != end_ptr; ptr = ptr->next)
	    {
	      ptr->slot_number = (unsigned long) f + i;
	      ptr->slot_frag = frag_now;
	    }
	  md.slot[curr].unwind_record = NULL;
	}

      for (j = 0; j < md.slot[curr].num_fixups; ++j)
	{
	  ifix = md.slot[curr].fixup + j;
	  fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
			     &ifix->expr, ifix->is_pcrel, ifix->code);
	  fix->tc_fix_data.opnd = ifix->opnd;
	  fix->fx_file = md.slot[curr].src_file;
	  fix->fx_line = md.slot[curr].src_line;
	}

      end_of_insn_group = md.slot[curr].end_of_insn_group;

      /* This adjustment to "i" must occur after the fix, otherwise the fix
	 is assigned to the wrong slot, and the VMS linker complains.  */
      if (required_unit == IA64_UNIT_L)
	{
	  know (i == 1);
	  /* skip one slot for long/X-unit instructions */
	  ++i;
	}
      --md.num_slots_in_use;
      last_slot = i;

      /* clear slot:  */
      ia64_free_opcode (md.slot[curr].idesc);
      memset (md.slot + curr, 0, sizeof (md.slot[curr]));
      md.slot[curr].user_template = -1;

      if (manual_bundling_off)
	{
	  manual_bundling = 0;
	  break;
	}
      curr = (curr + 1) % NUM_SLOTS;
      idesc = md.slot[curr].idesc;
    }

  /* A user template was specified, but the first following instruction did
     not fit.  This can happen with or without manual bundling.  */
  if (md.num_slots_in_use > 0 && last_slot < 0)
    {
      as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
		    _("`%s' does not fit into %s template"),
		    idesc->name, ia64_templ_desc[template_val].name);
      /* Drop first insn so we don't livelock.  */
      --md.num_slots_in_use;
      know (curr == first);
      ia64_free_opcode (md.slot[curr].idesc);
      memset (md.slot + curr, 0, sizeof (md.slot[curr]));
      md.slot[curr].user_template = -1;
    }
  else if (manual_bundling > 0)
    {
      if (md.num_slots_in_use > 0)
	{
	  if (last_slot >= 2)
	    as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
			  _("`%s' does not fit into bundle"), idesc->name);
	  else
	    {
	      const char *where;

	      if (template_val == 2)
		where = "X slot";
	      else if (last_slot == 0)
		where = "slots 2 or 3";
	      else
		where = "slot 3";
	      as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
			    _("`%s' can't go in %s of %s template"),
			    idesc->name, where, ia64_templ_desc[template_val].name);
	    }
	}
      else
	as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
		      _("Missing '}' at end of file"));
    }

  know (md.num_slots_in_use < NUM_SLOTS);

  /* Pack the three 41-ish-bit slots plus template/stop bit into the two
     64-bit halves of the bundle.  */
  t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
  t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);

  number_to_chars_littleendian (f + 0, t0, 8);
  number_to_chars_littleendian (f + 8, t1, 8);
}

int
md_parse_option (int c, char *arg)
{
  switch (c)
    {
      /* Switches from the Intel assembler.  */
*/ case 'm': if (strcmp (arg, "ilp64") == 0 || strcmp (arg, "lp64") == 0 || strcmp (arg, "p64") == 0) { md.flags |= EF_IA_64_ABI64; } else if (strcmp (arg, "ilp32") == 0) { md.flags &= ~EF_IA_64_ABI64; } else if (strcmp (arg, "le") == 0) { md.flags &= ~EF_IA_64_BE; default_big_endian = 0; } else if (strcmp (arg, "be") == 0) { md.flags |= EF_IA_64_BE; default_big_endian = 1; } else if (strncmp (arg, "unwind-check=", 13) == 0) { arg += 13; if (strcmp (arg, "warning") == 0) md.unwind_check = unwind_check_warning; else if (strcmp (arg, "error") == 0) md.unwind_check = unwind_check_error; else return 0; } else if (strncmp (arg, "hint.b=", 7) == 0) { arg += 7; if (strcmp (arg, "ok") == 0) md.hint_b = hint_b_ok; else if (strcmp (arg, "warning") == 0) md.hint_b = hint_b_warning; else if (strcmp (arg, "error") == 0) md.hint_b = hint_b_error; else return 0; } else if (strncmp (arg, "tune=", 5) == 0) { arg += 5; if (strcmp (arg, "itanium1") == 0) md.tune = itanium1; else if (strcmp (arg, "itanium2") == 0) md.tune = itanium2; else return 0; } else return 0; break; case 'N': if (strcmp (arg, "so") == 0) { /* Suppress signon message. */ } else if (strcmp (arg, "pi") == 0) { /* Reject privileged instructions. FIXME */ } else if (strcmp (arg, "us") == 0) { /* Allow union of signed and unsigned range. FIXME */ } else if (strcmp (arg, "close_fcalls") == 0) { /* Do not resolve global function calls. */ } else return 0; break; case 'C': /* temp[="prefix"] Insert temporary labels into the object file symbol table prefixed by "prefix". Default prefix is ":temp:". 
*/ break; case 'a': /* indirect=<tgt> Assume unannotated indirect branches behavior according to <tgt> -- exit: branch out from the current context (default) labels: all labels in context may be branch targets */ if (strncmp (arg, "indirect=", 9) != 0) return 0; break; case 'x': /* -X conflicts with an ignored option, use -x instead */ md.detect_dv = 1; if (!arg || strcmp (arg, "explicit") == 0) { /* set default mode to explicit */ md.default_explicit_mode = 1; break; } else if (strcmp (arg, "auto") == 0) { md.default_explicit_mode = 0; } else if (strcmp (arg, "none") == 0) { md.detect_dv = 0; } else if (strcmp (arg, "debug") == 0) { md.debug_dv = 1; } else if (strcmp (arg, "debugx") == 0) { md.default_explicit_mode = 1; md.debug_dv = 1; } else if (strcmp (arg, "debugn") == 0) { md.debug_dv = 1; md.detect_dv = 0; } else { as_bad (_("Unrecognized option '-x%s'"), arg); } break; case 'S': /* nops Print nops statistics. */ break; /* GNU specific switches for gcc. */ case OPTION_MCONSTANT_GP: md.flags |= EF_IA_64_CONS_GP; break; case OPTION_MAUTO_PIC: md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP; break; default: return 0; } return 1; } void md_show_usage (FILE *stream) { fputs (_("\ IA-64 options:\n\ --mconstant-gp mark output file as using the constant-GP model\n\ (sets ELF header flag EF_IA_64_CONS_GP)\n\ --mauto-pic mark output file as using the constant-GP model\n\ without function descriptors (sets ELF header flag\n\ EF_IA_64_NOFUNCDESC_CONS_GP)\n\ -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\ -mle | -mbe select little- or big-endian byte order (default -mle)\n\ -mtune=[itanium1|itanium2]\n\ tune for a specific CPU (default -mtune=itanium2)\n\ -munwind-check=[warning|error]\n\ unwind directive check (default -munwind-check=warning)\n\ -mhint.b=[ok|warning|error]\n\ hint.b check (default -mhint.b=error)\n\ -x | -xexplicit turn on dependency violation checking\n"), stream); /* Note for translators: "automagically" can be translated as "automatically" 
here. */
  fputs (_("\
-xauto automagically remove dependency violations (default)\n\
-xnone turn off dependency violation checking\n\
-xdebug debug dependency violation checker\n\
-xdebugn debug dependency violation checker but turn off\n\
 dependency violation checking\n\
-xdebugx debug dependency violation checker and turn on\n\
 dependency violation checking\n"),
	 stream);
}

/* Hook run after all command line arguments have been parsed.  The only
   check needed here is rejecting stabs debug info, which is not supported
   for ia64.  */

void
ia64_after_parse_args (void)
{
  if (debug_type == DEBUG_STABS)
    as_fatal (_("--gstabs is not supported for ia64"));
}

/* Return true if TYPE fits in TEMPL at SLOT. */

static int
match (int templ, int type, int slot)
{
  enum ia64_unit unit;
  int result;

  /* Look up which execution unit the template assigns to this slot,
     then check whether an instruction of TYPE can issue on that unit.  */
  unit = ia64_templ_desc[templ].exec_unit[slot];
  switch (type)
    {
    case IA64_TYPE_DYN:
      result = 1;	/* for nop and break */
      break;
    case IA64_TYPE_A:
      /* A-type instructions can go on either an I- or an M-unit.  */
      result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
      break;
    case IA64_TYPE_X:
      result = (unit == IA64_UNIT_L);
      break;
    case IA64_TYPE_I:
      result = (unit == IA64_UNIT_I);
      break;
    case IA64_TYPE_M:
      result = (unit == IA64_UNIT_M);
      break;
    case IA64_TYPE_B:
      result = (unit == IA64_UNIT_B);
      break;
    case IA64_TYPE_F:
      result = (unit == IA64_UNIT_F);
      break;
    default:
      result = 0;
      break;
    }
  return result;
}

/* For Itanium 1, add a bit of extra goodness if a nop of type F or B would
   fit in TEMPL at SLOT.  For Itanium 2, add a bit of extra goodness if a
   nop of type M or I would fit in TEMPL at SLOT.  */

static inline int
extra_goodness (int templ, int slot)
{
  switch (md.tune)
    {
    case itanium1:
      if (slot == 1 && match (templ, IA64_TYPE_F, slot))
	return 2;
      else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
	return 1;
      else
	return 0;
      break;
    case itanium2:
      if (match (templ, IA64_TYPE_M, slot)
	  || match (templ, IA64_TYPE_I, slot))
	/* Favor M- and I-unit NOPs.  We definitely want to avoid
	   F-unit and B-unit may cause split-issue or less-than-optimal
	   branch-prediction. */
	return 2;
      else
	return 0;
      break;
    default:
      abort ();
      return 0;
    }
}

/* This function is called once, at assembler startup time.  It sets
   up all the tables, etc.
that the MD part of the assembler will need that can be determined before arguments are parsed. */ void md_begin (void) { int i, j, k, t, goodness, best, ok; const char *err; char name[8]; md.auto_align = 1; md.explicit_mode = md.default_explicit_mode; bfd_set_section_alignment (stdoutput, text_section, 4); /* Make sure function pointers get initialized. */ target_big_endian = -1; dot_byteorder (default_big_endian); alias_hash = hash_new (); alias_name_hash = hash_new (); secalias_hash = hash_new (); secalias_name_hash = hash_new (); pseudo_func[FUNC_DTP_MODULE].u.sym = symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE, &zero_address_frag); pseudo_func[FUNC_DTP_RELATIVE].u.sym = symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE, &zero_address_frag); pseudo_func[FUNC_FPTR_RELATIVE].u.sym = symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE, &zero_address_frag); pseudo_func[FUNC_GP_RELATIVE].u.sym = symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE, &zero_address_frag); pseudo_func[FUNC_LT_RELATIVE].u.sym = symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE, &zero_address_frag); pseudo_func[FUNC_LT_RELATIVE_X].u.sym = symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X, &zero_address_frag); pseudo_func[FUNC_PC_RELATIVE].u.sym = symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE, &zero_address_frag); pseudo_func[FUNC_PLT_RELATIVE].u.sym = symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE, &zero_address_frag); pseudo_func[FUNC_SEC_RELATIVE].u.sym = symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE, &zero_address_frag); pseudo_func[FUNC_SEG_RELATIVE].u.sym = symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE, &zero_address_frag); pseudo_func[FUNC_TP_RELATIVE].u.sym = symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE, &zero_address_frag); pseudo_func[FUNC_LTV_RELATIVE].u.sym = symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE, &zero_address_frag); 
pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym = symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE, &zero_address_frag); pseudo_func[FUNC_LT_DTP_MODULE].u.sym = symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE, &zero_address_frag); pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym = symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE, &zero_address_frag); pseudo_func[FUNC_LT_TP_RELATIVE].u.sym = symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE, &zero_address_frag); pseudo_func[FUNC_IPLT_RELOC].u.sym = symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC, &zero_address_frag); #ifdef TE_VMS pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym = symbol_new (".<slotcount>", undefined_section, FUNC_SLOTCOUNT_RELOC, &zero_address_frag); #endif if (md.tune != itanium1) { /* Convert MFI NOPs bundles into MMI NOPs bundles. */ le_nop[0] = 0x8; le_nop_stop[0] = 0x9; } /* Compute the table of best templates. We compute goodness as a base 4 value, in which each match counts for 3. Match-failures result in NOPs and we use extra_goodness() to pick the execution units that are best suited for issuing the NOP. 
*/ for (i = 0; i < IA64_NUM_TYPES; ++i) for (j = 0; j < IA64_NUM_TYPES; ++j) for (k = 0; k < IA64_NUM_TYPES; ++k) { best = 0; for (t = 0; t < NELEMS (ia64_templ_desc); ++t) { goodness = 0; if (match (t, i, 0)) { if (match (t, j, 1)) { if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2)) goodness = 3 + 3 + 3; else goodness = 3 + 3 + extra_goodness (t, 2); } else if (match (t, j, 2)) goodness = 3 + 3 + extra_goodness (t, 1); else { goodness = 3; goodness += extra_goodness (t, 1); goodness += extra_goodness (t, 2); } } else if (match (t, i, 1)) { if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2)) goodness = 3 + 3; else goodness = 3 + extra_goodness (t, 2); } else if (match (t, i, 2)) goodness = 3 + extra_goodness (t, 1); if (goodness > best) { best = goodness; best_template[i][j][k] = t; } } } #ifdef DEBUG_TEMPLATES /* For debugging changes to the best_template calculations. We don't care about combinations with invalid instructions, so start the loops at 1. */ for (i = 0; i < IA64_NUM_TYPES; ++i) for (j = 0; j < IA64_NUM_TYPES; ++j) for (k = 0; k < IA64_NUM_TYPES; ++k) { char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f', 'x', 'd' }; fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j], type_letter[k], ia64_templ_desc[best_template[i][j][k]].name); } #endif for (i = 0; i < NUM_SLOTS; ++i) md.slot[i].user_template = -1; md.pseudo_hash = hash_new (); for (i = 0; i < NELEMS (pseudo_opcode); ++i) { err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name, (void *) (pseudo_opcode + i)); if (err) as_fatal (_("ia64.md_begin: can't hash `%s': %s"), pseudo_opcode[i].name, err); } md.reg_hash = hash_new (); md.dynreg_hash = hash_new (); md.const_hash = hash_new (); md.entry_hash = hash_new (); /* general registers: */ declare_register_set ("r", 128, REG_GR); declare_register ("gp", REG_GR + 1); declare_register ("sp", REG_GR + 12); declare_register ("tp", REG_GR + 13); declare_register_set ("ret", 4, REG_GR + 8); /* floating point registers: */ 
declare_register_set ("f", 128, REG_FR); declare_register_set ("farg", 8, REG_FR + 8); declare_register_set ("fret", 8, REG_FR + 8); /* branch registers: */ declare_register_set ("b", 8, REG_BR); declare_register ("rp", REG_BR + 0); /* predicate registers: */ declare_register_set ("p", 64, REG_P); declare_register ("pr", REG_PR); declare_register ("pr.rot", REG_PR_ROT); /* application registers: */ declare_register_set ("ar", 128, REG_AR); for (i = 0; i < NELEMS (ar); ++i) declare_register (ar[i].name, REG_AR + ar[i].regnum); /* control registers: */ declare_register_set ("cr", 128, REG_CR); for (i = 0; i < NELEMS (cr); ++i) declare_register (cr[i].name, REG_CR + cr[i].regnum); /* dahr registers: */ declare_register_set ("dahr", 8, REG_DAHR); declare_register ("ip", REG_IP); declare_register ("cfm", REG_CFM); declare_register ("psr", REG_PSR); declare_register ("psr.l", REG_PSR_L); declare_register ("psr.um", REG_PSR_UM); for (i = 0; i < NELEMS (indirect_reg); ++i) { unsigned int regnum = indirect_reg[i].regnum; md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum); } /* pseudo-registers used to specify unwind info: */ declare_register ("psp", REG_PSP); for (i = 0; i < NELEMS (const_bits); ++i) { err = hash_insert (md.const_hash, const_bits[i].name, (void *) (const_bits + i)); if (err) as_fatal (_("Inserting \"%s\" into constant hash table failed: %s"), name, err); } /* Set the architecture and machine depending on defaults and command line options. */ if (md.flags & EF_IA_64_ABI64) ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64); else ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32); if (! 
ok)
    as_warn (_("Could not set architecture and machine"));

  /* Set the pointer size and pointer shift size depending on md.flags */

  if (md.flags & EF_IA_64_ABI64)
    {
      md.pointer_size = 8;	 /* pointers are 8 bytes */
      md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
    }
  else
    {
      md.pointer_size = 4;	 /* pointers are 4 bytes */
      md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
    }

  md.mem_offset.hint = 0;
  md.path = 0;
  md.maxpaths = 0;
  md.entry_labels = NULL;
}

/* Set the default options in md.  Cannot do this in md_begin because
   that is called after md_parse_option which is where we set the
   options in md based on command line options.  */

void
ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
{
  md.flags = MD_FLAGS_DEFAULT;
#ifndef TE_VMS
  /* Don't turn on dependency checking for VMS, doesn't work.  */
  md.detect_dv = 1;
#endif
  /* FIXME: We should change it to unwind_check_error someday.  */
  md.unwind_check = unwind_check_warning;
  md.hint_b = hint_b_error;
  md.tune = itanium2;
}

/* Return a string for the target object file format.
*/

const char *
ia64_target_format (void)
{
  if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
    {
      /* Pick the BFD target name from the combination of byte order
	 (EF_IA_64_BE), data model (EF_IA_64_ABI64) and target OS.  */
      if (md.flags & EF_IA_64_BE)
	{
	  if (md.flags & EF_IA_64_ABI64)
#if defined(TE_AIX50)
	    return "elf64-ia64-aix-big";
#elif defined(TE_HPUX)
	    return "elf64-ia64-hpux-big";
#else
	    return "elf64-ia64-big";
#endif
	  else
#if defined(TE_AIX50)
	    return "elf32-ia64-aix-big";
#elif defined(TE_HPUX)
	    return "elf32-ia64-hpux-big";
#else
	    return "elf32-ia64-big";
#endif
	}
      else
	{
	  if (md.flags & EF_IA_64_ABI64)
#if defined (TE_AIX50)
	    return "elf64-ia64-aix-little";
#elif defined (TE_VMS)
	    {
	      /* VMS also forces the architecture-version flag here.  */
	      md.flags |= EF_IA_64_ARCHVER_1;
	      return "elf64-ia64-vms";
	    }
#else
	    return "elf64-ia64-little";
#endif
	  else
#ifdef TE_AIX50
	    return "elf32-ia64-aix-little";
#else
	    return "elf32-ia64-little";
#endif
	}
    }
  else
    return "unknown-format";
}

/* Hook run at end of input: flush any buffered instruction slots and
   record the accumulated ELF header flags on the output BFD.  */

void
ia64_end_of_source (void)
{
  /* terminate insn group upon reaching end of file: */
  insn_group_break (1, 0, 0);

  /* emits slots we haven't written yet: */
  ia64_flush_insns ();

  bfd_set_private_flags (stdoutput, md.flags);

  md.mem_offset.hint = 0;
}

/* Hook run at the start of each input line: checks for a dangling
   qualifying predicate, explicit stop bits (";;"), and manual bundle
   braces ('{' / '}').  */

void
ia64_start_line (void)
{
  static int first;

  if (!first)
    {
      /* Make sure we don't reference input_line_pointer[-1] when that's
	 not valid.  */
      first = 1;
      return;
    }

  if (md.qp.X_op == O_register)
    as_bad (_("qualifying predicate not followed by instruction"));
  md.qp.X_op = O_absent;

  if (ignore_input ())
    return;

  if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
    {
      if (md.detect_dv && !md.explicit_mode)
	{
	  static int warned;

	  if (!warned)
	    {
	      warned = 1;
	      as_warn (_("Explicit stops are ignored in auto mode"));
	    }
	}
      else
	insn_group_break (1, 0, 0);
    }
  else if (input_line_pointer[-1] == '{')
    {
      if (md.manual_bundling)
	as_warn (_("Found '{' when manual bundling is already turned on"));
      else
	CURR_SLOT.manual_bundling_on = 1;
      md.manual_bundling = 1;

      /* Bundling is only acceptable in explicit mode
	 or when in default automatic mode.
*/ if (md.detect_dv && !md.explicit_mode) { if (!md.mode_explicitly_set && !md.default_explicit_mode) dot_dv_mode ('E'); else as_warn (_("Found '{' after explicit switch to automatic mode")); } } else if (input_line_pointer[-1] == '}') { if (!md.manual_bundling) as_warn (_("Found '}' when manual bundling is off")); else PREV_SLOT.manual_bundling_off = 1; md.manual_bundling = 0; /* switch back to automatic mode, if applicable */ if (md.detect_dv && md.explicit_mode && !md.mode_explicitly_set && !md.default_explicit_mode) dot_dv_mode ('A'); } } /* This is a hook for ia64_frob_label, so that it can distinguish tags from labels. */ static int defining_tag = 0; int ia64_unrecognized_line (int ch) { switch (ch) { case '(': expression_and_evaluate (&md.qp); if (*input_line_pointer++ != ')') { as_bad (_("Expected ')'")); return 0; } if (md.qp.X_op != O_register) { as_bad (_("Qualifying predicate expected")); return 0; } if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64) { as_bad (_("Predicate register expected")); return 0; } return 1; case '[': { char *s; char c; symbolS *tag; int temp; if (md.qp.X_op == O_register) { as_bad (_("Tag must come before qualifying predicate.")); return 0; } /* This implements just enough of read_a_source_file in read.c to recognize labels. */ if (is_name_beginner (*input_line_pointer)) { c = get_symbol_name (&s); } else if (LOCAL_LABELS_FB && ISDIGIT (*input_line_pointer)) { temp = 0; while (ISDIGIT (*input_line_pointer)) temp = (temp * 10) + *input_line_pointer++ - '0'; fb_label_instance_inc (temp); s = fb_label_name (temp, 0); c = *input_line_pointer; } else { s = NULL; c = '\0'; } if (c != ':') { /* Put ':' back for error messages' sake. */ *input_line_pointer++ = ':'; as_bad (_("Expected ':'")); return 0; } defining_tag = 1; tag = colon (s); defining_tag = 0; /* Put ':' back for error messages' sake. */ *input_line_pointer++ = ':'; if (*input_line_pointer++ != ']') { as_bad (_("Expected ']'")); return 0; } if (! 
tag) { as_bad (_("Tag name expected")); return 0; } return 1; } default: break; } /* Not a valid line. */ return 0; } void ia64_frob_label (struct symbol *sym) { struct label_fix *fix; /* Tags need special handling since they are not bundle breaks like labels. */ if (defining_tag) { fix = obstack_alloc (&notes, sizeof (*fix)); fix->sym = sym; fix->next = CURR_SLOT.tag_fixups; fix->dw2_mark_labels = FALSE; CURR_SLOT.tag_fixups = fix; return; } if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) { md.last_text_seg = now_seg; fix = obstack_alloc (&notes, sizeof (*fix)); fix->sym = sym; fix->next = CURR_SLOT.label_fixups; fix->dw2_mark_labels = dwarf2_loc_mark_labels; CURR_SLOT.label_fixups = fix; /* Keep track of how many code entry points we've seen. */ if (md.path == md.maxpaths) { md.maxpaths += 20; md.entry_labels = (const char **) xrealloc ((void *) md.entry_labels, md.maxpaths * sizeof (char *)); } md.entry_labels[md.path++] = S_GET_NAME (sym); } } #ifdef TE_HPUX /* The HP-UX linker will give unresolved symbol errors for symbols that are declared but unused. This routine removes declared, unused symbols from an object. */ int ia64_frob_symbol (struct symbol *sym) { if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) && ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT) || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr && ! S_IS_EXTERNAL (sym))) return 1; return 0; } #endif void ia64_flush_pending_output (void) { if (!md.keep_pending_output && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) { /* ??? This causes many unnecessary stop bits to be emitted. Unfortunately, it isn't clear if it is safe to remove this. */ insn_group_break (1, 0, 0); ia64_flush_insns (); } } /* Do ia64-specific expression optimization. All that's done here is to transform index expressions that are either due to the indexing of rotating registers or due to the indexing of indirect register sets. 
*/

int
ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
  if (op != O_index)
    return 0;
  resolve_expression (l);
  if (l->X_op == O_register)
    {
      /* For .rotX-allocated registers the register-set size was stored
	 in the top 16 bits of X_add_number (see ia64_parse_name).  */
      unsigned num_regs = l->X_add_number >> 16;

      resolve_expression (r);
      if (num_regs)
	{
	  /* Left side is a .rotX-allocated register.  */
	  if (r->X_op != O_constant)
	    {
	      as_bad (_("Rotating register index must be a non-negative constant"));
	      r->X_add_number = 0;
	    }
	  else if ((valueT) r->X_add_number >= num_regs)
	    {
	      as_bad (_("Index out of range 0..%u"), num_regs - 1);
	      r->X_add_number = 0;
	    }
	  l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
	  return 1;
	}
      else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
	{
	  /* Indirect register set (cpuid[], dbr[], ... rr[]): the index
	     must be a general register.  */
	  if (r->X_op != O_register
	      || r->X_add_number < REG_GR
	      || r->X_add_number > REG_GR + 127)
	    {
	      as_bad (_("Indirect register index must be a general register"));
	      r->X_add_number = REG_GR;
	    }
	  l->X_op = O_index;
	  l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
	  l->X_add_number = r->X_add_number;
	  return 1;
	}
    }
  as_bad (_("Index can only be applied to rotating or indirect registers"));
  /* Fall back to some register use of which has as little as possible
     side effects, to minimize subsequent error messages.  */
  l->X_op = O_register;
  l->X_add_number = REG_GR + 3;
  return 1;
}

/* Parse NAME as an ia64 symbol: @-prefixed relocation pseudo-functions,
   register names, named constants, and inN/locN/outN frame registers.
   Returns non-zero and fills in *E when NAME was recognized.  */

int
ia64_parse_name (char *name, expressionS *e, char *nextcharP)
{
  struct const_desc *cdesc;
  struct dynreg *dr = 0;
  unsigned int idx;
  struct symbol *sym;
  char *end;

  if (*name == '@')
    {
      enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;

      /* Find what relocation pseudo-function we're dealing with.  */
      for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
	if (pseudo_func[idx].name
	    && pseudo_func[idx].name[0] == name[1]
	    && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
	  {
	    pseudo_type = pseudo_func[idx].type;
	    break;
	  }
      switch (pseudo_type)
	{
	case PSEUDO_FUNC_RELOC:
	  end = input_line_pointer;
	  if (*nextcharP != '(')
	    {
	      as_bad (_("Expected '('"));
	      break;
	    }
	  /* Skip '('.
*/ ++input_line_pointer; expression (e); if (*input_line_pointer != ')') { as_bad (_("Missing ')'")); goto done; } /* Skip ')'. */ ++input_line_pointer; #ifdef TE_VMS if (idx == FUNC_SLOTCOUNT_RELOC) { /* @slotcount can accept any expression. Canonicalize. */ e->X_add_symbol = make_expr_symbol (e); e->X_op = O_symbol; e->X_add_number = 0; } #endif if (e->X_op != O_symbol) { if (e->X_op != O_pseudo_fixup) { as_bad (_("Not a symbolic expression")); goto done; } if (idx != FUNC_LT_RELATIVE) { as_bad (_("Illegal combination of relocation functions")); goto done; } switch (S_GET_VALUE (e->X_op_symbol)) { case FUNC_FPTR_RELATIVE: idx = FUNC_LT_FPTR_RELATIVE; break; case FUNC_DTP_MODULE: idx = FUNC_LT_DTP_MODULE; break; case FUNC_DTP_RELATIVE: idx = FUNC_LT_DTP_RELATIVE; break; case FUNC_TP_RELATIVE: idx = FUNC_LT_TP_RELATIVE; break; default: as_bad (_("Illegal combination of relocation functions")); goto done; } } /* Make sure gas doesn't get rid of local symbols that are used in relocs. */ e->X_op = O_pseudo_fixup; e->X_op_symbol = pseudo_func[idx].u.sym; done: *nextcharP = *input_line_pointer; break; case PSEUDO_FUNC_CONST: e->X_op = O_constant; e->X_add_number = pseudo_func[idx].u.ival; break; case PSEUDO_FUNC_REG: e->X_op = O_register; e->X_add_number = pseudo_func[idx].u.ival; break; default: return 0; } return 1; } /* first see if NAME is a known register name: */ sym = hash_find (md.reg_hash, name); if (sym) { e->X_op = O_register; e->X_add_number = S_GET_VALUE (sym); return 1; } cdesc = hash_find (md.const_hash, name); if (cdesc) { e->X_op = O_constant; e->X_add_number = cdesc->value; return 1; } /* check for inN, locN, or outN: */ idx = 0; switch (name[0]) { case 'i': if (name[1] == 'n' && ISDIGIT (name[2])) { dr = &md.in; idx = 2; } break; case 'l': if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3])) { dr = &md.loc; idx = 3; } break; case 'o': if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3])) { dr = &md.out; idx = 3; } break; default: break; } 
/* Ignore register numbers with leading zeroes, except zero itself.  */
  if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
    {
      unsigned long regnum;

      /* The name is inN, locN, or outN; parse the register number.  */
      regnum = strtoul (name + idx, &end, 10);
      if (end > name + idx && *end == '\0' && regnum < 96)
	{
	  /* Clamp out-of-frame register numbers to 0 after diagnosing,
	     so a usable expression is still produced.  */
	  if (regnum >= dr->num_regs)
	    {
	      if (!dr->num_regs)
		as_bad (_("No current frame"));
	      else
		as_bad (_("Register number out of range 0..%u"),
			dr->num_regs - 1);
	      regnum = 0;
	    }
	  e->X_op = O_register;
	  e->X_add_number = dr->base + regnum;
	  return 1;
	}
    }

  /* Work on a stack copy so stripping '#' suffixes does not modify the
     caller's buffer.  */
  end = alloca (strlen (name) + 1);
  strcpy (end, name);
  name = ia64_canonicalize_symbol_name (end);
  if ((dr = hash_find (md.dynreg_hash, name)))
    {
      /* We've got ourselves the name of a rotating register set.
	 Store the base register number in the low 16 bits of
	 X_add_number and the size of the register set in the top 16
	 bits.  */
      e->X_op = O_register;
      e->X_add_number = dr->base | (dr->num_regs << 16);
      return 1;
    }
  return 0;
}

/* Remove the '#' suffix that indicates a symbol as opposed to a register.
   Diagnoses a bare "#" and warns about redundant repeated suffixes.
   Modifies NAME in place and returns it.  */

char *
ia64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name), full = len;

  while (len > 0 && name[len - 1] == '#')
    --len;
  /* NOTE: len is unsigned (size_t), so "<= 0" is effectively "== 0",
     i.e. the name consisted only of '#' characters (or was empty).  */
  if (len <= 0)
    {
      if (full > 0)
	as_bad (_("Standalone `#' is illegal"));
    }
  else if (len < full - 1)
    as_warn (_("Redundant `#' suffix operators"));
  name[len] = '\0';
  return name;
}

/* Return true if idesc is a conditional branch instruction.  This excludes
   the modulo scheduled branches, and br.ia.  Mod-sched branches are excluded
   because they always read/write resources regardless of the value of the
   qualifying predicate.  br.ia must always use p0, and hence is always
   taken.  Thus this function returns true for branches which can fall
   through, and which use no resources if they do fall through.  */

static int
is_conditional_branch (struct ia64_opcode *idesc)
{
  /* br is a conditional branch.  Everything that starts with br. except
     br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
Everything that starts with brl is a conditional branch.  */
  return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
	  && (idesc->name[2] == '\0'
	      || (idesc->name[2] == '.'
		  && idesc->name[3] != 'i'
		  && idesc->name[3] != 'c'
		  && idesc->name[3] != 'w')
	      || idesc->name[2] == 'l'
	      /* br.cond, br.call, br.clr */
	      || (idesc->name[2] == '.'
		  && idesc->name[3] == 'c'
		  && (idesc->name[4] == 'a'
		      || idesc->name[4] == 'o'
		      || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
}

/* Return whether the given opcode is a taken branch.  If there's any doubt,
   returns zero.  */

static int
is_taken_branch (struct ia64_opcode *idesc)
{
  /* A conditional branch qualified by p0 (always true) is always taken,
     as is br.ia.  */
  return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
	  || strncmp (idesc->name, "br.ia", 5) == 0);
}

/* Return whether the given opcode is an interruption or rfi.  If there's
   any doubt, returns zero.  */

static int
is_interruption_or_rfi (struct ia64_opcode *idesc)
{
  if (strcmp (idesc->name, "rfi") == 0)
    return 1;
  return 0;
}

/* Returns the index of the given dependency in the opcode's list of chks,
   or -1 if there is no dependency.  */

static int
depends_on (int depind, struct ia64_opcode *idesc)
{
  int i;
  const struct ia64_opcode_dependency *dep = idesc->dependencies;

  for (i = 0; i < dep->nchks; i++)
    {
      if (depind == DEP (dep->chks[i]))
	return i;
    }
  return -1;
}

/* Determine a set of specific resources used for a particular resource
   class.  Returns the number of specific resources identified.  For those
   cases which are not determinable statically, the resource returned is
   marked nonspecific.

   Meanings of value in 'NOTE':
   1) only read/write when the register number is explicitly encoded in the
      insn.
   2) only read CFM when accessing a rotating GR, FR, or PR.  mov pr only
      accesses CFM when qualifying predicate is in the rotating region.
   3) general register value is used to specify an indirect register; not
      determinable statically.
4) only read the given resource when bits 7:0 of the indirect index register value does not match the register number of the resource; not determinable statically. 5) all rules are implementation specific. 6) only when both the index specified by the reader and the index specified by the writer have the same value in bits 63:61; not determinable statically. 7) only access the specified resource when the corresponding mask bit is set 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is only read when these insns reference FR2-31 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only written when these insns write FR32-127 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the instruction 11) The target predicates are written independently of PR[qp], but source registers are only read if PR[qp] is true. Since the state of PR[qp] cannot statically be determined, all source registers are marked used. 12) This insn only reads the specified predicate register when that register is the PR[qp]. 13) This reference to ld-c only applies to the GR whose value is loaded with data returned from memory, not the post-incremented address register. 14) The RSE resource includes the implementation-specific RSE internal state resources. At least one (and possibly more) of these resources are read by each instruction listed in IC:rse-readers. At least one (and possibly more) of these resources are written by each insn listed in IC:rse-writers. 15+16) Represents reserved instructions, which the assembler does not generate. 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up. Memory resources (i.e. locations in memory) are *not* marked or tracked by this code; there are no dependency violations based on memory access. 
*/ #define MAX_SPECS 256 #define DV_CHK 1 #define DV_REG 0 static int specify_resource (const struct ia64_dependency *dep, struct ia64_opcode *idesc, /* is this a DV chk or a DV reg? */ int type, /* returned specific resources */ struct rsrc specs[MAX_SPECS], /* resource note for this insn's usage */ int note, /* which execution path to examine */ int path) { int count = 0; int i; int rsrc_write = 0; struct rsrc tmpl; if (dep->mode == IA64_DV_WAW || (dep->mode == IA64_DV_RAW && type == DV_REG) || (dep->mode == IA64_DV_WAR && type == DV_CHK)) rsrc_write = 1; /* template for any resources we identify */ tmpl.dependency = dep; tmpl.note = note; tmpl.insn_srlz = tmpl.data_srlz = 0; tmpl.qp_regno = CURR_SLOT.qp_regno; tmpl.link_to_qp_branch = 1; tmpl.mem_offset.hint = 0; tmpl.mem_offset.offset = 0; tmpl.mem_offset.base = 0; tmpl.specific = 1; tmpl.index = -1; tmpl.cmp_type = CMP_NONE; tmpl.depind = 0; tmpl.file = NULL; tmpl.line = 0; tmpl.path = 0; #define UNHANDLED \ as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \ dep->name, idesc->name, (rsrc_write?"write":"read"), note) #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path) /* we don't need to track these */ if (dep->semantics == IA64_DVS_NONE) return 0; switch (dep->specifier) { case IA64_RS_AR_K: if (note == 1) { if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; if (regno >= 0 && regno <= 7) { specs[count] = tmpl; specs[count++].index = regno; } } } else if (note == 0) { for (i = 0; i < 8; i++) { specs[count] = tmpl; specs[count++].index = i; } } else { UNHANDLED; } break; case IA64_RS_AR_UNAT: /* This is a mov =AR or mov AR= instruction. */ if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; if (regno == AR_UNAT) { specs[count++] = tmpl; } } else { /* This is a spill/fill, or other instruction that modifies the unat register. 
*/ /* Unless we can determine the specific bits used, mark the whole thing; bits 8:3 of the memory address indicate the bit used in UNAT. The .mem.offset hint may be used to eliminate a small subset of conflicts. */ specs[count] = tmpl; if (md.mem_offset.hint) { if (md.debug_dv) fprintf (stderr, " Using hint for spill/fill\n"); /* The index isn't actually used, just set it to something approximating the bit index. */ specs[count].index = (md.mem_offset.offset >> 3) & 0x3F; specs[count].mem_offset.hint = 1; specs[count].mem_offset.offset = md.mem_offset.offset; specs[count++].mem_offset.base = md.mem_offset.base; } else { specs[count++].specific = 0; } } break; case IA64_RS_AR: if (note == 1) { if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; if ((regno >= 8 && regno <= 15) || (regno >= 20 && regno <= 23) || (regno >= 31 && regno <= 39) || (regno >= 41 && regno <= 47) || (regno >= 67 && regno <= 111)) { specs[count] = tmpl; specs[count++].index = regno; } } } else { UNHANDLED; } break; case IA64_RS_ARb: if (note == 1) { if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; if ((regno >= 48 && regno <= 63) || (regno >= 112 && regno <= 127)) { specs[count] = tmpl; specs[count++].index = regno; } } } else if (note == 0) { for (i = 48; i < 64; i++) { specs[count] = tmpl; specs[count++].index = i; } for (i = 112; i < 128; i++) { specs[count] = tmpl; specs[count++].index = i; } } else { UNHANDLED; } break; case IA64_RS_BR: if (note != 1) { UNHANDLED; } else { if (rsrc_write) { for (i = 0; i < idesc->num_outputs; i++) if (idesc->operands[i] == IA64_OPND_B1 || idesc->operands[i] == IA64_OPND_B2) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.opnd[i].X_add_number - REG_BR; } } else { for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++) if (idesc->operands[i] == IA64_OPND_B1 || idesc->operands[i] == IA64_OPND_B2) { specs[count] = 
tmpl; specs[count++].index = CURR_SLOT.opnd[i].X_add_number - REG_BR; } } } break; case IA64_RS_CPUID: /* four or more registers */ if (note == 3) { if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; if (regno >= 0 && regno < NELEMS (gr_values) && KNOWN (regno)) { specs[count] = tmpl; specs[count++].index = gr_values[regno].value & 0xFF; } else { specs[count] = tmpl; specs[count++].specific = 0; } } } else { UNHANDLED; } break; case IA64_RS_DBR: /* four or more registers */ if (note == 3) { if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; if (regno >= 0 && regno < NELEMS (gr_values) && KNOWN (regno)) { specs[count] = tmpl; specs[count++].index = gr_values[regno].value & 0xFF; } else { specs[count] = tmpl; specs[count++].specific = 0; } } } else if (note == 0 && !rsrc_write) { specs[count] = tmpl; specs[count++].specific = 0; } else { UNHANDLED; } break; case IA64_RS_IBR: /* four or more registers */ if (note == 3) { if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; if (regno >= 0 && regno < NELEMS (gr_values) && KNOWN (regno)) { specs[count] = tmpl; specs[count++].index = gr_values[regno].value & 0xFF; } else { specs[count] = tmpl; specs[count++].specific = 0; } } } else { UNHANDLED; } break; case IA64_RS_MSR: if (note == 5) { /* These are implementation specific. Force all references to conflict with all other references. 
*/ specs[count] = tmpl; specs[count++].specific = 0; } else { UNHANDLED; } break; case IA64_RS_PKR: /* 16 or more registers */ if (note == 3 || note == 4) { if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; if (regno >= 0 && regno < NELEMS (gr_values) && KNOWN (regno)) { if (note == 3) { specs[count] = tmpl; specs[count++].index = gr_values[regno].value & 0xFF; } else for (i = 0; i < NELEMS (gr_values); i++) { /* Uses all registers *except* the one in R3. */ if ((unsigned)i != (gr_values[regno].value & 0xFF)) { specs[count] = tmpl; specs[count++].index = i; } } } else { specs[count] = tmpl; specs[count++].specific = 0; } } } else if (note == 0) { /* probe et al. */ specs[count] = tmpl; specs[count++].specific = 0; } break; case IA64_RS_PMC: /* four or more registers */ if (note == 3) { if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3)) { int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write) ? 
1 : !rsrc_write); int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR; if (regno >= 0 && regno < NELEMS (gr_values) && KNOWN (regno)) { specs[count] = tmpl; specs[count++].index = gr_values[regno].value & 0xFF; } else { specs[count] = tmpl; specs[count++].specific = 0; } } } else { UNHANDLED; } break; case IA64_RS_PMD: /* four or more registers */ if (note == 3) { if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; if (regno >= 0 && regno < NELEMS (gr_values) && KNOWN (regno)) { specs[count] = tmpl; specs[count++].index = gr_values[regno].value & 0xFF; } else { specs[count] = tmpl; specs[count++].specific = 0; } } } else { UNHANDLED; } break; case IA64_RS_RR: /* eight registers */ if (note == 6) { if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; if (regno >= 0 && regno < NELEMS (gr_values) && KNOWN (regno)) { specs[count] = tmpl; specs[count++].index = (gr_values[regno].value >> 61) & 0x7; } else { specs[count] = tmpl; specs[count++].specific = 0; } } } else if (note == 0 && !rsrc_write) { specs[count] = tmpl; specs[count++].specific = 0; } else { UNHANDLED; } break; case IA64_RS_CR_IRR: if (note == 0) { /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */ int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR; if (rsrc_write && idesc->operands[1] == IA64_OPND_CR3 && regno == CR_IVR) { for (i = 0; i < 4; i++) { specs[count] = tmpl; specs[count++].index = CR_IRR0 + i; } } } else if (note == 1) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; if (idesc->operands[!rsrc_write] == IA64_OPND_CR3 && regno >= CR_IRR0 && regno <= CR_IRR3) { specs[count] = tmpl; specs[count++].index = regno; } } else { UNHANDLED; } break; case IA64_RS_CR_IIB: if (note != 0) { UNHANDLED; } else { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; if (idesc->operands[!rsrc_write] == IA64_OPND_CR3 && (regno == CR_IIB0 
|| regno == CR_IIB1)) { specs[count] = tmpl; specs[count++].index = regno; } } break; case IA64_RS_CR_LRR: if (note != 1) { UNHANDLED; } else { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; if (idesc->operands[!rsrc_write] == IA64_OPND_CR3 && (regno == CR_LRR0 || regno == CR_LRR1)) { specs[count] = tmpl; specs[count++].index = regno; } } break; case IA64_RS_CR: if (note == 1) { if (idesc->operands[!rsrc_write] == IA64_OPND_CR3) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; } } else { UNHANDLED; } break; case IA64_RS_DAHR: if (note == 0) { if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR; } } else { UNHANDLED; } break; case IA64_RS_FR: case IA64_RS_FRb: if (note != 1) { UNHANDLED; } else if (rsrc_write) { if (dep->specifier == IA64_RS_FRb && idesc->operands[0] == IA64_OPND_F1) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR; } } else { for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++) { if (idesc->operands[i] == IA64_OPND_F2 || idesc->operands[i] == IA64_OPND_F3 || idesc->operands[i] == IA64_OPND_F4) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.opnd[i].X_add_number - REG_FR; } } } break; case IA64_RS_GR: if (note == 13) { /* This reference applies only to the GR whose value is loaded with data returned from memory. 
*/ specs[count] = tmpl; specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR; } else if (note == 1) { if (rsrc_write) { for (i = 0; i < idesc->num_outputs; i++) if (idesc->operands[i] == IA64_OPND_R1 || idesc->operands[i] == IA64_OPND_R2 || idesc->operands[i] == IA64_OPND_R3) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.opnd[i].X_add_number - REG_GR; } if (idesc->flags & IA64_OPCODE_POSTINC) for (i = 0; i < NELEMS (idesc->operands); i++) if (idesc->operands[i] == IA64_OPND_MR3) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.opnd[i].X_add_number - REG_GR; } } else { /* Look for anything that reads a GR. */ for (i = 0; i < NELEMS (idesc->operands); i++) { if (idesc->operands[i] == IA64_OPND_MR3 || idesc->operands[i] == IA64_OPND_CPUID_R3 || idesc->operands[i] == IA64_OPND_DBR_R3 || idesc->operands[i] == IA64_OPND_IBR_R3 || idesc->operands[i] == IA64_OPND_MSR_R3 || idesc->operands[i] == IA64_OPND_PKR_R3 || idesc->operands[i] == IA64_OPND_PMC_R3 || idesc->operands[i] == IA64_OPND_PMD_R3 || idesc->operands[i] == IA64_OPND_DAHR_R3 || idesc->operands[i] == IA64_OPND_RR_R3 || ((i >= idesc->num_outputs) && (idesc->operands[i] == IA64_OPND_R1 || idesc->operands[i] == IA64_OPND_R2 || idesc->operands[i] == IA64_OPND_R3 /* addl source register. */ || idesc->operands[i] == IA64_OPND_R3_2))) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.opnd[i].X_add_number - REG_GR; } } } } else { UNHANDLED; } break; /* This is the same as IA64_RS_PRr, except that the register range is from 1 - 15, and there are no rotating register reads/writes here. */ case IA64_RS_PR: if (note == 0) { for (i = 1; i < 16; i++) { specs[count] = tmpl; specs[count++].index = i; } } else if (note == 7) { valueT mask = 0; /* Mark only those registers indicated by the mask. 
*/ if (rsrc_write) { mask = CURR_SLOT.opnd[2].X_add_number; for (i = 1; i < 16; i++) if (mask & ((valueT) 1 << i)) { specs[count] = tmpl; specs[count++].index = i; } } else { UNHANDLED; } } else if (note == 11) /* note 11 implies note 1 as well */ { if (rsrc_write) { for (i = 0; i < idesc->num_outputs; i++) { if (idesc->operands[i] == IA64_OPND_P1 || idesc->operands[i] == IA64_OPND_P2) { int regno = CURR_SLOT.opnd[i].X_add_number - REG_P; if (regno >= 1 && regno < 16) { specs[count] = tmpl; specs[count++].index = regno; } } } } else { UNHANDLED; } } else if (note == 12) { if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.qp_regno; } } else if (note == 1) { if (rsrc_write) { int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P; int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P; int or_andcm = strstr (idesc->name, "or.andcm") != NULL; int and_orcm = strstr (idesc->name, "and.orcm") != NULL; if ((idesc->operands[0] == IA64_OPND_P1 || idesc->operands[0] == IA64_OPND_P2) && p1 >= 1 && p1 < 16) { specs[count] = tmpl; specs[count].cmp_type = (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE)); specs[count++].index = p1; } if ((idesc->operands[1] == IA64_OPND_P1 || idesc->operands[1] == IA64_OPND_P2) && p2 >= 1 && p2 < 16) { specs[count] = tmpl; specs[count].cmp_type = (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE)); specs[count++].index = p2; } } else { if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.qp_regno; } if (idesc->operands[1] == IA64_OPND_PR) { for (i = 1; i < 16; i++) { specs[count] = tmpl; specs[count++].index = i; } } } } else { UNHANDLED; } break; /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are simplified cases of this. 
*/ case IA64_RS_PRr: if (note == 0) { for (i = 16; i < 63; i++) { specs[count] = tmpl; specs[count++].index = i; } } else if (note == 7) { valueT mask = 0; /* Mark only those registers indicated by the mask. */ if (rsrc_write && idesc->operands[0] == IA64_OPND_PR) { mask = CURR_SLOT.opnd[2].X_add_number; if (mask & ((valueT) 1 << 16)) for (i = 16; i < 63; i++) { specs[count] = tmpl; specs[count++].index = i; } } else if (rsrc_write && idesc->operands[0] == IA64_OPND_PR_ROT) { for (i = 16; i < 63; i++) { specs[count] = tmpl; specs[count++].index = i; } } else { UNHANDLED; } } else if (note == 11) /* note 11 implies note 1 as well */ { if (rsrc_write) { for (i = 0; i < idesc->num_outputs; i++) { if (idesc->operands[i] == IA64_OPND_P1 || idesc->operands[i] == IA64_OPND_P2) { int regno = CURR_SLOT.opnd[i].X_add_number - REG_P; if (regno >= 16 && regno < 63) { specs[count] = tmpl; specs[count++].index = regno; } } } } else { UNHANDLED; } } else if (note == 12) { if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.qp_regno; } } else if (note == 1) { if (rsrc_write) { int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P; int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P; int or_andcm = strstr (idesc->name, "or.andcm") != NULL; int and_orcm = strstr (idesc->name, "and.orcm") != NULL; if ((idesc->operands[0] == IA64_OPND_P1 || idesc->operands[0] == IA64_OPND_P2) && p1 >= 16 && p1 < 63) { specs[count] = tmpl; specs[count].cmp_type = (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE)); specs[count++].index = p1; } if ((idesc->operands[1] == IA64_OPND_P1 || idesc->operands[1] == IA64_OPND_P2) && p2 >= 16 && p2 < 63) { specs[count] = tmpl; specs[count].cmp_type = (or_andcm ? CMP_AND : (and_orcm ? 
CMP_OR : CMP_NONE)); specs[count++].index = p2; } } else { if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63) { specs[count] = tmpl; specs[count++].index = CURR_SLOT.qp_regno; } if (idesc->operands[1] == IA64_OPND_PR) { for (i = 16; i < 63; i++) { specs[count] = tmpl; specs[count++].index = i; } } } } else { UNHANDLED; } break; case IA64_RS_PSR: /* Verify that the instruction is using the PSR bit indicated in dep->regindex. */ if (note == 0) { if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM) { if (dep->regindex < 6) { specs[count++] = tmpl; } } else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR) { if (dep->regindex < 32 || dep->regindex == 35 || dep->regindex == 36 || (!rsrc_write && dep->regindex == PSR_CPL)) { specs[count++] = tmpl; } } else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L) { if (dep->regindex < 32 || dep->regindex == 35 || dep->regindex == 36 || (rsrc_write && dep->regindex == PSR_CPL)) { specs[count++] = tmpl; } } else { /* Several PSR bits have very specific dependencies. */ switch (dep->regindex) { default: specs[count++] = tmpl; break; case PSR_IC: if (rsrc_write) { specs[count++] = tmpl; } else { /* Only certain CR accesses use PSR.ic */ if (idesc->operands[0] == IA64_OPND_CR3 || idesc->operands[1] == IA64_OPND_CR3) { int reg_index = ((idesc->operands[0] == IA64_OPND_CR3) ? 0 : 1); int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_CR; switch (regno) { default: break; case CR_ITIR: case CR_IFS: case CR_IIM: case CR_IIP: case CR_IPSR: case CR_ISR: case CR_IFA: case CR_IHA: case CR_IIB0: case CR_IIB1: case CR_IIPA: specs[count++] = tmpl; break; } } } break; case PSR_CPL: if (rsrc_write) { specs[count++] = tmpl; } else { /* Only some AR accesses use cpl */ if (idesc->operands[0] == IA64_OPND_AR3 || idesc->operands[1] == IA64_OPND_AR3) { int reg_index = ((idesc->operands[0] == IA64_OPND_AR3) ? 
0 : 1); int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_AR; if (regno == AR_ITC || regno == AR_RUC || (reg_index == 0 && (regno == AR_RSC || (regno >= AR_K0 && regno <= AR_K7)))) { specs[count++] = tmpl; } } else { specs[count++] = tmpl; } break; } } } } else if (note == 7) { valueT mask = 0; if (idesc->operands[0] == IA64_OPND_IMMU24) { mask = CURR_SLOT.opnd[0].X_add_number; } else { UNHANDLED; } if (mask & ((valueT) 1 << dep->regindex)) { specs[count++] = tmpl; } } else if (note == 8) { int min = dep->regindex == PSR_DFL ? 2 : 32; int max = dep->regindex == PSR_DFL ? 31 : 127; /* dfh is read on FR32-127; dfl is read on FR2-31 */ for (i = 0; i < NELEMS (idesc->operands); i++) { if (idesc->operands[i] == IA64_OPND_F1 || idesc->operands[i] == IA64_OPND_F2 || idesc->operands[i] == IA64_OPND_F3 || idesc->operands[i] == IA64_OPND_F4) { int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR; if (reg >= min && reg <= max) { specs[count++] = tmpl; } } } } else if (note == 9) { int min = dep->regindex == PSR_MFL ? 2 : 32; int max = dep->regindex == PSR_MFL ? 
31 : 127; /* mfh is read on writes to FR32-127; mfl is read on writes to FR2-31 */ for (i = 0; i < idesc->num_outputs; i++) { if (idesc->operands[i] == IA64_OPND_F1) { int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR; if (reg >= min && reg <= max) { specs[count++] = tmpl; } } } } else if (note == 10) { for (i = 0; i < NELEMS (idesc->operands); i++) { if (idesc->operands[i] == IA64_OPND_R1 || idesc->operands[i] == IA64_OPND_R2 || idesc->operands[i] == IA64_OPND_R3) { int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR; if (regno >= 16 && regno <= 31) { specs[count++] = tmpl; } } } } else { UNHANDLED; } break; case IA64_RS_AR_FPSR: if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; if (regno == AR_FPSR) { specs[count++] = tmpl; } } else { specs[count++] = tmpl; } break; case IA64_RS_ARX: /* Handle all AR[REG] resources */ if (note == 0 || note == 1) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; if (idesc->operands[!rsrc_write] == IA64_OPND_AR3 && regno == dep->regindex) { specs[count++] = tmpl; } /* other AR[REG] resources may be affected by AR accesses */ else if (idesc->operands[0] == IA64_OPND_AR3) { /* AR[] writes */ regno = CURR_SLOT.opnd[0].X_add_number - REG_AR; switch (dep->regindex) { default: break; case AR_BSP: case AR_RNAT: if (regno == AR_BSPSTORE) { specs[count++] = tmpl; } case AR_RSC: if (!rsrc_write && (regno == AR_BSPSTORE || regno == AR_RNAT)) { specs[count++] = tmpl; } break; } } else if (idesc->operands[1] == IA64_OPND_AR3) { /* AR[] reads */ regno = CURR_SLOT.opnd[1].X_add_number - REG_AR; switch (dep->regindex) { default: break; case AR_RSC: if (regno == AR_BSPSTORE || regno == AR_RNAT) { specs[count++] = tmpl; } break; } } else { specs[count++] = tmpl; } } else { UNHANDLED; } break; case IA64_RS_CRX: /* Handle all CR[REG] resources. ??? FIXME: The rule 17 isn't really handled correctly. 
*/ if (note == 0 || note == 1 || note == 17) { if (idesc->operands[!rsrc_write] == IA64_OPND_CR3) { int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; if (regno == dep->regindex) { specs[count++] = tmpl; } else if (!rsrc_write) { /* Reads from CR[IVR] affect other resources. */ if (regno == CR_IVR) { if ((dep->regindex >= CR_IRR0 && dep->regindex <= CR_IRR3) || dep->regindex == CR_TPR) { specs[count++] = tmpl; } } } } else { specs[count++] = tmpl; } } else { UNHANDLED; } break; case IA64_RS_INSERVICE: /* look for write of EOI (67) or read of IVR (65) */ if ((idesc->operands[0] == IA64_OPND_CR3 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI) || (idesc->operands[1] == IA64_OPND_CR3 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR)) { specs[count++] = tmpl; } break; case IA64_RS_GR0: if (note == 1) { specs[count++] = tmpl; } else { UNHANDLED; } break; case IA64_RS_CFM: if (note != 2) { specs[count++] = tmpl; } else { /* Check if any of the registers accessed are in the rotating region. 
mov to/from pr accesses CFM only when qp_regno is in the rotating region */ for (i = 0; i < NELEMS (idesc->operands); i++) { if (idesc->operands[i] == IA64_OPND_R1 || idesc->operands[i] == IA64_OPND_R2 || idesc->operands[i] == IA64_OPND_R3) { int num = CURR_SLOT.opnd[i].X_add_number - REG_GR; /* Assumes that md.rot.num_regs is always valid */ if (md.rot.num_regs > 0 && num > 31 && num < 31 + md.rot.num_regs) { specs[count] = tmpl; specs[count++].specific = 0; } } else if (idesc->operands[i] == IA64_OPND_F1 || idesc->operands[i] == IA64_OPND_F2 || idesc->operands[i] == IA64_OPND_F3 || idesc->operands[i] == IA64_OPND_F4) { int num = CURR_SLOT.opnd[i].X_add_number - REG_FR; if (num > 31) { specs[count] = tmpl; specs[count++].specific = 0; } } else if (idesc->operands[i] == IA64_OPND_P1 || idesc->operands[i] == IA64_OPND_P2) { int num = CURR_SLOT.opnd[i].X_add_number - REG_P; if (num > 15) { specs[count] = tmpl; specs[count++].specific = 0; } } } if (CURR_SLOT.qp_regno > 15) { specs[count] = tmpl; specs[count++].specific = 0; } } break; /* This is the same as IA64_RS_PRr, except simplified to account for the fact that there is only one register. 
*/ case IA64_RS_PR63: if (note == 0) { specs[count++] = tmpl; } else if (note == 7) { valueT mask = 0; if (idesc->operands[2] == IA64_OPND_IMM17) mask = CURR_SLOT.opnd[2].X_add_number; if (mask & ((valueT) 1 << 63)) specs[count++] = tmpl; } else if (note == 11) { if ((idesc->operands[0] == IA64_OPND_P1 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63) || (idesc->operands[1] == IA64_OPND_P2 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63)) { specs[count++] = tmpl; } } else if (note == 12) { if (CURR_SLOT.qp_regno == 63) { specs[count++] = tmpl; } } else if (note == 1) { if (rsrc_write) { int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P; int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P; int or_andcm = strstr (idesc->name, "or.andcm") != NULL; int and_orcm = strstr (idesc->name, "and.orcm") != NULL; if (p1 == 63 && (idesc->operands[0] == IA64_OPND_P1 || idesc->operands[0] == IA64_OPND_P2)) { specs[count] = tmpl; specs[count++].cmp_type = (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE)); } if (p2 == 63 && (idesc->operands[1] == IA64_OPND_P1 || idesc->operands[1] == IA64_OPND_P2)) { specs[count] = tmpl; specs[count++].cmp_type = (or_andcm ? CMP_AND : (and_orcm ? 
CMP_OR : CMP_NONE)); } } else { if (CURR_SLOT.qp_regno == 63) { specs[count++] = tmpl; } } } else { UNHANDLED; } break; case IA64_RS_RSE: /* FIXME we can identify some individual RSE written resources, but RSE read resources have not yet been completely identified, so for now treat RSE as a single resource */ if (strncmp (idesc->name, "mov", 3) == 0) { if (rsrc_write) { if (idesc->operands[0] == IA64_OPND_AR3 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE) { specs[count++] = tmpl; } } else { if (idesc->operands[0] == IA64_OPND_AR3) { if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT) { specs[count++] = tmpl; } } else if (idesc->operands[1] == IA64_OPND_AR3) { if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT) { specs[count++] = tmpl; } } } } else { specs[count++] = tmpl; } break; case IA64_RS_ANY: /* FIXME -- do any of these need to be non-specific? */ specs[count++] = tmpl; break; default: as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier); break; } return count; } /* Clear branch flags on marked resources. This breaks the link between the QP of the marking instruction and a subsequent branch on the same QP. */ static void clear_qp_branch_flag (valueT mask) { int i; for (i = 0; i < regdepslen; i++) { valueT bit = ((valueT) 1 << regdeps[i].qp_regno); if ((bit & mask) != 0) { regdeps[i].link_to_qp_branch = 0; } } } /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove any mutexes which contain one of the PRs and create new ones when needed. */ static int update_qp_mutex (valueT mask) { int i; int add = 0; i = 0; while (i < qp_mutexeslen) { if ((qp_mutexes[i].prmask & mask) != 0) { /* If it destroys and creates the same mutex, do nothing. 
*/
	  if (qp_mutexes[i].prmask == mask
	      && qp_mutexes[i].path == md.path)
	    {
	      i++;
	      add = -1;
	    }
	  else
	    {
	      int keep = 0;

	      if (md.debug_dv)
		{
		  fprintf (stderr, " Clearing mutex relation");
		  print_prmask (qp_mutexes[i].prmask);
		  fprintf (stderr, "\n");
		}

	      /* Deal with the old mutex with more than 3+ PRs only if
		 the new mutex on the same execution path with it.

		 FIXME: The 3+ mutex support is incomplete.
		 dot_pred_rel () may be a better place to fix it.  */
	      if (qp_mutexes[i].path == md.path)
		{
		  /* If it is a proper subset of the mutex, create a
		     new mutex.  */
		  if (add == 0
		      && (qp_mutexes[i].prmask & mask) == mask)
		    add = 1;

		  qp_mutexes[i].prmask &= ~mask;
		  if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
		    {
		      /* Modify the mutex if there are more than one
			 PR left.  */
		      keep = 1;
		      i++;
		    }
		}

	      if (keep == 0)
		/* Remove the mutex.  Swap-remove: overwrite with the last
		   entry and shrink, so I is not advanced.  */
		qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
	    }
	}
      else
	++i;
    }
  if (add == 1)
    add_qp_mutex (mask);
  return add;
}

/* Remove any mutexes which contain any of the PRs indicated in the mask.

   Any changes to a PR clears the mutex relations which include that PR.  */

static void
clear_qp_mutex (valueT mask)
{
  int i;

  i = 0;
  while (i < qp_mutexeslen)
    {
      if ((qp_mutexes[i].prmask & mask) != 0)
	{
	  if (md.debug_dv)
	    {
	      fprintf (stderr, " Clearing mutex relation");
	      print_prmask (qp_mutexes[i].prmask);
	      fprintf (stderr, "\n");
	    }
	  /* Swap-remove; do not advance I, the swapped-in entry must be
	     examined too.  */
	  qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
	}
      else
	++i;
    }
}

/* Clear implies relations which contain PRs in the given masks.
   P1_MASK indicates the source of the implies relation, while P2_MASK
   indicates the implied PR.  */

static void
clear_qp_implies (valueT p1_mask, valueT p2_mask)
{
  int i;

  i = 0;
  while (i < qp_implieslen)
    {
      if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
	  || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
	{
	  if (md.debug_dv)
	    fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
		     qp_implies[i].p1, qp_implies[i].p2);
	  /* Swap-remove; do not advance I.  */
	  qp_implies[i] = qp_implies[--qp_implieslen];
	}
      else
	++i;
    }
}

/* Add the PRs specified to the list of implied relations.
*/

static void
add_qp_imply (int p1, int p2)
{
  valueT p2_mask;
  valueT p1_bit;
  int idx;

  /* p0 is not meaningful here.  */
  if (p1 == 0 || p2 == 0)
    abort ();

  /* A predicate trivially implies itself; record nothing.  */
  if (p1 == p2)
    return;

  /* If it exists already, ignore it.  */
  for (idx = 0; idx < qp_implieslen; idx++)
    if (qp_implies[idx].p1 == p1
	&& qp_implies[idx].p2 == p2
	&& qp_implies[idx].path == md.path
	&& !qp_implies[idx].p2_branched)
      return;

  /* Grow the table in chunks of 20 entries when it is full.  */
  if (qp_implieslen == qp_impliestotlen)
    {
      qp_impliestotlen += 20;
      qp_implies = (struct qp_imply *)
	xrealloc ((void *) qp_implies,
		  qp_impliestotlen * sizeof (struct qp_imply));
    }

  if (md.debug_dv)
    fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);

  qp_implies[qp_implieslen].p1 = p1;
  qp_implies[qp_implieslen].p2 = p2;
  qp_implies[qp_implieslen].path = md.path;
  qp_implies[qp_implieslen++].p2_branched = 0;

  /* Add in the implied transitive relations; for everything that p2 implies,
     make p1 imply that, too; for everything that implies p1, make it imply p2
     as well.  Recursion terminates because existing relations are skipped
     above.  */
  for (idx = 0; idx < qp_implieslen; idx++)
    {
      if (qp_implies[idx].p1 == p2)
	add_qp_imply (p1, qp_implies[idx].p2);
      if (qp_implies[idx].p2 == p1)
	add_qp_imply (qp_implies[idx].p1, p2);
    }

  /* Add in mutex relations implied by this implies relation; for each mutex
     relation containing p2, duplicate it and replace p2 with p1.  */
  p1_bit = (valueT) 1 << p1;
  p2_mask = (valueT) 1 << p2;
  for (idx = 0; idx < qp_mutexeslen; idx++)
    if (qp_mutexes[idx].prmask & p2_mask)
      add_qp_mutex ((qp_mutexes[idx].prmask & ~p2_mask) | p1_bit);
}

/* Add the PRs specified in the mask to the mutex list; this means that only
   one of the PRs can be true at any time.  PR0 should never be included in
   the mask.
*/

static void
add_qp_mutex (valueT mask)
{
  /* PR0 must never be part of a mutex mask.  */
  if (mask & 0x1)
    abort ();

  /* Grow the table in chunks of 20 entries when it is full.  */
  if (qp_mutexeslen == qp_mutexestotlen)
    {
      qp_mutexestotlen += 20;
      qp_mutexes = (struct qpmutex *)
	xrealloc ((void *) qp_mutexes,
		  qp_mutexestotlen * sizeof (struct qpmutex));
    }
  if (md.debug_dv)
    {
      fprintf (stderr, " Registering mutex on");
      print_prmask (mask);
      fprintf (stderr, "\n");
    }
  qp_mutexes[qp_mutexeslen].path = md.path;
  qp_mutexes[qp_mutexeslen++].prmask = mask;
}

/* Return non-zero if NAME ends with SUFFIX.  NAME must be strictly longer
   than SUFFIX (an exact match does not count).  */

static int
has_suffix_p (const char *name, const char *suffix)
{
  size_t namelen = strlen (name);
  size_t sufflen = strlen (suffix);

  if (namelen <= sufflen)
    return 0;
  return strcmp (name + namelen - sufflen, suffix) == 0;
}

/* Forget all tracked GR values.  Index 0 is deliberately left alone.  */

static void
clear_register_values (void)
{
  int i;
  if (md.debug_dv)
    fprintf (stderr, " Clearing register values\n");
  for (i = 1; i < NELEMS (gr_values); i++)
    gr_values[i].known = 0;
}

/* Keep track of register values/changes which affect DV tracking.

   optimization note: should add a flag to classes of insns where otherwise we
   have to examine a group of strings to identify them.  */

static void
note_register_values (struct ia64_opcode *idesc)
{
  valueT qp_changemask = 0;
  int i;

  /* Invalidate values for registers being written to.  */
  for (i = 0; i < idesc->num_outputs; i++)
    {
      if (idesc->operands[i] == IA64_OPND_R1
	  || idesc->operands[i] == IA64_OPND_R2
	  || idesc->operands[i] == IA64_OPND_R3)
	{
	  int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
	  if (regno > 0 && regno < NELEMS (gr_values))
	    gr_values[regno].known = 0;
	}
      else if (idesc->operands[i] == IA64_OPND_R3_2)
	{
	  /* R3_2 operands are restricted to r0-r3.  */
	  int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
	  if (regno > 0 && regno < 4)
	    gr_values[regno].known = 0;
	}
      else if (idesc->operands[i] == IA64_OPND_P1
	       || idesc->operands[i] == IA64_OPND_P2)
	{
	  int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
	  qp_changemask |= (valueT) 1 << regno;
	}
      else if (idesc->operands[i] == IA64_OPND_PR)
	{
	  if (idesc->operands[2] & (valueT) 0x10000)
	    qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
	  else
	    qp_changemask = idesc->operands[2];
	  break;
	}
      else if (idesc->operands[i] == IA64_OPND_PR_ROT)
	{
	  if (idesc->operands[1] & ((valueT) 1 << 43))
	    qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
	  else
	    qp_changemask = idesc->operands[1];
	  /* Only the rotating predicates (PR16 and up) are affected.  */
	  qp_changemask &= ~(valueT) 0xFFFF;
	  break;
	}
    }

  /* Always clear qp branch flags on any PR change.  */
  /* FIXME there may be exceptions for certain compares.  */
  clear_qp_branch_flag (qp_changemask);

  /* Invalidate rotating registers on insns which affect RRBs in CFM.  */
  if (idesc->flags & IA64_OPCODE_MOD_RRBS)
    {
      qp_changemask |= ~(valueT) 0xFFFF;
      if (strcmp (idesc->name, "clrrrb.pr") != 0)
	{
	  for (i = 32; i < 32 + md.rot.num_regs; i++)
	    gr_values[i].known = 0;
	}
      clear_qp_mutex (qp_changemask);
      clear_qp_implies (qp_changemask, qp_changemask);
    }
  /* After a call, all register values are undefined, except those marked
     as "safe".  */
  else if (strncmp (idesc->name, "br.call", 6) == 0
	   || strncmp (idesc->name, "brl.call", 7) == 0)
    {
      /* FIXME keep GR values which are marked as "safe_across_calls"  */
      clear_register_values ();
      clear_qp_mutex (~qp_safe_across_calls);
      clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
      clear_qp_branch_flag (~qp_safe_across_calls);
    }
  else if (is_interruption_or_rfi (idesc)
	   || is_taken_branch (idesc))
    {
      /* Everything is invalidated across an interruption or taken branch.  */
      clear_register_values ();
      clear_qp_mutex (~(valueT) 0);
      clear_qp_implies (~(valueT) 0, ~(valueT) 0);
    }
  /* Look for mutex and implies relations.  */
  else if ((idesc->operands[0] == IA64_OPND_P1
	    || idesc->operands[0] == IA64_OPND_P2)
	   && (idesc->operands[1] == IA64_OPND_P1
	       || idesc->operands[1] == IA64_OPND_P2))
    {
      int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
      int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
      valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
      valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;

      /* If both PRs are PR0, we can't really do anything.  */
      if (p1 == 0 && p2 == 0)
	{
	  if (md.debug_dv)
	    fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
	}
      /* In general, clear mutexes and implies which include P1 or P2,
	 with the following exceptions.  */
      else if (has_suffix_p (idesc->name, ".or.andcm")
	       || has_suffix_p (idesc->name, ".and.orcm"))
	clear_qp_implies (p2mask, p1mask);
      else if (has_suffix_p (idesc->name, ".andcm")
	       || has_suffix_p (idesc->name, ".and"))
	clear_qp_implies (0, p1mask | p2mask);
      else if (has_suffix_p (idesc->name, ".orcm")
	       || has_suffix_p (idesc->name, ".or"))
	{
	  clear_qp_mutex (p1mask | p2mask);
	  clear_qp_implies (p1mask | p2mask, 0);
	}
      else
	{
	  int added = 0;

	  clear_qp_implies (p1mask | p2mask, p1mask | p2mask);

	  /* If one of the PRs is PR0, we call clear_qp_mutex.  */
	  if (p1 == 0 || p2 == 0)
	    clear_qp_mutex (p1mask | p2mask);
	  else
	    added = update_qp_mutex (p1mask | p2mask);

	  if (CURR_SLOT.qp_regno == 0
	      || has_suffix_p (idesc->name, ".unc"))
	    {
	      if (added == 0 && p1 && p2)
		add_qp_mutex (p1mask | p2mask);
	      if (CURR_SLOT.qp_regno != 0)
		{
		  if (p1)
		    add_qp_imply (p1, CURR_SLOT.qp_regno);
		  if (p2)
		    add_qp_imply (p2, CURR_SLOT.qp_regno);
		}
	    }
	}
    }
  /* Look for mov imm insns into GRs.  */
  else if (idesc->operands[0] == IA64_OPND_R1
	   && (idesc->operands[1] == IA64_OPND_IMM22
	       || idesc->operands[1] == IA64_OPND_IMMU64)
	   && CURR_SLOT.opnd[1].X_op == O_constant
	   && (strcmp (idesc->name, "mov") == 0
	       || strcmp (idesc->name, "movl") == 0))
    {
      int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
      if (regno > 0 && regno < NELEMS (gr_values))
	{
	  gr_values[regno].known = 1;
	  gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
	  gr_values[regno].path = md.path;
	  if (md.debug_dv)
	    {
	      fprintf (stderr, " Know gr%d = ", regno);
	      fprintf_vma (stderr, gr_values[regno].value);
	      fputs ("\n", stderr);
	    }
	}
    }
  /* Look for dep.z imm insns.  */
  else if (idesc->operands[0] == IA64_OPND_R1
	   && idesc->operands[1] == IA64_OPND_IMM8
	   && strcmp (idesc->name, "dep.z") == 0)
    {
      int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
      if (regno > 0 && regno < NELEMS (gr_values))
	{
	  /* Compute the deposited constant: opnd[1] is the immediate,
	     opnd[2] the position, opnd[3] the length.  */
	  valueT value = CURR_SLOT.opnd[1].X_add_number;

	  if (CURR_SLOT.opnd[3].X_add_number < 64)
	    value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
	  value <<= CURR_SLOT.opnd[2].X_add_number;
	  gr_values[regno].known = 1;
	  gr_values[regno].value = value;
	  gr_values[regno].path = md.path;
	  if (md.debug_dv)
	    {
	      fprintf (stderr, " Know gr%d = ", regno);
	      fprintf_vma (stderr, gr_values[regno].value);
	      fputs ("\n", stderr);
	    }
	}
    }
  else
    {
      clear_qp_mutex (qp_changemask);
      clear_qp_implies (qp_changemask, qp_changemask);
    }
}

/* Return whether the given predicate registers are currently mutex.
*/

static int
qp_mutex (int p1, int p2, int path)
{
  int i;
  valueT mask;

  if (p1 != p2)
    {
      mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
      for (i = 0; i < qp_mutexeslen; i++)
	{
	  /* Only mutexes recorded on PATH or a later path apply.  */
	  if (qp_mutexes[i].path >= path
	      && (qp_mutexes[i].prmask & mask) == mask)
	    return 1;
	}
    }
  return 0;
}

/* Return whether the given resource is in the given insn's list of chks
   Return 1 if the conflict is absolutely determined, 2 if it's a potential
   conflict.  */

static int
resources_match (struct rsrc *rs,
		 struct ia64_opcode *idesc,
		 int note,
		 int qp_regno,
		 int path)
{
  struct rsrc specs[MAX_SPECS];
  int count;

  /* If the marked resource's qp_regno and the given qp_regno are mutex,
     we don't need to check.  One exception is note 11, which indicates that
     target predicates are written regardless of PR[qp].  */
  if (qp_mutex (rs->qp_regno, qp_regno, path) && note != 11)
    return 0;

  count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);

  while (count-- > 0)
    {
      /* UNAT checking is a bit more specific than other resources */
      if (rs->dependency->specifier == IA64_RS_AR_UNAT
	  && specs[count].mem_offset.hint
	  && rs->mem_offset.hint)
	{
	  if (rs->mem_offset.base == specs[count].mem_offset.base)
	    {
	      /* Bits 8:3 of the offset select the UNAT bit.  */
	      if (((rs->mem_offset.offset >> 3) & 0x3F)
		  == ((specs[count].mem_offset.offset >> 3) & 0x3F))
		return 1;
	      else
		continue;
	    }
	}

      /* Skip apparent PR write conflicts where both writes are an AND or both
	 writes are an OR.  */
      if (rs->dependency->specifier == IA64_RS_PR
	  || rs->dependency->specifier == IA64_RS_PRr
	  || rs->dependency->specifier == IA64_RS_PR63)
	{
	  if (specs[count].cmp_type != CMP_NONE
	      && specs[count].cmp_type == rs->cmp_type)
	    {
	      if (md.debug_dv)
		fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
			 dv_mode[rs->dependency->mode],
			 rs->dependency->specifier != IA64_RS_PR63 ?
			 specs[count].index : 63);
	      continue;
	    }
	  if (md.debug_dv)
	    fprintf (stderr,
		     " %s on parallel compare conflict %s vs %s on PR%d\n",
		     dv_mode[rs->dependency->mode],
		     dv_cmp_type[rs->cmp_type],
		     dv_cmp_type[specs[count].cmp_type],
		     rs->dependency->specifier != IA64_RS_PR63 ?
		     specs[count].index : 63);
	}

      /* If either resource is not specific, conservatively assume a conflict  */
      if (!specs[count].specific || !rs->specific)
	return 2;
      else if (specs[count].index == rs->index)
	return 1;
    }

  return 0;
}

/* Indicate an instruction group break; if INSERT_STOP is non-zero, then
   insert a stop to create the break.  Update all resource dependencies
   appropriately.  If QP_REGNO is non-zero, only apply the break to resources
   which use the same QP_REGNO and have the link_to_qp_branch flag set.
   If SAVE_CURRENT is non-zero, don't affect resources marked by the current
   instruction.  */

static void
insn_group_break (int insert_stop, int qp_regno, int save_current)
{
  int i;

  if (insert_stop && md.num_slots_in_use > 0)
    PREV_SLOT.end_of_insn_group = 1;

  if (md.debug_dv)
    {
      fprintf (stderr, " Insn group break%s",
	       (insert_stop ? " (w/stop)" : ""));
      if (qp_regno != 0)
	fprintf (stderr, " effective for QP=%d", qp_regno);
      fprintf (stderr, "\n");
    }

  i = 0;
  while (i < regdepslen)
    {
      const struct ia64_dependency *dep = regdeps[i].dependency;

      /* Skip entries filtered out by QP_REGNO.  */
      if (qp_regno != 0
	  && regdeps[i].qp_regno != qp_regno)
	{
	  ++i;
	  continue;
	}

      /* Skip dependencies marked by the current instruction when asked.  */
      if (save_current
	  && CURR_SLOT.src_file == regdeps[i].file
	  && CURR_SLOT.src_line == regdeps[i].line)
	{
	  ++i;
	  continue;
	}

      /* clear dependencies which are automatically cleared by a stop, or
	 those that have reached the appropriate state of insn
	 serialization */
      if (dep->semantics == IA64_DVS_IMPLIED
	  || dep->semantics == IA64_DVS_IMPLIEDF
	  || regdeps[i].insn_srlz == STATE_SRLZ)
	{
	  print_dependency ("Removing", i);
	  /* Swap-remove; do not advance I.  */
	  regdeps[i] = regdeps[--regdepslen];
	}
      else
	{
	  if (dep->semantics == IA64_DVS_DATA
	      || dep->semantics == IA64_DVS_INSTR
	      || dep->semantics == IA64_DVS_SPECIFIC)
	    {
	      if (regdeps[i].insn_srlz == STATE_NONE)
		regdeps[i].insn_srlz = STATE_STOP;
	      if (regdeps[i].data_srlz == STATE_NONE)
		regdeps[i].data_srlz = STATE_STOP;
	    }
	  ++i;
	}
    }
}

/* Add the given resource usage spec to the list of active dependencies.
*/ static void mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED, const struct ia64_dependency *dep ATTRIBUTE_UNUSED, struct rsrc *spec, int depind, int path) { if (regdepslen == regdepstotlen) { regdepstotlen += 20; regdeps = (struct rsrc *) xrealloc ((void *) regdeps, regdepstotlen * sizeof (struct rsrc)); } regdeps[regdepslen] = *spec; regdeps[regdepslen].depind = depind; regdeps[regdepslen].path = path; regdeps[regdepslen].file = CURR_SLOT.src_file; regdeps[regdepslen].line = CURR_SLOT.src_line; print_dependency ("Adding", regdepslen); ++regdepslen; } static void print_dependency (const char *action, int depind) { if (md.debug_dv) { fprintf (stderr, " %s %s '%s'", action, dv_mode[(regdeps[depind].dependency)->mode], (regdeps[depind].dependency)->name); if (regdeps[depind].specific && regdeps[depind].index >= 0) fprintf (stderr, " (%d)", regdeps[depind].index); if (regdeps[depind].mem_offset.hint) { fputs (" ", stderr); fprintf_vma (stderr, regdeps[depind].mem_offset.base); fputs ("+", stderr); fprintf_vma (stderr, regdeps[depind].mem_offset.offset); } fprintf (stderr, "\n"); } } static void instruction_serialization (void) { int i; if (md.debug_dv) fprintf (stderr, " Instruction serialization\n"); for (i = 0; i < regdepslen; i++) if (regdeps[i].insn_srlz == STATE_STOP) regdeps[i].insn_srlz = STATE_SRLZ; } static void data_serialization (void) { int i = 0; if (md.debug_dv) fprintf (stderr, " Data serialization\n"); while (i < regdepslen) { if (regdeps[i].data_srlz == STATE_STOP /* Note: as of 991210, all "other" dependencies are cleared by a data serialization. This might change with new tables */ || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER) { print_dependency ("Removing", i); regdeps[i] = regdeps[--regdepslen]; } else ++i; } } /* Insert stops and serializations as needed to avoid DVs. 
*/ static void remove_marked_resource (struct rsrc *rs) { switch (rs->dependency->semantics) { case IA64_DVS_SPECIFIC: if (md.debug_dv) fprintf (stderr, "Implementation-specific, assume worst case...\n"); /* ...fall through... */ case IA64_DVS_INSTR: if (md.debug_dv) fprintf (stderr, "Inserting instr serialization\n"); if (rs->insn_srlz < STATE_STOP) insn_group_break (1, 0, 0); if (rs->insn_srlz < STATE_SRLZ) { struct slot oldslot = CURR_SLOT; /* Manually jam a srlz.i insn into the stream */ memset (&CURR_SLOT, 0, sizeof (CURR_SLOT)); CURR_SLOT.user_template = -1; CURR_SLOT.idesc = ia64_find_opcode ("srlz.i"); instruction_serialization (); md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS; if (++md.num_slots_in_use >= NUM_SLOTS) emit_one_bundle (); CURR_SLOT = oldslot; } insn_group_break (1, 0, 0); break; case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all "other" types of DV are eliminated by a data serialization */ case IA64_DVS_DATA: if (md.debug_dv) fprintf (stderr, "Inserting data serialization\n"); if (rs->data_srlz < STATE_STOP) insn_group_break (1, 0, 0); { struct slot oldslot = CURR_SLOT; /* Manually jam a srlz.d insn into the stream */ memset (&CURR_SLOT, 0, sizeof (CURR_SLOT)); CURR_SLOT.user_template = -1; CURR_SLOT.idesc = ia64_find_opcode ("srlz.d"); data_serialization (); md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS; if (++md.num_slots_in_use >= NUM_SLOTS) emit_one_bundle (); CURR_SLOT = oldslot; } break; case IA64_DVS_IMPLIED: case IA64_DVS_IMPLIEDF: if (md.debug_dv) fprintf (stderr, "Inserting stop\n"); insn_group_break (1, 0, 0); break; default: break; } } /* Check the resources used by the given opcode against the current dependency list. The check is run once for each execution path encountered. In this case, a unique execution path is the sequence of instructions following a code entry point, e.g. the following has three execution paths, one starting at L0, one at L1, and one at L2. 
L0: nop L1: add L2: add br.ret */ static void check_dependencies (struct ia64_opcode *idesc) { const struct ia64_opcode_dependency *opdeps = idesc->dependencies; int path; int i; /* Note that the number of marked resources may change within the loop if in auto mode. */ i = 0; while (i < regdepslen) { struct rsrc *rs = &regdeps[i]; const struct ia64_dependency *dep = rs->dependency; int chkind; int note; int start_over = 0; if (dep->semantics == IA64_DVS_NONE || (chkind = depends_on (rs->depind, idesc)) == -1) { ++i; continue; } note = NOTE (opdeps->chks[chkind]); /* Check this resource against each execution path seen thus far. */ for (path = 0; path <= md.path; path++) { int matchtype; /* If the dependency wasn't on the path being checked, ignore it. */ if (rs->path < path) continue; /* If the QP for this insn implies a QP which has branched, don't bother checking. Ed. NOTE: I don't think this check is terribly useful; what's the point of generating code which will only be reached if its QP is zero? This code was specifically inserted to handle the following code, based on notes from Intel's DV checking code, where p1 implies p2. 
mov r4 = 2 (p2) br.cond L (p1) mov r4 = 7 */ if (CURR_SLOT.qp_regno != 0) { int skip = 0; int implies; for (implies = 0; implies < qp_implieslen; implies++) { if (qp_implies[implies].path >= path && qp_implies[implies].p1 == CURR_SLOT.qp_regno && qp_implies[implies].p2_branched) { skip = 1; break; } } if (skip) continue; } if ((matchtype = resources_match (rs, idesc, note, CURR_SLOT.qp_regno, path)) != 0) { char msg[1024]; char pathmsg[256] = ""; char indexmsg[256] = ""; int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0); if (path != 0) snprintf (pathmsg, sizeof (pathmsg), " when entry is at label '%s'", md.entry_labels[path - 1]); if (matchtype == 1 && rs->index >= 0) snprintf (indexmsg, sizeof (indexmsg), ", specific resource number is %d", rs->index); snprintf (msg, sizeof (msg), "Use of '%s' %s %s dependency '%s' (%s)%s%s", idesc->name, (certain ? "violates" : "may violate"), dv_mode[dep->mode], dep->name, dv_sem[dep->semantics], pathmsg, indexmsg); if (md.explicit_mode) { as_warn ("%s", msg); if (path < md.path) as_warn (_("Only the first path encountering the conflict is reported")); as_warn_where (rs->file, rs->line, _("This is the location of the conflicting usage")); /* Don't bother checking other paths, to avoid duplicating the same warning */ break; } else { if (md.debug_dv) fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line); remove_marked_resource (rs); /* since the set of dependencies has changed, start over */ /* FIXME -- since we're removing dvs as we go, we probably don't really need to start over... */ start_over = 1; break; } } } if (start_over) i = 0; else ++i; } } /* Register new dependencies based on the given opcode. */ static void mark_resources (struct ia64_opcode *idesc) { int i; const struct ia64_opcode_dependency *opdeps = idesc->dependencies; int add_only_qp_reads = 0; /* A conditional branch only uses its resources if it is taken; if it is taken, we stop following that path. 
The other branch types effectively *always* write their resources. If it's not taken, register only QP reads. */ if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc)) { add_only_qp_reads = 1; } if (md.debug_dv) fprintf (stderr, "Registering '%s' resource usage\n", idesc->name); for (i = 0; i < opdeps->nregs; i++) { const struct ia64_dependency *dep; struct rsrc specs[MAX_SPECS]; int note; int path; int count; dep = ia64_find_dependency (opdeps->regs[i]); note = NOTE (opdeps->regs[i]); if (add_only_qp_reads && !(dep->mode == IA64_DV_WAR && (dep->specifier == IA64_RS_PR || dep->specifier == IA64_RS_PRr || dep->specifier == IA64_RS_PR63))) continue; count = specify_resource (dep, idesc, DV_REG, specs, note, md.path); while (count-- > 0) { mark_resource (idesc, dep, &specs[count], DEP (opdeps->regs[i]), md.path); } /* The execution path may affect register values, which may in turn affect which indirect-access resources are accessed. */ switch (dep->specifier) { default: break; case IA64_RS_CPUID: case IA64_RS_DBR: case IA64_RS_IBR: case IA64_RS_MSR: case IA64_RS_PKR: case IA64_RS_PMC: case IA64_RS_PMD: case IA64_RS_RR: for (path = 0; path < md.path; path++) { count = specify_resource (dep, idesc, DV_REG, specs, note, path); while (count-- > 0) mark_resource (idesc, dep, &specs[count], DEP (opdeps->regs[i]), path); } break; } } } /* Remove dependencies when they no longer apply. */ static void update_dependencies (struct ia64_opcode *idesc) { int i; if (strcmp (idesc->name, "srlz.i") == 0) { instruction_serialization (); } else if (strcmp (idesc->name, "srlz.d") == 0) { data_serialization (); } else if (is_interruption_or_rfi (idesc) || is_taken_branch (idesc)) { /* Although technically the taken branch doesn't clear dependencies which require a srlz.[id], we don't follow the branch; the next instruction is assumed to start with a clean slate. 
*/ regdepslen = 0; md.path = 0; } else if (is_conditional_branch (idesc) && CURR_SLOT.qp_regno != 0) { int is_call = strstr (idesc->name, ".call") != NULL; for (i = 0; i < qp_implieslen; i++) { /* If the conditional branch's predicate is implied by the predicate in an existing dependency, remove that dependency. */ if (qp_implies[i].p2 == CURR_SLOT.qp_regno) { int depind = 0; /* Note that this implied predicate takes a branch so that if a later insn generates a DV but its predicate implies this one, we can avoid the false DV warning. */ qp_implies[i].p2_branched = 1; while (depind < regdepslen) { if (regdeps[depind].qp_regno == qp_implies[i].p1) { print_dependency ("Removing", depind); regdeps[depind] = regdeps[--regdepslen]; } else ++depind; } } } /* Any marked resources which have this same predicate should be cleared, provided that the QP hasn't been modified between the marking instruction and the branch. */ if (is_call) { insn_group_break (0, CURR_SLOT.qp_regno, 1); } else { i = 0; while (i < regdepslen) { if (regdeps[i].qp_regno == CURR_SLOT.qp_regno && regdeps[i].link_to_qp_branch && (regdeps[i].file != CURR_SLOT.src_file || regdeps[i].line != CURR_SLOT.src_line)) { /* Treat like a taken branch */ print_dependency ("Removing", i); regdeps[i] = regdeps[--regdepslen]; } else ++i; } } } } /* Examine the current instruction for dependency violations. */ static int check_dv (struct ia64_opcode *idesc) { if (md.debug_dv) { fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n", idesc->name, CURR_SLOT.src_line, idesc->dependencies->nchks, idesc->dependencies->nregs); } /* Look through the list of currently marked resources; if the current instruction has the dependency in its chks list which uses that resource, check against the specific resources used. */ check_dependencies (idesc); /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads), then add them to the list of marked resources. 
*/ mark_resources (idesc); /* There are several types of dependency semantics, and each has its own requirements for being cleared Instruction serialization (insns separated by interruption, rfi, or writer + srlz.i + reader, all in separate groups) clears DVS_INSTR. Data serialization (instruction serialization, or writer + srlz.d + reader, where writer and srlz.d are in separate groups) clears DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to always be the case). Instruction group break (groups separated by stop, taken branch, interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF. */ update_dependencies (idesc); /* Sometimes, knowing a register value allows us to avoid giving a false DV warning. Keep track of as many as possible that are useful. */ note_register_values (idesc); /* We don't need or want this anymore. */ md.mem_offset.hint = 0; return 0; } /* Translate one line of assembly. Pseudo ops and labels do not show here. */ void md_assemble (char *str) { char *saved_input_line_pointer, *mnemonic; const struct pseudo_opcode *pdesc; struct ia64_opcode *idesc; unsigned char qp_regno; unsigned int flags; int ch; saved_input_line_pointer = input_line_pointer; input_line_pointer = str; /* extract the opcode (mnemonic): */ ch = get_symbol_name (&mnemonic); pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic); if (pdesc) { (void) restore_line_pointer (ch); (*pdesc->handler) (pdesc->arg); goto done; } /* Find the instruction descriptor matching the arguments. 
*/ idesc = ia64_find_opcode (mnemonic); (void) restore_line_pointer (ch); if (!idesc) { as_bad (_("Unknown opcode `%s'"), mnemonic); goto done; } idesc = parse_operands (idesc); if (!idesc) goto done; /* Handle the dynamic ops we can handle now: */ if (idesc->type == IA64_TYPE_DYN) { if (strcmp (idesc->name, "add") == 0) { if (CURR_SLOT.opnd[2].X_op == O_register && CURR_SLOT.opnd[2].X_add_number < 4) mnemonic = "addl"; else mnemonic = "adds"; ia64_free_opcode (idesc); idesc = ia64_find_opcode (mnemonic); } else if (strcmp (idesc->name, "mov") == 0) { enum ia64_opnd opnd1, opnd2; int rop; opnd1 = idesc->operands[0]; opnd2 = idesc->operands[1]; if (opnd1 == IA64_OPND_AR3) rop = 0; else if (opnd2 == IA64_OPND_AR3) rop = 1; else abort (); if (CURR_SLOT.opnd[rop].X_op == O_register) { if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number)) mnemonic = "mov.i"; else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number)) mnemonic = "mov.m"; else rop = -1; } else abort (); if (rop >= 0) { ia64_free_opcode (idesc); idesc = ia64_find_opcode (mnemonic); while (idesc != NULL && (idesc->operands[0] != opnd1 || idesc->operands[1] != opnd2)) idesc = get_next_opcode (idesc); } } } else if (strcmp (idesc->name, "mov.i") == 0 || strcmp (idesc->name, "mov.m") == 0) { enum ia64_opnd opnd1, opnd2; int rop; opnd1 = idesc->operands[0]; opnd2 = idesc->operands[1]; if (opnd1 == IA64_OPND_AR3) rop = 0; else if (opnd2 == IA64_OPND_AR3) rop = 1; else abort (); if (CURR_SLOT.opnd[rop].X_op == O_register) { char unit = 'a'; if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number)) unit = 'i'; else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number)) unit = 'm'; if (unit != 'a' && unit != idesc->name [4]) as_bad (_("AR %d can only be accessed by %c-unit"), (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR), TOUPPER (unit)); } } else if (strcmp (idesc->name, "hint.b") == 0) { switch (md.hint_b) { case hint_b_ok: break; case hint_b_warning: as_warn 
(_("hint.b may be treated as nop")); break; case hint_b_error: as_bad (_("hint.b shouldn't be used")); break; } } qp_regno = 0; if (md.qp.X_op == O_register) { qp_regno = md.qp.X_add_number - REG_P; md.qp.X_op = O_absent; } flags = idesc->flags; if ((flags & IA64_OPCODE_FIRST) != 0) { /* The alignment frag has to end with a stop bit only if the next instruction after the alignment directive has to be the first instruction in an instruction group. */ if (align_frag) { while (align_frag->fr_type != rs_align_code) { align_frag = align_frag->fr_next; if (!align_frag) break; } /* align_frag can be NULL if there are directives in between. */ if (align_frag && align_frag->fr_next == frag_now) align_frag->tc_frag_data = 1; } insn_group_break (1, 0, 0); } align_frag = NULL; if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0) { as_bad (_("`%s' cannot be predicated"), idesc->name); goto done; } /* Build the instruction. */ CURR_SLOT.qp_regno = qp_regno; CURR_SLOT.idesc = idesc; as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line); dwarf2_where (&CURR_SLOT.debug_line); dwarf2_consume_line_info (); /* Add unwind entries, if there are any. */ if (unwind.current_entry) { CURR_SLOT.unwind_record = unwind.current_entry; unwind.current_entry = NULL; } if (unwind.pending_saves) { if (unwind.pending_saves->next) { /* Attach the next pending save to the next slot so that its slot number will get set correctly. */ add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR); unwind.pending_saves = &unwind.pending_saves->next->r.record.p; } else unwind.pending_saves = NULL; } if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym)) unwind.insn = 1; /* Check for dependency violations. 
*/ if (md.detect_dv) check_dv (idesc); md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS; if (++md.num_slots_in_use >= NUM_SLOTS) emit_one_bundle (); if ((flags & IA64_OPCODE_LAST) != 0) insn_group_break (1, 0, 0); md.last_text_seg = now_seg; done: input_line_pointer = saved_input_line_pointer; } /* Called when symbol NAME cannot be found in the symbol table. Should be used for dynamic valued symbols only. */ symbolS * md_undefined_symbol (char *name ATTRIBUTE_UNUSED) { return 0; } /* Called for any expression that can not be recognized. When the function is called, `input_line_pointer' will point to the start of the expression. */ void md_operand (expressionS *e) { switch (*input_line_pointer) { case '[': ++input_line_pointer; expression_and_evaluate (e); if (*input_line_pointer != ']') { as_bad (_("Closing bracket missing")); goto err; } else { if (e->X_op != O_register || e->X_add_number < REG_GR || e->X_add_number > REG_GR + 127) { as_bad (_("Index must be a general register")); e->X_add_number = REG_GR; } ++input_line_pointer; e->X_op = O_index; } break; default: break; } return; err: ignore_rest_of_line (); } /* Return 1 if it's OK to adjust a reloc by replacing the symbol with a section symbol plus some offset. For relocs involving @fptr(), directives we don't want such adjustments since we need to have the original symbol's name in the reloc. 
*/ int ia64_fix_adjustable (fixS *fix) { /* Prevent all adjustments to global symbols */ if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy)) return 0; switch (fix->fx_r_type) { case BFD_RELOC_IA64_FPTR64I: case BFD_RELOC_IA64_FPTR32MSB: case BFD_RELOC_IA64_FPTR32LSB: case BFD_RELOC_IA64_FPTR64MSB: case BFD_RELOC_IA64_FPTR64LSB: case BFD_RELOC_IA64_LTOFF_FPTR22: case BFD_RELOC_IA64_LTOFF_FPTR64I: return 0; default: break; } return 1; } int ia64_force_relocation (fixS *fix) { switch (fix->fx_r_type) { case BFD_RELOC_IA64_FPTR64I: case BFD_RELOC_IA64_FPTR32MSB: case BFD_RELOC_IA64_FPTR32LSB: case BFD_RELOC_IA64_FPTR64MSB: case BFD_RELOC_IA64_FPTR64LSB: case BFD_RELOC_IA64_LTOFF22: case BFD_RELOC_IA64_LTOFF64I: case BFD_RELOC_IA64_LTOFF_FPTR22: case BFD_RELOC_IA64_LTOFF_FPTR64I: case BFD_RELOC_IA64_PLTOFF22: case BFD_RELOC_IA64_PLTOFF64I: case BFD_RELOC_IA64_PLTOFF64MSB: case BFD_RELOC_IA64_PLTOFF64LSB: case BFD_RELOC_IA64_LTOFF22X: case BFD_RELOC_IA64_LDXMOV: return 1; default: break; } return generic_force_reloc (fix); } /* Decide from what point a pc-relative relocation is relative to, relative to the pc-relative fixup. Er, relatively speaking. */ long ia64_pcrel_from_section (fixS *fix, segT sec) { unsigned long off = fix->fx_frag->fr_address + fix->fx_where; if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE) off &= ~0xfUL; return off; } /* Used to emit section-relative relocs for the dwarf2 debug data. */ void ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size) { expressionS exp; exp.X_op = O_pseudo_fixup; exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym; exp.X_add_number = 0; exp.X_add_symbol = symbol; emit_expr (&exp, size); } /* This is called whenever some data item (not an instruction) needs a fixup. We pick the right reloc code depending on the byteorder currently in effect. 
*/ void ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp, bfd_reloc_code_real_type code) { fixS *fix; switch (nbytes) { /* There are no reloc for 8 and 16 bit quantities, but we allow them here since they will work fine as long as the expression is fully defined at the end of the pass over the source file. */ case 1: code = BFD_RELOC_8; break; case 2: code = BFD_RELOC_16; break; case 4: if (target_big_endian) code = BFD_RELOC_IA64_DIR32MSB; else code = BFD_RELOC_IA64_DIR32LSB; break; case 8: /* In 32-bit mode, data8 could mean function descriptors too. */ if (exp->X_op == O_pseudo_fixup && exp->X_op_symbol && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC && !(md.flags & EF_IA_64_ABI64)) { if (target_big_endian) code = BFD_RELOC_IA64_IPLTMSB; else code = BFD_RELOC_IA64_IPLTLSB; exp->X_op = O_symbol; break; } else { if (target_big_endian) code = BFD_RELOC_IA64_DIR64MSB; else code = BFD_RELOC_IA64_DIR64LSB; break; } case 16: if (exp->X_op == O_pseudo_fixup && exp->X_op_symbol && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC) { if (target_big_endian) code = BFD_RELOC_IA64_IPLTMSB; else code = BFD_RELOC_IA64_IPLTLSB; exp->X_op = O_symbol; break; } /* FALLTHRU */ default: as_bad (_("Unsupported fixup size %d"), nbytes); ignore_rest_of_line (); return; } if (exp->X_op == O_pseudo_fixup) { exp->X_op = O_symbol; code = ia64_gen_real_reloc_type (exp->X_op_symbol, code); /* ??? If code unchanged, unsupported. */ } fix = fix_new_exp (f, where, nbytes, exp, 0, code); /* We need to store the byte order in effect in case we're going to fix an 8 or 16 bit relocation (for which there no real relocs available). See md_apply_fix(). */ fix->tc_fix_data.bigendian = target_big_endian; } /* Return the actual relocation we wish to associate with the pseudo reloc described by SYM and R_TYPE. SYM should be one of the symbols in the pseudo_func array, or NULL. 
*/ static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type) { bfd_reloc_code_real_type newr = 0; const char *type = NULL, *suffix = ""; if (sym == NULL) { return r_type; } switch (S_GET_VALUE (sym)) { case FUNC_FPTR_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break; case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break; case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break; case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break; default: type = "FPTR"; break; } break; case FUNC_GP_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break; case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break; case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break; case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break; case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break; default: type = "GPREL"; break; } break; case FUNC_LT_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break; case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break; default: type = "LTOFF"; break; } break; case FUNC_LT_RELATIVE_X: switch (r_type) { case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break; default: type = "LTOFF"; suffix = "X"; break; } break; case FUNC_PC_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break; case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break; case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break; case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break; case BFD_RELOC_IA64_DIR64LSB: newr = 
BFD_RELOC_IA64_PCREL64LSB; break; default: type = "PCREL"; break; } break; case FUNC_PLT_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break; case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break; case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break; default: type = "PLTOFF"; break; } break; case FUNC_SEC_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break; case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break; case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break; default: type = "SECREL"; break; } break; case FUNC_SEG_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break; case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break; case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break; default: type = "SEGREL"; break; } break; case FUNC_LTV_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break; case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break; case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break; default: type = "LTV"; break; } break; case FUNC_LT_FPTR_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF_FPTR22; break; case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break; case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break; case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break; case BFD_RELOC_IA64_DIR64LSB: newr = 
BFD_RELOC_IA64_LTOFF_FPTR64LSB; break; default: type = "LTOFF_FPTR"; break; } break; case FUNC_TP_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break; case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break; case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break; case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break; default: type = "TPREL"; break; } break; case FUNC_LT_TP_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF_TPREL22; break; default: type = "LTOFF_TPREL"; break; } break; case FUNC_DTP_MODULE: switch (r_type) { case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_DTPMOD64MSB; break; case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_DTPMOD64LSB; break; default: type = "DTPMOD"; break; } break; case FUNC_LT_DTP_MODULE: switch (r_type) { case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break; default: type = "LTOFF_DTPMOD"; break; } break; case FUNC_DTP_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_DTPREL32MSB; break; case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_DTPREL32LSB; break; case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_DTPREL64MSB; break; case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_DTPREL64LSB; break; case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_DTPREL14; break; case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_DTPREL22; break; case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_DTPREL64I; break; default: type = "DTPREL"; break; } break; case FUNC_LT_DTP_RELATIVE: switch (r_type) { case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break; default: type = "LTOFF_DTPREL"; break; } break; case FUNC_IPLT_RELOC: switch (r_type) { case BFD_RELOC_IA64_IPLTMSB: return r_type; case BFD_RELOC_IA64_IPLTLSB: return r_type; default: type = "IPLT"; break; } break; #ifdef TE_VMS case 
FUNC_SLOTCOUNT_RELOC: return DUMMY_RELOC_IA64_SLOTCOUNT; #endif default: abort (); } if (newr) return newr; else { int width; if (!type) abort (); switch (r_type) { case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break; case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break; case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break; case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break; case BFD_RELOC_UNUSED: width = 13; break; case BFD_RELOC_IA64_IMM14: width = 14; break; case BFD_RELOC_IA64_IMM22: width = 22; break; case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break; default: abort (); } /* This should be an error, but since previously there wasn't any diagnostic here, don't make it fail because of this for now. */ as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix); return r_type; } } /* Here is where generate the appropriate reloc for pseudo relocation functions. */ void ia64_validate_fix (fixS *fix) { switch (fix->fx_r_type) { case BFD_RELOC_IA64_FPTR64I: case BFD_RELOC_IA64_FPTR32MSB: case BFD_RELOC_IA64_FPTR64LSB: case BFD_RELOC_IA64_LTOFF_FPTR22: case BFD_RELOC_IA64_LTOFF_FPTR64I: if (fix->fx_offset != 0) as_bad_where (fix->fx_file, fix->fx_line, _("No addend allowed in @fptr() relocation")); break; default: break; } } static void fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value) { bfd_vma insn[3], t0, t1, control_bits; const char *err; char *fixpos; long slot; slot = fix->fx_where & 0x3; fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot); /* Bundles are always in little-endian byte order */ t0 = bfd_getl64 (fixpos); t1 = bfd_getl64 (fixpos + 8); control_bits = t0 & 0x1f; insn[0] = (t0 >> 5) & 0x1ffffffffffLL; insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18); insn[2] = (t1 >> 23) & 0x1ffffffffffLL; err = NULL; if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64) { insn[1] = (value >> 22) & 0x1ffffffffffLL; insn[2] |= (((value & 0x7f) << 13) | (((value >> 7) & 
0x1ff) << 27) | (((value >> 16) & 0x1f) << 22) | (((value >> 21) & 0x1) << 21) | (((value >> 63) & 0x1) << 36)); } else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62) { if (value & ~0x3fffffffffffffffULL) err = _("integer operand out of range"); insn[1] = (value >> 21) & 0x1ffffffffffLL; insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36)); } else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64) { value >>= 4; insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2; insn[2] |= ((((value >> 59) & 0x1) << 36) | (((value >> 0) & 0xfffff) << 13)); } else err = (*odesc->insert) (odesc, value, insn + slot); if (err) as_bad_where (fix->fx_file, fix->fx_line, "%s", err); t0 = control_bits | (insn[0] << 5) | (insn[1] << 46); t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23); number_to_chars_littleendian (fixpos + 0, t0, 8); number_to_chars_littleendian (fixpos + 8, t1, 8); } /* Attempt to simplify or even eliminate a fixup. The return value is ignored; perhaps it was once meaningful, but now it is historical. To indicate that a fixup has been eliminated, set FIXP->FX_DONE. If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry (if possible). 
*/ void md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED) { char *fixpos; valueT value = *valP; fixpos = fix->fx_frag->fr_literal + fix->fx_where; if (fix->fx_pcrel) { switch (fix->fx_r_type) { case BFD_RELOC_IA64_PCREL21B: break; case BFD_RELOC_IA64_PCREL21BI: break; case BFD_RELOC_IA64_PCREL21F: break; case BFD_RELOC_IA64_PCREL21M: break; case BFD_RELOC_IA64_PCREL60B: break; case BFD_RELOC_IA64_PCREL22: break; case BFD_RELOC_IA64_PCREL64I: break; case BFD_RELOC_IA64_PCREL32MSB: break; case BFD_RELOC_IA64_PCREL32LSB: break; case BFD_RELOC_IA64_PCREL64MSB: break; case BFD_RELOC_IA64_PCREL64LSB: break; default: fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym, fix->fx_r_type); break; } } if (fix->fx_addsy) { switch ((unsigned) fix->fx_r_type) { case BFD_RELOC_UNUSED: /* This must be a TAG13 or TAG13b operand. There are no external relocs defined for them, so we must give an error. */ as_bad_where (fix->fx_file, fix->fx_line, _("%s must have a constant value"), elf64_ia64_operands[fix->tc_fix_data.opnd].desc); fix->fx_done = 1; return; case BFD_RELOC_IA64_TPREL14: case BFD_RELOC_IA64_TPREL22: case BFD_RELOC_IA64_TPREL64I: case BFD_RELOC_IA64_LTOFF_TPREL22: case BFD_RELOC_IA64_LTOFF_DTPMOD22: case BFD_RELOC_IA64_DTPREL14: case BFD_RELOC_IA64_DTPREL22: case BFD_RELOC_IA64_DTPREL64I: case BFD_RELOC_IA64_LTOFF_DTPREL22: S_SET_THREAD_LOCAL (fix->fx_addsy); break; #ifdef TE_VMS case DUMMY_RELOC_IA64_SLOTCOUNT: as_bad_where (fix->fx_file, fix->fx_line, _("cannot resolve @slotcount parameter")); fix->fx_done = 1; return; #endif default: break; } } else if (fix->tc_fix_data.opnd == IA64_OPND_NIL) { #ifdef TE_VMS if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT) { /* For @slotcount, convert an addresses difference to a slots difference. 
*/ valueT v; v = (value >> 4) * 3; switch (value & 0x0f) { case 0: case 1: case 2: v += value & 0x0f; break; case 0x0f: v += 2; break; case 0x0e: v += 1; break; default: as_bad (_("invalid @slotcount value")); } value = v; } #endif if (fix->tc_fix_data.bigendian) number_to_chars_bigendian (fixpos, value, fix->fx_size); else number_to_chars_littleendian (fixpos, value, fix->fx_size); fix->fx_done = 1; } else { fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value); fix->fx_done = 1; } } /* Generate the BFD reloc to be stuck in the object file from the fixup used internally in the assembler. */ arelent * tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp) { arelent *reloc; reloc = xmalloc (sizeof (*reloc)); reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *)); *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy); reloc->address = fixp->fx_frag->fr_address + fixp->fx_where; reloc->addend = fixp->fx_offset; reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type); if (!reloc->howto) { as_bad_where (fixp->fx_file, fixp->fx_line, _("Cannot represent %s relocation in object file"), bfd_get_reloc_code_name (fixp->fx_r_type)); free (reloc); return NULL; } return reloc; } /* Turn a string in input_line_pointer into a floating point constant of type TYPE, and store the appropriate bytes in *LIT. The number of LITTLENUMS emitted is stored in *SIZE. An error message is returned, or NULL on OK. 
*/ #define MAX_LITTLENUMS 5 char * md_atof (int type, char *lit, int *size) { LITTLENUM_TYPE words[MAX_LITTLENUMS]; char *t; int prec; switch (type) { /* IEEE floats */ case 'f': case 'F': case 's': case 'S': prec = 2; break; case 'd': case 'D': case 'r': case 'R': prec = 4; break; case 'x': case 'X': case 'p': case 'P': prec = 5; break; default: *size = 0; return _("Unrecognized or unsupported floating point constant"); } t = atof_ieee (input_line_pointer, type, words); if (t) input_line_pointer = t; (*ia64_float_to_chars) (lit, words, prec); if (type == 'X') { /* It is 10 byte floating point with 6 byte padding. */ memset (&lit [10], 0, 6); *size = 8 * sizeof (LITTLENUM_TYPE); } else *size = prec * sizeof (LITTLENUM_TYPE); return NULL; } /* Handle ia64 specific semantics of the align directive. */ void ia64_md_do_align (int n ATTRIBUTE_UNUSED, const char *fill ATTRIBUTE_UNUSED, int len ATTRIBUTE_UNUSED, int max ATTRIBUTE_UNUSED) { if (subseg_text_p (now_seg)) ia64_flush_insns (); } /* This is called from HANDLE_ALIGN in write.c. Fill in the contents of an rs_align_code fragment. */ void ia64_handle_align (fragS *fragp) { int bytes; char *p; const unsigned char *nop_type; if (fragp->fr_type != rs_align_code) return; /* Check if this frag has to end with a stop bit. */ nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop; bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix; p = fragp->fr_literal + fragp->fr_fix; /* If no paddings are needed, we check if we need a stop bit. */ if (!bytes && fragp->tc_frag_data) { if (fragp->fr_fix < 16) #if 1 /* FIXME: It won't work with .align 16 alloc r32=ar.pfs,1,2,4,0 */ ; #else as_bad_where (fragp->fr_file, fragp->fr_line, _("Can't add stop bit to mark end of instruction group")); #endif else /* Bundles are always in little-endian byte order. Make sure the previous bundle has the stop bit. 
*/ *(p - 16) |= 1; } /* Make sure we are on a 16-byte boundary, in case someone has been putting data into a text section. */ if (bytes & 15) { int fix = bytes & 15; memset (p, 0, fix); p += fix; bytes -= fix; fragp->fr_fix += fix; } /* Instruction bundles are always little-endian. */ memcpy (p, nop_type, 16); fragp->fr_var = 16; } static void ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words, int prec) { while (prec--) { number_to_chars_bigendian (lit, (long) (*words++), sizeof (LITTLENUM_TYPE)); lit += sizeof (LITTLENUM_TYPE); } } static void ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words, int prec) { while (prec--) { number_to_chars_littleendian (lit, (long) (words[prec]), sizeof (LITTLENUM_TYPE)); lit += sizeof (LITTLENUM_TYPE); } } void ia64_elf_section_change_hook (void) { if (elf_section_type (now_seg) == SHT_IA_64_UNWIND && elf_linked_to_section (now_seg) == NULL) elf_linked_to_section (now_seg) = text_section; dot_byteorder (-1); } /* Check if a label should be made global. */ void ia64_check_label (symbolS *label) { if (*input_line_pointer == ':') { S_SET_EXTERNAL (label); input_line_pointer++; } } /* Used to remember where .alias and .secalias directives are seen. We will rename symbol and section names when we are about to output the relocatable file. */ struct alias { char *file; /* The file where the directive is seen. */ unsigned int line; /* The line number the directive is at. */ const char *name; /* The original name of the symbol. */ }; /* Called for .alias and .secalias directives. If SECTION is 1, it is .secalias. Otherwise, it is .alias. 
*/ static void dot_alias (int section) { char *name, *alias; char delim; char *end_name; int len; const char *error_string; struct alias *h; const char *a; struct hash_control *ahash, *nhash; const char *kind; delim = get_symbol_name (&name); end_name = input_line_pointer; *end_name = delim; if (name == end_name) { as_bad (_("expected symbol name")); ignore_rest_of_line (); return; } SKIP_WHITESPACE_AFTER_NAME (); if (*input_line_pointer != ',') { *end_name = 0; as_bad (_("expected comma after \"%s\""), name); *end_name = delim; ignore_rest_of_line (); return; } input_line_pointer++; *end_name = 0; ia64_canonicalize_symbol_name (name); /* We call demand_copy_C_string to check if alias string is valid. There should be a closing `"' and no `\0' in the string. */ alias = demand_copy_C_string (&len); if (alias == NULL) { ignore_rest_of_line (); return; } /* Make a copy of name string. */ len = strlen (name) + 1; obstack_grow (&notes, name, len); name = obstack_finish (&notes); if (section) { kind = "section"; ahash = secalias_hash; nhash = secalias_name_hash; } else { kind = "symbol"; ahash = alias_hash; nhash = alias_name_hash; } /* Check if alias has been used before. */ h = (struct alias *) hash_find (ahash, alias); if (h) { if (strcmp (h->name, name)) as_bad (_("`%s' is already the alias of %s `%s'"), alias, kind, h->name); goto out; } /* Check if name already has an alias. 
*/ a = (const char *) hash_find (nhash, name); if (a) { if (strcmp (a, alias)) as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a); goto out; } h = (struct alias *) xmalloc (sizeof (struct alias)); as_where (&h->file, &h->line); h->name = name; error_string = hash_jam (ahash, alias, (void *) h); if (error_string) { as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"), alias, kind, error_string); goto out; } error_string = hash_jam (nhash, name, (void *) alias); if (error_string) { as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"), alias, kind, error_string); out: obstack_free (&notes, name); obstack_free (&notes, alias); } demand_empty_rest_of_line (); } /* It renames the original symbol name to its alias. */ static void do_alias (const char *alias, void *value) { struct alias *h = (struct alias *) value; symbolS *sym = symbol_find (h->name); if (sym == NULL) { #ifdef TE_VMS /* Uses .alias extensively to alias CRTL functions to same with decc$ prefix. Sometimes function gets optimized away and a warning results, which should be suppressed. */ if (strncmp (alias, "decc$", 5) != 0) #endif as_warn_where (h->file, h->line, _("symbol `%s' aliased to `%s' is not used"), h->name, alias); } else S_SET_NAME (sym, (char *) alias); } /* Called from write_object_file. */ void ia64_adjust_symtab (void) { hash_traverse (alias_hash, do_alias); } /* It renames the original section name to its alias. */ static void do_secalias (const char *alias, void *value) { struct alias *h = (struct alias *) value; segT sec = bfd_get_section_by_name (stdoutput, h->name); if (sec == NULL) as_warn_where (h->file, h->line, _("section `%s' aliased to `%s' is not used"), h->name, alias); else sec->name = alias; } /* Called from write_object_file. 
*/ void ia64_frob_file (void) { hash_traverse (secalias_hash, do_secalias); } #ifdef TE_VMS #define NT_VMS_MHD 1 #define NT_VMS_LNM 2 /* Integrity VMS 8.x identifies it's ELF modules with a standard ELF .note section. */ /* Manufacture a VMS-like time string. */ static void get_vms_time (char *Now) { char *pnt; time_t timeb; time (&timeb); pnt = ctime (&timeb); pnt[3] = 0; pnt[7] = 0; pnt[10] = 0; pnt[16] = 0; pnt[24] = 0; sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11); } void ia64_vms_note (void) { char *p; asection *seg = now_seg; subsegT subseg = now_subseg; asection *secp = NULL; char *bname; char buf [256]; symbolS *sym; /* Create the .note section. */ secp = subseg_new (".note", 0); bfd_set_section_flags (stdoutput, secp, SEC_HAS_CONTENTS | SEC_READONLY); /* Module header note (MHD). */ bname = xstrdup (lbasename (out_file_name)); if ((p = strrchr (bname, '.'))) *p = '\0'; /* VMS note header is 24 bytes long. */ p = frag_more (8 + 8 + 8); number_to_chars_littleendian (p + 0, 8, 8); number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8); number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8); p = frag_more (8); strcpy (p, "IPF/VMS"); p = frag_more (17 + 17 + strlen (bname) + 1 + 5); get_vms_time (p); strcpy (p + 17, "24-FEB-2005 15:00"); p += 17 + 17; strcpy (p, bname); p += strlen (bname) + 1; free (bname); strcpy (p, "V1.0"); frag_align (3, 0, 0); /* Language processor name note. 
*/ sprintf (buf, "GNU assembler version %s (%s) using BFD version %s", VERSION, TARGET_ALIAS, BFD_VERSION_STRING); p = frag_more (8 + 8 + 8); number_to_chars_littleendian (p + 0, 8, 8); number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8); number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8); p = frag_more (8); strcpy (p, "IPF/VMS"); p = frag_more (strlen (buf) + 1); strcpy (p, buf); frag_align (3, 0, 0); secp = subseg_new (".vms_display_name_info", 0); bfd_set_section_flags (stdoutput, secp, SEC_HAS_CONTENTS | SEC_READONLY); /* This symbol should be passed on the command line and be variable according to language. */ sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl", absolute_section, 0, &zero_address_frag); symbol_table_insert (sym); symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC; p = frag_more (4); /* Format 3 of VMS demangler Spec. */ number_to_chars_littleendian (p, 3, 4); p = frag_more (4); /* Place holder for symbol table index of above symbol. */ number_to_chars_littleendian (p, -1, 4); frag_align (3, 0, 0); /* We probably can't restore the current segment, for there likely isn't one yet... */ if (seg && subseg) subseg_set (seg, subseg); } #endif /* TE_VMS */
gpl-2.0
xsilon/qemu
hw/ssi/xilinx_spips.c
11
17927
/* * QEMU model of the Xilinx Zynq SPI controller * * Copyright (c) 2012 Peter A. G. Crosthwaite * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "hw/sysbus.h" #include "sysemu/sysemu.h" #include "hw/ptimer.h" #include "qemu/log.h" #include "qemu/fifo8.h" #include "hw/ssi.h" #include "qemu/bitops.h" #ifdef XILINX_SPIPS_ERR_DEBUG #define DB_PRINT(...) do { \ fprintf(stderr, ": %s: ", __func__); \ fprintf(stderr, ## __VA_ARGS__); \ } while (0); #else #define DB_PRINT(...) 
#endif /* config register */ #define R_CONFIG (0x00 / 4) #define IFMODE (1 << 31) #define ENDIAN (1 << 26) #define MODEFAIL_GEN_EN (1 << 17) #define MAN_START_COM (1 << 16) #define MAN_START_EN (1 << 15) #define MANUAL_CS (1 << 14) #define CS (0xF << 10) #define CS_SHIFT (10) #define PERI_SEL (1 << 9) #define REF_CLK (1 << 8) #define FIFO_WIDTH (3 << 6) #define BAUD_RATE_DIV (7 << 3) #define CLK_PH (1 << 2) #define CLK_POL (1 << 1) #define MODE_SEL (1 << 0) /* interrupt mechanism */ #define R_INTR_STATUS (0x04 / 4) #define R_INTR_EN (0x08 / 4) #define R_INTR_DIS (0x0C / 4) #define R_INTR_MASK (0x10 / 4) #define IXR_TX_FIFO_UNDERFLOW (1 << 6) #define IXR_RX_FIFO_FULL (1 << 5) #define IXR_RX_FIFO_NOT_EMPTY (1 << 4) #define IXR_TX_FIFO_FULL (1 << 3) #define IXR_TX_FIFO_NOT_FULL (1 << 2) #define IXR_TX_FIFO_MODE_FAIL (1 << 1) #define IXR_RX_FIFO_OVERFLOW (1 << 0) #define IXR_ALL ((IXR_TX_FIFO_UNDERFLOW<<1)-1) #define R_EN (0x14 / 4) #define R_DELAY (0x18 / 4) #define R_TX_DATA (0x1C / 4) #define R_RX_DATA (0x20 / 4) #define R_SLAVE_IDLE_COUNT (0x24 / 4) #define R_TX_THRES (0x28 / 4) #define R_RX_THRES (0x2C / 4) #define R_TXD1 (0x80 / 4) #define R_TXD2 (0x84 / 4) #define R_TXD3 (0x88 / 4) #define R_LQSPI_CFG (0xa0 / 4) #define R_LQSPI_CFG_RESET 0x03A002EB #define LQSPI_CFG_LQ_MODE (1 << 31) #define LQSPI_CFG_TWO_MEM (1 << 30) #define LQSPI_CFG_SEP_BUS (1 << 30) #define LQSPI_CFG_U_PAGE (1 << 28) #define LQSPI_CFG_MODE_EN (1 << 25) #define LQSPI_CFG_MODE_WIDTH 8 #define LQSPI_CFG_MODE_SHIFT 16 #define LQSPI_CFG_DUMMY_WIDTH 3 #define LQSPI_CFG_DUMMY_SHIFT 8 #define LQSPI_CFG_INST_CODE 0xFF #define R_LQSPI_STS (0xA4 / 4) #define LQSPI_STS_WR_RECVD (1 << 1) #define R_MOD_ID (0xFC / 4) #define R_MAX (R_MOD_ID+1) /* size of TXRX FIFOs */ #define RXFF_A 32 #define TXFF_A 32 /* 16MB per linear region */ #define LQSPI_ADDRESS_BITS 24 /* Bite off 4k chunks at a time */ #define LQSPI_CACHE_SIZE 1024 #define SNOOP_CHECKING 0xFF #define SNOOP_NONE 0xFE #define SNOOP_STRIPING 0 
typedef enum { READ = 0x3, FAST_READ = 0xb, DOR = 0x3b, QOR = 0x6b, DIOR = 0xbb, QIOR = 0xeb, PP = 0x2, DPP = 0xa2, QPP = 0x32, } FlashCMD; typedef struct { SysBusDevice busdev; MemoryRegion iomem; MemoryRegion mmlqspi; qemu_irq irq; int irqline; uint8_t num_cs; uint8_t num_busses; uint8_t snoop_state; qemu_irq *cs_lines; SSIBus **spi; Fifo8 rx_fifo; Fifo8 tx_fifo; uint8_t num_txrx_bytes; uint32_t regs[R_MAX]; uint32_t lqspi_buf[LQSPI_CACHE_SIZE]; hwaddr lqspi_cached_addr; } XilinxSPIPS; #define TYPE_XILINX_SPIPS "xilinx,spips" #define XILINX_SPIPS(obj) \ OBJECT_CHECK(XilinxSPIPS, (obj), TYPE_XILINX_SPIPS) static inline int num_effective_busses(XilinxSPIPS *s) { return (s->regs[R_LQSPI_CFG] & LQSPI_CFG_SEP_BUS && s->regs[R_LQSPI_CFG] & LQSPI_CFG_TWO_MEM) ? s->num_busses : 1; } static void xilinx_spips_update_cs_lines(XilinxSPIPS *s) { int i, j; bool found = false; int field = s->regs[R_CONFIG] >> CS_SHIFT; for (i = 0; i < s->num_cs; i++) { for (j = 0; j < num_effective_busses(s); j++) { int upage = !!(s->regs[R_LQSPI_STS] & LQSPI_CFG_U_PAGE); int cs_to_set = (j * s->num_cs + i + upage) % (s->num_cs * s->num_busses); if (~field & (1 << i) && !found) { DB_PRINT("selecting slave %d\n", i); qemu_set_irq(s->cs_lines[cs_to_set], 0); } else { qemu_set_irq(s->cs_lines[cs_to_set], 1); } } if (~field & (1 << i)) { found = true; } } if (!found) { s->snoop_state = SNOOP_CHECKING; } } static void xilinx_spips_update_ixr(XilinxSPIPS *s) { /* These are set/cleared as they occur */ s->regs[R_INTR_STATUS] &= (IXR_TX_FIFO_UNDERFLOW | IXR_RX_FIFO_OVERFLOW | IXR_TX_FIFO_MODE_FAIL); /* these are pure functions of fifo state, set them here */ s->regs[R_INTR_STATUS] |= (fifo8_is_full(&s->rx_fifo) ? IXR_RX_FIFO_FULL : 0) | (s->rx_fifo.num >= s->regs[R_RX_THRES] ? IXR_RX_FIFO_NOT_EMPTY : 0) | (fifo8_is_full(&s->tx_fifo) ? IXR_TX_FIFO_FULL : 0) | (s->tx_fifo.num < s->regs[R_TX_THRES] ? 
IXR_TX_FIFO_NOT_FULL : 0); /* drive external interrupt pin */ int new_irqline = !!(s->regs[R_INTR_MASK] & s->regs[R_INTR_STATUS] & IXR_ALL); if (new_irqline != s->irqline) { s->irqline = new_irqline; qemu_set_irq(s->irq, s->irqline); } } static void xilinx_spips_reset(DeviceState *d) { XilinxSPIPS *s = XILINX_SPIPS(d); int i; for (i = 0; i < R_MAX; i++) { s->regs[i] = 0; } fifo8_reset(&s->rx_fifo); fifo8_reset(&s->rx_fifo); /* non zero resets */ s->regs[R_CONFIG] |= MODEFAIL_GEN_EN; s->regs[R_SLAVE_IDLE_COUNT] = 0xFF; s->regs[R_TX_THRES] = 1; s->regs[R_RX_THRES] = 1; /* FIXME: move magic number definition somewhere sensible */ s->regs[R_MOD_ID] = 0x01090106; s->regs[R_LQSPI_CFG] = R_LQSPI_CFG_RESET; s->snoop_state = SNOOP_CHECKING; xilinx_spips_update_ixr(s); xilinx_spips_update_cs_lines(s); } static void xilinx_spips_flush_txfifo(XilinxSPIPS *s) { for (;;) { int i; uint8_t rx; uint8_t tx = 0; for (i = 0; i < num_effective_busses(s); ++i) { if (!i || s->snoop_state == SNOOP_STRIPING) { if (fifo8_is_empty(&s->tx_fifo)) { s->regs[R_INTR_STATUS] |= IXR_TX_FIFO_UNDERFLOW; xilinx_spips_update_ixr(s); return; } else { tx = fifo8_pop(&s->tx_fifo); } } rx = ssi_transfer(s->spi[i], (uint32_t)tx); DB_PRINT("tx = %02x rx = %02x\n", tx, rx); if (!i || s->snoop_state == SNOOP_STRIPING) { if (fifo8_is_full(&s->rx_fifo)) { s->regs[R_INTR_STATUS] |= IXR_RX_FIFO_OVERFLOW; DB_PRINT("rx FIFO overflow"); } else { fifo8_push(&s->rx_fifo, (uint8_t)rx); } } } switch (s->snoop_state) { case (SNOOP_CHECKING): switch (tx) { /* new instruction code */ case READ: /* 3 address bytes, no dummy bytes/cycles */ case PP: case DPP: case QPP: s->snoop_state = 3; break; case FAST_READ: /* 3 address bytes, 1 dummy byte */ case DOR: case QOR: case DIOR: /* FIXME: these vary between vendor - set to spansion */ s->snoop_state = 4; break; case QIOR: /* 3 address bytes, 2 dummy bytes */ s->snoop_state = 6; break; default: s->snoop_state = SNOOP_NONE; } break; case (SNOOP_STRIPING): case (SNOOP_NONE): 
break; default: s->snoop_state--; } } } static inline void rx_data_bytes(XilinxSPIPS *s, uint32_t *value, int max) { int i; *value = 0; for (i = 0; i < max && !fifo8_is_empty(&s->rx_fifo); ++i) { uint32_t next = fifo8_pop(&s->rx_fifo) & 0xFF; *value |= next << 8 * (s->regs[R_CONFIG] & ENDIAN ? 3-i : i); } } static uint64_t xilinx_spips_read(void *opaque, hwaddr addr, unsigned size) { XilinxSPIPS *s = opaque; uint32_t mask = ~0; uint32_t ret; addr >>= 2; switch (addr) { case R_CONFIG: mask = 0x0002FFFF; break; case R_INTR_STATUS: case R_INTR_MASK: mask = IXR_ALL; break; case R_EN: mask = 0x1; break; case R_SLAVE_IDLE_COUNT: mask = 0xFF; break; case R_MOD_ID: mask = 0x01FFFFFF; break; case R_INTR_EN: case R_INTR_DIS: case R_TX_DATA: mask = 0; break; case R_RX_DATA: rx_data_bytes(s, &ret, s->num_txrx_bytes); DB_PRINT("addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret); xilinx_spips_update_ixr(s); return ret; } DB_PRINT("addr=" TARGET_FMT_plx " = %x\n", addr * 4, s->regs[addr] & mask); return s->regs[addr] & mask; } static inline void tx_data_bytes(XilinxSPIPS *s, uint32_t value, int num) { int i; for (i = 0; i < num && !fifo8_is_full(&s->tx_fifo); ++i) { if (s->regs[R_CONFIG] & ENDIAN) { fifo8_push(&s->tx_fifo, (uint8_t)(value >> 24)); value <<= 8; } else { fifo8_push(&s->tx_fifo, (uint8_t)value); value >>= 8; } } } static void xilinx_spips_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { int mask = ~0; int man_start_com = 0; XilinxSPIPS *s = opaque; DB_PRINT("addr=" TARGET_FMT_plx " = %x\n", addr, (unsigned)value); addr >>= 2; switch (addr) { case R_CONFIG: mask = 0x0002FFFF; if (value & MAN_START_COM) { man_start_com = 1; } break; case R_INTR_STATUS: mask = IXR_ALL; s->regs[R_INTR_STATUS] &= ~(mask & value); goto no_reg_update; case R_INTR_DIS: mask = IXR_ALL; s->regs[R_INTR_MASK] &= ~(mask & value); goto no_reg_update; case R_INTR_EN: mask = IXR_ALL; s->regs[R_INTR_MASK] |= mask & value; goto no_reg_update; case R_EN: mask = 0x1; break; case 
R_SLAVE_IDLE_COUNT: mask = 0xFF; break; case R_RX_DATA: case R_INTR_MASK: case R_MOD_ID: mask = 0; break; case R_TX_DATA: tx_data_bytes(s, (uint32_t)value, s->num_txrx_bytes); goto no_reg_update; case R_TXD1: tx_data_bytes(s, (uint32_t)value, 1); goto no_reg_update; case R_TXD2: tx_data_bytes(s, (uint32_t)value, 2); goto no_reg_update; case R_TXD3: tx_data_bytes(s, (uint32_t)value, 3); goto no_reg_update; } s->regs[addr] = (s->regs[addr] & ~mask) | (value & mask); no_reg_update: if (man_start_com) { xilinx_spips_flush_txfifo(s); } xilinx_spips_update_ixr(s); xilinx_spips_update_cs_lines(s); } static const MemoryRegionOps spips_ops = { .read = xilinx_spips_read, .write = xilinx_spips_write, .endianness = DEVICE_LITTLE_ENDIAN, }; #define LQSPI_CACHE_SIZE 1024 static uint64_t lqspi_read(void *opaque, hwaddr addr, unsigned int size) { int i; XilinxSPIPS *s = opaque; if (addr >= s->lqspi_cached_addr && addr <= s->lqspi_cached_addr + LQSPI_CACHE_SIZE - 4) { return s->lqspi_buf[(addr - s->lqspi_cached_addr) >> 2]; } else { int flash_addr = (addr / num_effective_busses(s)); int slave = flash_addr >> LQSPI_ADDRESS_BITS; int cache_entry = 0; DB_PRINT("config reg status: %08x\n", s->regs[R_LQSPI_CFG]); fifo8_reset(&s->tx_fifo); fifo8_reset(&s->rx_fifo); s->regs[R_CONFIG] &= ~CS; s->regs[R_CONFIG] |= (~(1 << slave) << CS_SHIFT) & CS; xilinx_spips_update_cs_lines(s); /* instruction */ DB_PRINT("pushing read instruction: %02x\n", (uint8_t)(s->regs[R_LQSPI_CFG] & LQSPI_CFG_INST_CODE)); fifo8_push(&s->tx_fifo, s->regs[R_LQSPI_CFG] & LQSPI_CFG_INST_CODE); /* read address */ DB_PRINT("pushing read address %06x\n", flash_addr); fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 16)); fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 8)); fifo8_push(&s->tx_fifo, (uint8_t)flash_addr); /* mode bits */ if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_MODE_EN) { fifo8_push(&s->tx_fifo, extract32(s->regs[R_LQSPI_CFG], LQSPI_CFG_MODE_SHIFT, LQSPI_CFG_MODE_WIDTH)); } /* dummy bytes */ for (i = 0; i < 
(extract32(s->regs[R_LQSPI_CFG], LQSPI_CFG_DUMMY_SHIFT, LQSPI_CFG_DUMMY_WIDTH)); ++i) { DB_PRINT("pushing dummy byte\n"); fifo8_push(&s->tx_fifo, 0); } xilinx_spips_flush_txfifo(s); fifo8_reset(&s->rx_fifo); DB_PRINT("starting QSPI data read\n"); for (i = 0; i < LQSPI_CACHE_SIZE / 4; ++i) { tx_data_bytes(s, 0, 4); xilinx_spips_flush_txfifo(s); rx_data_bytes(s, &s->lqspi_buf[cache_entry], 4); cache_entry++; } s->regs[R_CONFIG] |= CS; xilinx_spips_update_cs_lines(s); s->lqspi_cached_addr = addr; return lqspi_read(opaque, addr, size); } } static const MemoryRegionOps lqspi_ops = { .read = lqspi_read, .endianness = DEVICE_NATIVE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4 } }; static void xilinx_spips_realize(DeviceState *dev, Error **errp) { XilinxSPIPS *s = XILINX_SPIPS(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); int i; DB_PRINT("inited device model\n"); s->spi = g_new(SSIBus *, s->num_busses); for (i = 0; i < s->num_busses; ++i) { char bus_name[16]; snprintf(bus_name, 16, "spi%d", i); s->spi[i] = ssi_create_bus(dev, bus_name); } s->cs_lines = g_new0(qemu_irq, s->num_cs * s->num_busses); ssi_auto_connect_slaves(DEVICE(s), s->cs_lines, s->spi[0]); ssi_auto_connect_slaves(DEVICE(s), s->cs_lines, s->spi[1]); sysbus_init_irq(sbd, &s->irq); for (i = 0; i < s->num_cs * s->num_busses; ++i) { sysbus_init_irq(sbd, &s->cs_lines[i]); } memory_region_init_io(&s->iomem, &spips_ops, s, "spi", R_MAX*4); sysbus_init_mmio(sbd, &s->iomem); memory_region_init_io(&s->mmlqspi, &lqspi_ops, s, "lqspi", (1 << LQSPI_ADDRESS_BITS) * 2); sysbus_init_mmio(sbd, &s->mmlqspi); s->irqline = -1; s->lqspi_cached_addr = ~0ULL; fifo8_create(&s->rx_fifo, RXFF_A); fifo8_create(&s->tx_fifo, TXFF_A); } static int xilinx_spips_post_load(void *opaque, int version_id) { xilinx_spips_update_ixr((XilinxSPIPS *)opaque); xilinx_spips_update_cs_lines((XilinxSPIPS *)opaque); return 0; } static const VMStateDescription vmstate_xilinx_spips = { .name = "xilinx_spips", .version_id = 2, 
.minimum_version_id = 2, .minimum_version_id_old = 2, .post_load = xilinx_spips_post_load, .fields = (VMStateField[]) { VMSTATE_FIFO8(tx_fifo, XilinxSPIPS), VMSTATE_FIFO8(rx_fifo, XilinxSPIPS), VMSTATE_UINT32_ARRAY(regs, XilinxSPIPS, R_MAX), VMSTATE_UINT8(snoop_state, XilinxSPIPS), VMSTATE_END_OF_LIST() } }; static Property xilinx_spips_properties[] = { DEFINE_PROP_UINT8("num-busses", XilinxSPIPS, num_busses, 1), DEFINE_PROP_UINT8("num-ss-bits", XilinxSPIPS, num_cs, 4), DEFINE_PROP_UINT8("num-txrx-bytes", XilinxSPIPS, num_txrx_bytes, 1), DEFINE_PROP_END_OF_LIST(), }; static void xilinx_spips_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = xilinx_spips_realize; dc->reset = xilinx_spips_reset; dc->props = xilinx_spips_properties; dc->vmsd = &vmstate_xilinx_spips; } static const TypeInfo xilinx_spips_info = { .name = TYPE_XILINX_SPIPS, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(XilinxSPIPS), .class_init = xilinx_spips_class_init, }; static void xilinx_spips_register_types(void) { type_register_static(&xilinx_spips_info); } type_init(xilinx_spips_register_types)
gpl-2.0
surkovalex/xbmc
lib/libUPnP/Neptune/Source/Data/TLS/Base/NptTlsTrustAnchor_Base_0120.cpp
267
7195
/***************************************************************** | | Neptune - Trust Anchors | | This file is automatically generated by a script, do not edit! | | Copyright (c) 2002-2010, Axiomatic Systems, LLC. | All rights reserved. | | Redistribution and use in source and binary forms, with or without | modification, are permitted provided that the following conditions are met: | * Redistributions of source code must retain the above copyright | notice, this list of conditions and the following disclaimer. | * Redistributions in binary form must reproduce the above copyright | notice, this list of conditions and the following disclaimer in the | documentation and/or other materials provided with the distribution. | * Neither the name of Axiomatic Systems nor the | names of its contributors may be used to endorse or promote products | derived from this software without specific prior written permission. | | THIS SOFTWARE IS PROVIDED BY AXIOMATIC SYSTEMS ''AS IS'' AND ANY | EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | DISCLAIMED. IN NO EVENT SHALL AXIOMATIC SYSTEMS BE LIABLE FOR ANY | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
| ****************************************************************/ /* IGC/A */ const unsigned char NptTlsTrustAnchor_Base_0120_Data[1030] = { 0x30,0x82,0x04,0x02,0x30,0x82,0x02,0xea ,0xa0,0x03,0x02,0x01,0x02,0x02,0x05,0x39 ,0x11,0x45,0x10,0x94,0x30,0x0d,0x06,0x09 ,0x2a,0x86,0x48,0x86,0xf7,0x0d,0x01,0x01 ,0x05,0x05,0x00,0x30,0x81,0x85,0x31,0x0b ,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13 ,0x02,0x46,0x52,0x31,0x0f,0x30,0x0d,0x06 ,0x03,0x55,0x04,0x08,0x13,0x06,0x46,0x72 ,0x61,0x6e,0x63,0x65,0x31,0x0e,0x30,0x0c ,0x06,0x03,0x55,0x04,0x07,0x13,0x05,0x50 ,0x61,0x72,0x69,0x73,0x31,0x10,0x30,0x0e ,0x06,0x03,0x55,0x04,0x0a,0x13,0x07,0x50 ,0x4d,0x2f,0x53,0x47,0x44,0x4e,0x31,0x0e ,0x30,0x0c,0x06,0x03,0x55,0x04,0x0b,0x13 ,0x05,0x44,0x43,0x53,0x53,0x49,0x31,0x0e ,0x30,0x0c,0x06,0x03,0x55,0x04,0x03,0x13 ,0x05,0x49,0x47,0x43,0x2f,0x41,0x31,0x23 ,0x30,0x21,0x06,0x09,0x2a,0x86,0x48,0x86 ,0xf7,0x0d,0x01,0x09,0x01,0x16,0x14,0x69 ,0x67,0x63,0x61,0x40,0x73,0x67,0x64,0x6e ,0x2e,0x70,0x6d,0x2e,0x67,0x6f,0x75,0x76 ,0x2e,0x66,0x72,0x30,0x1e,0x17,0x0d,0x30 ,0x32,0x31,0x32,0x31,0x33,0x31,0x34,0x32 ,0x39,0x32,0x33,0x5a,0x17,0x0d,0x32,0x30 ,0x31,0x30,0x31,0x37,0x31,0x34,0x32,0x39 ,0x32,0x32,0x5a,0x30,0x81,0x85,0x31,0x0b ,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13 ,0x02,0x46,0x52,0x31,0x0f,0x30,0x0d,0x06 ,0x03,0x55,0x04,0x08,0x13,0x06,0x46,0x72 ,0x61,0x6e,0x63,0x65,0x31,0x0e,0x30,0x0c ,0x06,0x03,0x55,0x04,0x07,0x13,0x05,0x50 ,0x61,0x72,0x69,0x73,0x31,0x10,0x30,0x0e ,0x06,0x03,0x55,0x04,0x0a,0x13,0x07,0x50 ,0x4d,0x2f,0x53,0x47,0x44,0x4e,0x31,0x0e ,0x30,0x0c,0x06,0x03,0x55,0x04,0x0b,0x13 ,0x05,0x44,0x43,0x53,0x53,0x49,0x31,0x0e ,0x30,0x0c,0x06,0x03,0x55,0x04,0x03,0x13 ,0x05,0x49,0x47,0x43,0x2f,0x41,0x31,0x23 ,0x30,0x21,0x06,0x09,0x2a,0x86,0x48,0x86 ,0xf7,0x0d,0x01,0x09,0x01,0x16,0x14,0x69 ,0x67,0x63,0x61,0x40,0x73,0x67,0x64,0x6e ,0x2e,0x70,0x6d,0x2e,0x67,0x6f,0x75,0x76 ,0x2e,0x66,0x72,0x30,0x82,0x01,0x22,0x30 ,0x0d,0x06,0x09,0x2a,0x86,0x48,0x86,0xf7 ,0x0d,0x01,0x01,0x01,0x05,0x00,0x03,0x82 
,0x01,0x0f,0x00,0x30,0x82,0x01,0x0a,0x02 ,0x82,0x01,0x01,0x00,0xb2,0x1f,0xd1,0xd0 ,0x62,0xc5,0x33,0x3b,0xc0,0x04,0x86,0x88 ,0xb3,0xdc,0xf8,0x88,0xf7,0xfd,0xdf,0x43 ,0xdf,0x7a,0x8d,0x9a,0x49,0x5c,0xf6,0x4e ,0xaa,0xcc,0x1c,0xb9,0xa1,0xeb,0x27,0x89 ,0xf2,0x46,0xe9,0x3b,0x4a,0x71,0xd5,0x1d ,0x8e,0x2d,0xcf,0xe6,0xad,0xab,0x63,0x50 ,0xc7,0x54,0x0b,0x6e,0x12,0xc9,0x90,0x36 ,0xc6,0xd8,0x2f,0xda,0x91,0xaa,0x68,0xc5 ,0x72,0xfe,0x17,0x0a,0xb2,0x17,0x7e,0x79 ,0xb5,0x32,0x88,0x70,0xca,0x70,0xc0,0x96 ,0x4a,0x8e,0xe4,0x55,0xcd,0x1d,0x27,0x94 ,0xbf,0xce,0x72,0x2a,0xec,0x5c,0xf9,0x73 ,0x20,0xfe,0xbd,0xf7,0x2e,0x89,0x67,0xb8 ,0xbb,0x47,0x73,0x12,0xf7,0xd1,0x35,0x69 ,0x3a,0xf2,0x0a,0xb9,0xae,0xff,0x46,0x42 ,0x46,0xa2,0xbf,0xa1,0x85,0x1a,0xf9,0xbf ,0xe4,0xff,0x49,0x85,0xf7,0xa3,0x70,0x86 ,0x32,0x1c,0x5d,0x9f,0x60,0xf7,0xa9,0xad ,0xa5,0xff,0xcf,0xd1,0x34,0xf9,0x7d,0x5b ,0x17,0xc6,0xdc,0xd6,0x0e,0x28,0x6b,0xc2 ,0xdd,0xf1,0xf5,0x33,0x68,0x9d,0x4e,0xfc ,0x87,0x7c,0x36,0x12,0xd6,0xa3,0x80,0xe8 ,0x43,0x0d,0x55,0x61,0x94,0xea,0x64,0x37 ,0x47,0xea,0x77,0xca,0xd0,0xb2,0x58,0x05 ,0xc3,0x5d,0x7e,0xb1,0xa8,0x46,0x90,0x31 ,0x56,0xce,0x70,0x2a,0x96,0xb2,0x30,0xb8 ,0x77,0xe6,0x79,0xc0,0xbd,0x29,0x3b,0xfd ,0x94,0x77,0x4c,0xbd,0x20,0xcd,0x41,0x25 ,0xe0,0x2e,0xc7,0x1b,0xbb,0xee,0xa4,0x04 ,0x41,0xd2,0x5d,0xad,0x12,0x6a,0x8a,0x9b ,0x47,0xfb,0xc9,0xdd,0x46,0x40,0xe1,0x9d ,0x3c,0x33,0xd0,0xb5,0x02,0x03,0x01,0x00 ,0x01,0xa3,0x77,0x30,0x75,0x30,0x0f,0x06 ,0x03,0x55,0x1d,0x13,0x01,0x01,0xff,0x04 ,0x05,0x30,0x03,0x01,0x01,0xff,0x30,0x0b ,0x06,0x03,0x55,0x1d,0x0f,0x04,0x04,0x03 ,0x02,0x01,0x46,0x30,0x15,0x06,0x03,0x55 ,0x1d,0x20,0x04,0x0e,0x30,0x0c,0x30,0x0a ,0x06,0x08,0x2a,0x81,0x7a,0x01,0x79,0x01 ,0x01,0x01,0x30,0x1d,0x06,0x03,0x55,0x1d ,0x0e,0x04,0x16,0x04,0x14,0xa3,0x05,0x2f ,0x18,0x60,0x50,0xc2,0x89,0x0a,0xdd,0x2b ,0x21,0x4f,0xff,0x8e,0x4e,0xa8,0x30,0x31 ,0x36,0x30,0x1f,0x06,0x03,0x55,0x1d,0x23 ,0x04,0x18,0x30,0x16,0x80,0x14,0xa3,0x05 ,0x2f,0x18,0x60,0x50,0xc2,0x89,0x0a,0xdd 
,0x2b,0x21,0x4f,0xff,0x8e,0x4e,0xa8,0x30 ,0x31,0x36,0x30,0x0d,0x06,0x09,0x2a,0x86 ,0x48,0x86,0xf7,0x0d,0x01,0x01,0x05,0x05 ,0x00,0x03,0x82,0x01,0x01,0x00,0x05,0xdc ,0x26,0xd8,0xfa,0x77,0x15,0x44,0x68,0xfc ,0x2f,0x66,0x3a,0x74,0xe0,0x5d,0xe4,0x29 ,0xff,0x06,0x07,0x13,0x84,0x4a,0xab,0xcf ,0x6d,0xa0,0x1f,0x51,0x94,0xf8,0x49,0xcb ,0x74,0x36,0x14,0xbc,0x15,0xdd,0xdb,0x89 ,0x2f,0xdd,0x8f,0xa0,0x5d,0x7c,0xf5,0x12 ,0xeb,0x9f,0x9e,0x38,0xa4,0x47,0xcc,0xb3 ,0x96,0xd9,0xbe,0x9c,0x25,0xab,0x03,0x7e ,0x33,0x0f,0x95,0x81,0x0d,0xfd,0x16,0xe0 ,0x88,0xbe,0x37,0xf0,0x6c,0x5d,0xd0,0x31 ,0x9b,0x32,0x2b,0x5d,0x17,0x65,0x93,0x98 ,0x60,0xbc,0x6e,0x8f,0xb1,0xa8,0x3c,0x1e ,0xd9,0x1c,0xf3,0xa9,0x26,0x42,0xf9,0x64 ,0x1d,0xc2,0xe7,0x92,0xf6,0xf4,0x1e,0x5a ,0xaa,0x19,0x52,0x5d,0xaf,0xe8,0xa2,0xf7 ,0x60,0xa0,0xf6,0x8d,0xf0,0x89,0xf5,0x6e ,0xe0,0x0a,0x05,0x01,0x95,0xc9,0x8b,0x20 ,0x0a,0xba,0x5a,0xfc,0x9a,0x2c,0x3c,0xbd ,0xc3,0xb7,0xc9,0x5d,0x78,0x25,0x05,0x3f ,0x56,0x14,0x9b,0x0c,0xda,0xfb,0x3a,0x48 ,0xfe,0x97,0x69,0x5e,0xca,0x10,0x86,0xf7 ,0x4e,0x96,0x04,0x08,0x4d,0xec,0xb0,0xbe ,0x5d,0xdc,0x3b,0x8e,0x4f,0xc1,0xfd,0x9a ,0x36,0x34,0x9a,0x4c,0x54,0x7e,0x17,0x03 ,0x48,0x95,0x08,0x11,0x1c,0x07,0x6f,0x85 ,0x08,0x7e,0x5d,0x4d,0xc4,0x9d,0xdb,0xfb ,0xae,0xce,0xb2,0xd1,0xb3,0xb8,0x83,0x6c ,0x1d,0xb2,0xb3,0x79,0xf1,0xd8,0x70,0x99 ,0x7e,0xf0,0x13,0x02,0xce,0x5e,0xdd,0x51 ,0xd3,0xdf,0x36,0x81,0xa1,0x1b,0x78,0x2f ,0x71,0xb3,0xf1,0x59,0x4c,0x46,0x18,0x28 ,0xab,0x85,0xd2,0x60,0x56,0x5a}; const unsigned int NptTlsTrustAnchor_Base_0120_Size = 1030;
gpl-2.0